Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r--  deps/v8/src/codegen/aligned-slot-allocator.cc | 1
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 57
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 5
-rw-r--r--  deps/v8/src/codegen/arm/cpu-arm.cc | 2
-rw-r--r--  deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h | 27
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 368
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 72
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 23
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.h | 4
-rw-r--r--  deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h | 26
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 4
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 603
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 106
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 2
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 77
-rw-r--r--  deps/v8/src/codegen/assembler.h | 76
-rw-r--r--  deps/v8/src/codegen/bailout-reason.h | 1
-rw-r--r--  deps/v8/src/codegen/code-factory.cc | 217
-rw-r--r--  deps/v8/src/codegen/code-factory.h | 14
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 381
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 79
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 133
-rw-r--r--  deps/v8/src/codegen/compiler.h | 4
-rw-r--r--  deps/v8/src/codegen/constant-pool.cc | 12
-rw-r--r--  deps/v8/src/codegen/constant-pool.h | 8
-rw-r--r--  deps/v8/src/codegen/external-reference-table.cc | 1
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 108
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 14
-rw-r--r--  deps/v8/src/codegen/handler-table.cc | 2
-rw-r--r--  deps/v8/src/codegen/handler-table.h | 4
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32-inl.h | 23
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 15
-rw-r--r--  deps/v8/src/codegen/ia32/cpu-ia32.cc | 2
-rw-r--r--  deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h | 24
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 413
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 67
-rw-r--r--  deps/v8/src/codegen/interface-descriptors-inl.h | 69
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 26
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 111
-rw-r--r--  deps/v8/src/codegen/machine-type.cc | 1
-rw-r--r--  deps/v8/src/codegen/macro-assembler.h | 39
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 5
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.h | 11
-rw-r--r--  deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h | 31
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 278
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 91
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 5
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.h | 11
-rw-r--r--  deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h | 42
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 291
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 84
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 11
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.h | 12
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 42
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 44
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.h | 31
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h | 27
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 1208
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 271
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 22
-rw-r--r--  deps/v8/src/codegen/reloc-info.h | 6
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h | 78
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 134
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.h | 37
-rw-r--r--  deps/v8/src/codegen/riscv64/constants-riscv64.h | 40
-rw-r--r--  deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h | 27
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 711
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h | 229
-rw-r--r--  deps/v8/src/codegen/riscv64/register-riscv64.h | 33
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 1
-rw-r--r--  deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h | 27
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 286
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 70
-rw-r--r--  deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h | 8
-rw-r--r--  deps/v8/src/codegen/source-position-table.cc | 21
-rw-r--r--  deps/v8/src/codegen/source-position-table.h | 8
-rw-r--r--  deps/v8/src/codegen/tnode.h | 1
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.cc | 31
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.h | 54
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64-inl.h | 33
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 15
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 1
-rw-r--r--  deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h | 53
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 627
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 119
85 files changed, 5230 insertions, 3158 deletions
diff --git a/deps/v8/src/codegen/aligned-slot-allocator.cc b/deps/v8/src/codegen/aligned-slot-allocator.cc
index 9e7ab09c81..91cd33d50c 100644
--- a/deps/v8/src/codegen/aligned-slot-allocator.cc
+++ b/deps/v8/src/codegen/aligned-slot-allocator.cc
@@ -66,7 +66,6 @@ int AlignedSlotAllocator::Allocate(int n) {
}
default:
UNREACHABLE();
- break;
}
DCHECK(IsValid(result));
size_ = std::max(size_, result + n);
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 09c57928ff..ec2588364c 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -2677,7 +2677,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
0xA * B8 | count);
}
-static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
+static void DoubleAsTwoUInt32(base::Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();
*lo = i & 0xFFFFFFFF;
@@ -2750,7 +2750,7 @@ void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
+static bool FitsVmovFPImmediate(base::Double d, uint32_t* encoding) {
// VMOV can accept an immediate of the form:
//
// +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
@@ -2799,7 +2799,7 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
uint32_t enc;
if (CpuFeatures::IsSupported(VFPv3) &&
- FitsVmovFPImmediate(Double(imm.get_scalar()), &enc)) {
+ FitsVmovFPImmediate(base::Double(imm.get_scalar()), &enc)) {
CpuFeatureScope scope(this, VFPv3);
// The float can be encoded in the instruction.
//
@@ -2818,7 +2818,7 @@ void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
}
}
-void Assembler::vmov(const DwVfpRegister dst, Double imm,
+void Assembler::vmov(const DwVfpRegister dst, base::Double imm,
const Register extra_scratch) {
DCHECK(VfpRegisterIsAvailable(dst));
uint32_t enc;
@@ -4046,6 +4046,8 @@ enum UnaryOp {
VTRN,
VRECPE,
VRSQRTE,
+ VPADAL_S,
+ VPADAL_U,
VPADDL_S,
VPADDL_U,
VCEQ0,
@@ -4119,6 +4121,12 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
// Only support floating point.
op_encoding = 0x3 * B16 | 0xB * B7;
break;
+ case VPADAL_S:
+ op_encoding = 0xC * B7;
+ break;
+ case VPADAL_U:
+ op_encoding = 0xD * B7;
+ break;
case VPADDL_S:
op_encoding = 0x4 * B7;
break;
@@ -5016,6 +5024,14 @@ void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
emit(EncodeNeonUnaryOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
}
+void Assembler::vpadal(NeonDataType dt, QwNeonRegister dst,
+ QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // vpadal.<dt>(Qd, Qm) SIMD Vector Pairwise Add and Accumulate Long
+ emit(EncodeNeonUnaryOp(NeonU(dt) ? VPADAL_U : VPADAL_S, NEON_Q,
+ NeonDataTypeToSize(dt), dst.code(), src.code()));
+}
+
void Assembler::vpaddl(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
@@ -5159,6 +5175,8 @@ void Assembler::RecordConstPool(int size) {
void Assembler::GrowBuffer() {
DCHECK_EQ(buffer_start_, buffer_->start());
+ bool previously_on_heap = buffer_->IsOnHeap();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -5191,6 +5209,14 @@ void Assembler::GrowBuffer() {
reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
+ // Patch on-heap references to handles.
+ if (previously_on_heap && !buffer_->IsOnHeap()) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ }
+
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
@@ -5357,9 +5383,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
while (buffer_space() <= needed_space) GrowBuffer();
{
+ ASM_CODE_COMMENT_STRING(this, "Constant Pool");
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
- RecordComment("[ Constant Pool");
RecordConstPool(size);
Label size_check;
@@ -5384,6 +5410,13 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
CHECK_LE(pc_offset(),
first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = pending_32_bit_constants_.size() * kPointerSize + kGap;
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5416,7 +5449,17 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
instr_at_put(entry.position(),
SetLdrRegisterImmediateOffset(instr, delta));
if (!entry.is_merged()) {
- emit(entry.value());
+ if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(pc_offset(), entry.value()));
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(entry.value()));
+ emit(handle->ptr());
+ // We must ensure that `emit` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ } else {
+ emit(entry.value());
+ }
}
}
@@ -5424,8 +5467,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
first_const_pool_32_use_ = -1;
- RecordComment("]");
-
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
if (after_pool.is_linked()) {
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 04d5eef054..d96c761910 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -45,13 +45,13 @@
#include <memory>
#include <vector>
+#include "src/base/numbers/double.h"
#include "src/base/small-vector.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/codegen/machine-type.h"
-#include "src/numbers/double.h"
#include "src/utils/boxed-float.h"
namespace v8 {
@@ -715,7 +715,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SwVfpRegister last, Condition cond = al);
void vmov(const SwVfpRegister dst, Float32 imm);
- void vmov(const DwVfpRegister dst, Double imm,
+ void vmov(const DwVfpRegister dst, base::Double imm,
const Register extra_scratch = no_reg);
void vmov(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
@@ -923,6 +923,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
+ void vpadal(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src);
void vpaddl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src);
void vqrdmulh(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
diff --git a/deps/v8/src/codegen/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc
index 88491c5e51..6fc1dd9659 100644
--- a/deps/v8/src/codegen/arm/cpu-arm.cc
+++ b/deps/v8/src/codegen/arm/cpu-arm.cc
@@ -10,6 +10,8 @@
#elif V8_OS_FREEBSD
#include <machine/sysarch.h> // for cache flushing
#include <sys/types.h>
+#elif V8_OS_STARBOARD
+#define __ARM_NR_cacheflush 0x0f0002
#else
#include <sys/syscall.h> // for cache flushing.
#endif
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
index 296f72d157..83d82fe3ce 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -19,19 +19,38 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs | r0.bit());
+ if (argc >= 2) DCHECK(allocatable_regs | r1.bit());
+ if (argc >= 3) DCHECK(allocatable_regs | r2.bit());
+ if (argc >= 4) DCHECK(allocatable_regs | r3.bit());
+ if (argc >= 5) DCHECK(allocatable_regs | r4.bit());
+ if (argc >= 6) DCHECK(allocatable_regs | r5.bit());
+ if (argc >= 7) DCHECK(allocatable_regs | r6.bit());
+ if (argc >= 8) DCHECK(allocatable_regs | r7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(r1, r5, r4, r2, r0);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r0);
return RegisterArray(r0, r1, r2, r3, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r0);
+ return RegisterArray(r0, r1, r2, r3, cp);
}
// static
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index d4e12f3092..49cb9d292c 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/numbers/double.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
@@ -21,10 +22,8 @@
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
-#include "src/numbers/double.h"
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -69,6 +68,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
RegList exclusions = 0;
if (exclusion1 != no_reg) {
@@ -96,6 +96,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
RestoreFPRegs(sp, lr);
@@ -166,9 +167,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (options().use_pc_relative_calls_and_jumps && target_is_builtin) {
int32_t code_target_index = AddCodeTarget(code);
@@ -180,19 +181,16 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// size s.t. pc-relative calls may be used.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- int offset = IsolateData::builtin_entry_slot_offset(
- static_cast<Builtins::Name>(code->builtin_index()));
+ int offset = IsolateData::builtin_entry_slot_offset(code->builtin_id());
ldr(scratch, MemOperand(kRootRegister, offset));
Jump(scratch, cond);
return;
} else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ RecordCommentForOffHeapTrampoline(builtin);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(ip, cond);
return;
}
@@ -258,9 +256,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin);
if (target_is_builtin && options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
@@ -270,14 +268,13 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
// This branch is taken only for specific cctests, where we force isolate
// creation at runtime. At this point, Code space isn't restricted to a
// size s.t. pc-relative calls may be used.
- int offset = IsolateData::builtin_entry_slot_offset(
- static_cast<Builtins::Name>(code->builtin_index()));
+ int offset = IsolateData::builtin_entry_slot_offset(code->builtin_id());
ldr(ip, MemOperand(kRootRegister, offset));
Call(ip, cond);
return;
} else if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
- CallBuiltin(builtin_index);
+ CallBuiltin(builtin);
return;
}
@@ -287,6 +284,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
@@ -306,31 +304,31 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination) {
- ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ ASM_CODE_COMMENT(this);
+ ldr(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
- Builtins::Name builtin_index) {
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index));
+ IsolateData::builtin_entry_slot_offset(builtin));
}
-void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
+void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -378,17 +376,20 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
@@ -440,6 +441,7 @@ void TurboAssembler::Push(Smi smi) {
void TurboAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register counter = scratch;
Register tmp = temps.Acquire();
@@ -656,6 +658,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -670,9 +673,11 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ DCHECK(!AreAliased(object, value, scratch));
add(scratch, object, Operand(offset - kHeapObjectTag));
tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok);
@@ -686,20 +691,21 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
bind(&done);
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
regs |= Register::from_code(i).bit();
}
}
-
stm(db_w, sp, regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -711,78 +717,64 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
- MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
+ SaveFPRegsMode fp_mode, StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target) {
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
-
- MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
-
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- CallBuiltin(builtin_index);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ Builtin builtin =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ CallBuiltin(builtin);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
@@ -826,15 +818,15 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK_NE(object, value);
+ DCHECK(!AreAliased(object, value));
if (FLAG_debug_code) {
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- add(scratch, object, offset);
- ldr(scratch, MemOperand(scratch));
- cmp(scratch, value);
- }
+ ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!AreAliased(object, value, scratch));
+ add(scratch, object, offset);
+ ldr(scratch, MemOperand(scratch));
+ cmp(scratch, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -861,15 +853,23 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, value, slot_address));
+ DCHECK(!offset.IsRegister());
+ add(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
+ if (FLAG_debug_code) Move(slot_address, Operand(kZapValue));
+
bind(&done);
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ ASM_CODE_COMMENT(this);
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
stm(db_w, sp, fp.bit() | lr.bit());
@@ -886,6 +886,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
}
void TurboAssembler::PushStandardFrame(Register function_reg) {
+ ASM_CODE_COMMENT(this);
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp,
(function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
@@ -1348,6 +1349,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
@@ -1358,6 +1360,7 @@ void TurboAssembler::Prologue() { PushStandardFrame(r1); }
void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
+ ASM_CODE_COMMENT(this);
// r0-r3: preserved
UseScratchRegisterScope temps(this);
Register scratch = no_reg;
@@ -1369,6 +1372,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
}
int TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
// r0: preserved
// r1: preserved
// r2: preserved
@@ -1386,6 +1390,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
// "Functions that allocate 4 KB or more on the stack must ensure that each
// page prior to the final page is touched in order." Source:
// https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
Label check_offset;
@@ -1405,6 +1410,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = no_dreg;
@@ -1423,6 +1429,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
UseScratchRegisterScope temps(this);
@@ -1491,6 +1498,7 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
+ ASM_CODE_COMMENT(this);
ConstantPoolUnavailableScope constant_pool_unavailable(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1548,6 +1556,7 @@ void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the end of destination area where we will put the arguments
@@ -1593,6 +1602,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
ExternalReference limit =
@@ -1609,6 +1619,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
Label* stack_overflow) {
+ ASM_CODE_COMMENT(this);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
@@ -1624,6 +1635,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
+ ASM_CODE_COMMENT(this);
Label regular_invoke;
// r0: actual arguments count
// r1: function (passed through to callee)
@@ -1691,6 +1703,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
ldr(r4, ReceiverOperand(actual_parameter_count));
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1724,6 +1737,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r1);
@@ -1777,6 +1791,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -1800,6 +1815,7 @@ void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
@@ -1814,6 +1830,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
void MacroAssembler::PushStackHandler() {
+ ASM_CODE_COMMENT(this);
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -1829,6 +1846,7 @@ void MacroAssembler::PushStackHandler() {
}
void MacroAssembler::PopStackHandler() {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -1841,6 +1859,7 @@ void MacroAssembler::PopStackHandler() {
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
@@ -1857,6 +1876,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
+ ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1876,6 +1896,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
+ ASM_CODE_COMMENT(this);
if (lower_limit != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1890,6 +1911,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
SwVfpRegister single_scratch = SwVfpRegister::no_reg();
if (temps.CanAcquireVfp<SwVfpRegister>()) {
@@ -1916,6 +1938,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DwVfpRegister double_input,
StubCallMode stub_mode) {
+ ASM_CODE_COMMENT(this);
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -1933,7 +1956,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
if (false) {
#endif // V8_ENABLE_WEBASSEMBLY
} else if (options().inline_offheap_trampolines) {
- CallBuiltin(Builtins::kDoubleToI);
+ CallBuiltin(Builtin::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
@@ -1947,6 +1970,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1966,6 +1990,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -2003,10 +2028,12 @@ void MacroAssembler::LoadWeakValue(Register out, Register in,
and_(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Move(scratch2, ExternalReference::Create(counter));
ldr(scratch1, MemOperand(scratch2));
add(scratch1, scratch1, Operand(value));
@@ -2014,10 +2041,12 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Move(scratch2, ExternalReference::Create(counter));
ldr(scratch1, MemOperand(scratch2));
sub(scratch1, scratch1, Operand(value));
@@ -2042,6 +2071,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
}
void TurboAssembler::Abort(AbortReason reason) {
+ ASM_CODE_COMMENT(this);
Label abort_start;
bind(&abort_start);
if (FLAG_code_comments) {
@@ -2087,10 +2117,12 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
+ ASM_CODE_COMMENT(this);
LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
+ ASM_CODE_COMMENT(this);
LoadMap(dst, cp);
ldr(dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
@@ -2098,6 +2130,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void TurboAssembler::InitializeRootRegister() {
+ ASM_CODE_COMMENT(this);
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
mov(kRootRegister, Operand(isolate_root));
}
@@ -2135,63 +2168,64 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, AbortReason::kOperandIsASmi);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, AbortReason::kOperandIsASmi);
}
void MacroAssembler::AssertSmi(Register object) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(eq, AbortReason::kOperandIsNotASmi);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(eq, AbortReason::kOperandIsNotASmi);
}
void MacroAssembler::AssertConstructor(Register object) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
- push(object);
- LoadMap(object, object);
- ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
- tst(object, Operand(Map::Bits1::IsConstructorBit::kMask));
- pop(object);
- Check(ne, AbortReason::kOperandIsNotAConstructor);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
+ push(object);
+ LoadMap(object, object);
+ ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
+ tst(object, Operand(Map::Bits1::IsConstructorBit::kMask));
+ pop(object);
+ Check(ne, AbortReason::kOperandIsNotAConstructor);
}
void MacroAssembler::AssertFunction(Register object) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
- push(object);
- LoadMap(object, object);
- CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
- pop(object);
- Check(ls, AbortReason::kOperandIsNotAFunction);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
+ push(object);
+ LoadMap(object, object);
+ CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
+ pop(object);
+ Check(ls, AbortReason::kOperandIsNotAFunction);
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
- push(object);
- CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
- pop(object);
- Check(eq, AbortReason::kOperandIsNotABoundFunction);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2221,25 +2255,27 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (FLAG_debug_code) {
- Label done_checking;
- AssertNotSmi(object);
- CompareRoot(object, RootIndex::kUndefinedValue);
- b(eq, &done_checking);
- LoadMap(scratch, object);
- CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
- Assert(eq, AbortReason::kExpectedUndefinedOrCell);
- bind(&done_checking);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, RootIndex::kUndefinedValue);
+ b(eq, &done_checking);
+ LoadMap(scratch, object);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
+ bind(&done_checking);
}
void TurboAssembler::CheckFor32DRegs(Register scratch) {
+ ASM_CODE_COMMENT(this);
Move(scratch, ExternalReference::cpu_features());
ldr(scratch, MemOperand(scratch));
tst(scratch, Operand(1u << VFP32DREGS));
}
void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
+ ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vstm(db_w, location, d16, d31, ne);
@@ -2248,6 +2284,7 @@ void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
}
void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
+ ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vldm(ia_w, location, d0, d15);
@@ -2256,6 +2293,7 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
}
void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
+ ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vstm(ia_w, location, d0, d15);
@@ -2265,6 +2303,7 @@ void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
void TurboAssembler::RestoreFPRegsFromHeap(Register location,
Register scratch) {
+ ASM_CODE_COMMENT(this);
CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
CheckFor32DRegs(scratch);
vldm(ia_w, location, d0, d15);
@@ -2440,6 +2479,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = ActivationFrameAlignment();
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
@@ -2506,6 +2546,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ ASM_CODE_COMMENT(this);
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
@@ -2516,6 +2557,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
+ ASM_CODE_COMMENT_STRING(this, "Check stack alignment");
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask));
@@ -2586,8 +2628,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ DCHECK(!AreAliased(object, scratch));
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
@@ -2617,6 +2661,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ ASM_CODE_COMMENT(this);
// We can use the register pc - 8 for the address of the current instruction.
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
@@ -2625,9 +2670,10 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ ASM_CODE_COMMENT(this);
BlockConstPoolScope block_const_pool(this);
ldr(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
@@ -2679,18 +2725,21 @@ void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
+ ASM_CODE_COMMENT(this);
vqsub(NeonS64, dst, src2, src1);
vshr(NeonS64, dst, dst, 63);
}
void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
+ ASM_CODE_COMMENT(this);
vqsub(NeonS64, dst, src1, src2);
vshr(NeonS64, dst, dst, 63);
vmvn(dst, dst);
}
void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
QwNeonRegister tmp = temps.AcquireQ();
// src = | a | b | c | d |
@@ -2714,6 +2763,7 @@ void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
}
void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Simd128Register tmp = temps.AcquireQ();
vshr(NeonS64, tmp, src, 63);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 3a54f6c45f..d6671fff3f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -265,8 +265,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatParameter(DwVfpRegister dst);
void MovFromFloatResult(DwVfpRegister dst);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug-code to enable.
@@ -295,10 +295,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Call(Register target, Condition cond = al);
@@ -311,26 +310,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
- MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
- void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination);
- // Load the builtin given by the Smi in |builtin_index| into the same
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ // Load the builtin given by the Smi in |builtin| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallBuiltin(int builtin_index, Condition cond = al);
+ void CallBuiltinByIndex(Register builtin_index);
+ void CallBuiltin(Builtin builtin, Condition cond = al);
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -374,18 +372,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
void CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// For a given |object| and |offset|:
// - Move |object| to |dst_object|.
// - Compute the address of the slot pointed to by |offset| in |object| and
@@ -430,7 +431,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
// Perform a floating-point min or max operation with the
// (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
@@ -519,7 +520,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override {
+ void LoadRoot(Register destination, RootIndex index) final {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, RootIndex index, Condition cond);
@@ -622,11 +623,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
-
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -812,9 +808,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// ---------------------------------------------------------------------------
// Stack limit utilities
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 0551877403..bf39a2e416 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -582,7 +582,7 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- base::Memcpy(link, &pc_, kSystemPointerSize);
+ memcpy(link, &pc_, kSystemPointerSize);
} else {
link->SetImmPCOffsetTarget(options(),
reinterpret_cast<Instruction*>(pc_));
@@ -4276,6 +4276,8 @@ bool Assembler::IsImmFP64(double imm) {
}
void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -4318,6 +4320,21 @@ void Assembler::GrowBuffer() {
WriteUnalignedValue<intptr_t>(address, internal_ref);
}
+ // Patch on-heap references to handles.
+ if (previously_on_heap && !buffer_->IsOnHeap()) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
+ DCHECK(is_int26(p.second));
+ DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
+ instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) |
+ p.second);
+ }
+ }
+
// Pending relocation entries are also relative, no need to relocate.
}
@@ -4493,8 +4510,8 @@ void Assembler::RecordVeneerPool(int location_offset, int size) {
void Assembler::EmitVeneers(bool force_emit, bool need_protection,
size_t margin) {
+ ASM_CODE_COMMENT(this);
BlockPoolsScope scope(this, PoolEmissionCheck::kSkip);
- RecordComment("[ Veneers");
// The exact size of the veneer pool must be recorded (see the comment at the
// declaration site of RecordConstPool()), but computing the number of
@@ -4587,8 +4604,6 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection,
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
bind(&end);
-
- RecordComment("]");
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 9d8b135954..6a0245fcd6 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -2616,7 +2616,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
STATIC_ASSERT(sizeof(instruction) == kInstrSize);
DCHECK_LE(pc_ + sizeof(instruction), buffer_start_ + buffer_->size());
- base::Memcpy(pc_, &instruction, sizeof(instruction));
+ memcpy(pc_, &instruction, sizeof(instruction));
pc_ += sizeof(instruction);
CheckBuffer();
}
@@ -2628,7 +2628,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// TODO(all): Somehow register we have some data here. Then we can
// disassemble it correctly.
- base::Memcpy(pc_, data, size);
+ memcpy(pc_, data, size);
pc_ += size;
CheckBuffer();
}
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
index 90123dbdcb..e8fe4ef1d3 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -20,19 +20,37 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs | x0.bit());
+ if (argc >= 2) DCHECK(allocatable_regs | x1.bit());
+ if (argc >= 3) DCHECK(allocatable_regs | x2.bit());
+ if (argc >= 4) DCHECK(allocatable_regs | x3.bit());
+ if (argc >= 5) DCHECK(allocatable_regs | x4.bit());
+ if (argc >= 6) DCHECK(allocatable_regs | x5.bit());
+ if (argc >= 7) DCHECK(allocatable_regs | x6.bit());
+ if (argc >= 8) DCHECK(allocatable_regs | x7.bit());
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(x1, x5, x4, x2, x0, x3);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == x0);
return RegisterArray(x0, x1, x2, x3, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == x0);
+ return RegisterArray(x0, x1, x2, x3, cp);
}
// static
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 8fb9bbfd7b..8986df823a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1288,7 +1288,7 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
return;
}
DCHECK_EQ(size % 16, 0);
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
while (size > kStackPageSize) {
Sub(sp, sp, kStackPageSize);
Str(xzr, MemOperand(sp));
@@ -1310,7 +1310,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
// "Functions that allocate 4k or more worth of stack must ensure that each
// page prior to the final page is touched in order." Source:
// https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=vs-2019#stack
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index b18ff55455..9dba8800d9 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -22,7 +22,6 @@
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -45,45 +44,62 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
+namespace {
+
+// For WebAssembly we care about the full floating point register. If we are not
+// running Wasm, we can get away with saving half of those registers.
+#if V8_ENABLE_WEBASSEMBLY
+constexpr int kStackSavedSavedFPSizeInBits = kQRegSizeInBits;
+#else
+constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits;
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
auto list = kCallerSaved;
list.Remove(exclusion);
list.Align();
- int bytes = list.Count() * kXRegSizeInBits / 8;
+ int bytes = list.TotalSizeInBytes();
if (fp_mode == SaveFPRegsMode::kSave) {
- DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
- bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
+ auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
+ DCHECK_EQ(fp_list.Count() % 2, 0);
+ bytes += fp_list.TotalSizeInBytes();
}
return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
+ ASM_CODE_COMMENT(this);
auto list = kCallerSaved;
list.Remove(exclusion);
list.Align();
PushCPURegList<kDontStoreLR>(list);
- int bytes = list.Count() * kXRegSizeInBits / 8;
+ int bytes = list.TotalSizeInBytes();
if (fp_mode == SaveFPRegsMode::kSave) {
- DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
- PushCPURegList(kCallerSavedV);
- bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
+ auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
+ DCHECK_EQ(fp_list.Count() % 2, 0);
+ PushCPURegList(fp_list);
+ bytes += fp_list.TotalSizeInBytes();
}
return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
- PopCPURegList(kCallerSavedV);
- bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
+ auto fp_list = CPURegList::GetCallerSavedV(kStackSavedSavedFPSizeInBits);
+ DCHECK_EQ(fp_list.Count() % 2, 0);
+ PopCPURegList(fp_list);
+ bytes += fp_list.TotalSizeInBytes();
}
auto list = kCallerSaved;
@@ -91,13 +107,14 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
list.Align();
PopCPURegList<kDontLoadLR>(list);
- bytes += list.Count() * kXRegSizeInBits / 8;
+ bytes += list.TotalSizeInBytes();
return bytes;
}
void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
const Operand& operand, LogicalOp op) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
if (operand.NeedsRelocation(this)) {
@@ -378,7 +395,7 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint32(imm));
uint8_t bytes[sizeof(imm)];
- base::Memcpy(bytes, &imm, sizeof(imm));
+ memcpy(bytes, &imm, sizeof(imm));
// All bytes are either 0x00 or 0xFF.
{
@@ -1204,26 +1221,26 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
}
void MacroAssembler::PushCalleeSavedRegisters() {
- {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
+ ASM_CODE_COMMENT(this);
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
- MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
- stp(d14, d15, tos);
- stp(d12, d13, tos);
- stp(d10, d11, tos);
- stp(d8, d9, tos);
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
- stp(x27, x28, tos);
- stp(x25, x26, tos);
- stp(x23, x24, tos);
- stp(x21, x22, tos);
- stp(x19, x20, tos);
+ stp(x27, x28, tos);
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
- STATIC_ASSERT(
- EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
- 18 * kSystemPointerSize);
+ STATIC_ASSERT(
+ EntryFrameConstants::kCalleeSavedRegisterBytesPushedBeforeFpLrPair ==
+ 18 * kSystemPointerSize);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
// Use the stack pointer's value immediately before pushing the LR as the
@@ -1235,22 +1252,22 @@ void MacroAssembler::PushCalleeSavedRegisters() {
STATIC_ASSERT(
EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair == 0);
- }
}
void MacroAssembler::PopCalleeSavedRegisters() {
- {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
+ ASM_CODE_COMMENT(this);
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
- MemOperand tos(sp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
- ldp(x29, x30, tos); // fp, lr
+ ldp(x29, x30, tos); // fp, lr
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- // The context (stack pointer value) for authenticating the LR here must
- // match the one used for signing it (see `PushCalleeSavedRegisters`).
- autibsp();
+  // The context (stack pointer value) for authenticating the LR here must
+  // match the one used for signing it (see `PushCalleeSavedRegisters`).
+ autibsp();
#endif
ldp(x19, x20, tos);
@@ -1263,20 +1280,19 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d10, d11, tos);
ldp(d12, d13, tos);
ldp(d14, d15, tos);
- }
}
void TurboAssembler::AssertSpAligned() {
- if (FLAG_debug_code) {
- HardAbortScope hard_abort(this); // Avoid calls to Abort.
- // Arm64 requires the stack pointer to be 16-byte aligned prior to address
- // calculation.
- UseScratchRegisterScope scope(this);
- Register temp = scope.AcquireX();
- Mov(temp, sp);
- Tst(temp, 15);
- Check(eq, AbortReason::kUnexpectedStackPointer);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ HardAbortScope hard_abort(this); // Avoid calls to Abort.
+ // Arm64 requires the stack pointer to be 16-byte aligned prior to address
+ // calculation.
+ UseScratchRegisterScope scope(this);
+ Register temp = scope.AcquireX();
+ Mov(temp, sp);
+ Tst(temp, 15);
+ Check(eq, AbortReason::kUnexpectedStackPointer);
}
void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
@@ -1298,6 +1314,7 @@ void TurboAssembler::CopySlots(Register dst, Register src,
void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, src, count));
if (FLAG_debug_code) {
@@ -1375,13 +1392,14 @@ void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
- if (FLAG_debug_code) {
- Label unexpected_mode, done;
- UseScratchRegisterScope temps(this);
- if (fpcr.IsNone()) {
- fpcr = temps.AcquireX();
- Mrs(fpcr, FPCR);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Label unexpected_mode, done;
+ UseScratchRegisterScope temps(this);
+ if (fpcr.IsNone()) {
+ fpcr = temps.AcquireX();
+ Mrs(fpcr, FPCR);
+ }
// Settings left to their default values:
// - Assert that flush-to-zero is not set.
@@ -1395,7 +1413,6 @@ void TurboAssembler::AssertFPCRState(Register fpcr) {
Abort(AbortReason::kUnexpectedFPCRMode);
Bind(&done);
- }
}
void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
@@ -1409,6 +1426,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
Ldr(destination,
@@ -1416,6 +1434,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
}
void TurboAssembler::PushRoot(RootIndex index) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
LoadRoot(tmp, index);
@@ -1474,63 +1493,78 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
}
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(eq, reason);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
}
void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(ne, reason);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+}
+
+void MacroAssembler::AssertCodeT(Register object) {
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ AssertNotSmi(object, AbortReason::kOperandIsNotACodeT);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(
+ object, temp, temp,
+ V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE : CODE_TYPE);
+ Check(eq, AbortReason::kOperandIsNotACodeT);
}
void MacroAssembler::AssertConstructor(Register object) {
- if (FLAG_debug_code) {
- AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
- LoadMap(temp, object);
- Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- Tst(temp, Operand(Map::Bits1::IsConstructorBit::kMask));
+ LoadMap(temp, object);
+ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ Tst(temp, Operand(Map::Bits1::IsConstructorBit::kMask));
- Check(ne, AbortReason::kOperandIsNotAConstructor);
- }
+ Check(ne, AbortReason::kOperandIsNotAConstructor);
}
void MacroAssembler::AssertFunction(Register object) {
- if (FLAG_debug_code) {
- AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- LoadMap(temp, object);
- CompareInstanceTypeRange(temp, temp, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
- Check(ls, AbortReason::kOperandIsNotAFunction);
- }
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadMap(temp, object);
+ CompareInstanceTypeRange(temp, temp, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
+ Check(ls, AbortReason::kOperandIsNotAFunction);
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (FLAG_debug_code) {
- AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
- CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
- Check(eq, AbortReason::kOperandIsNotABoundFunction);
- }
+ CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
@@ -1556,31 +1590,32 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (FLAG_debug_code) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- Label done_checking;
- AssertNotSmi(object);
- JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
- LoadMap(scratch, object);
- CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
- Assert(eq, AbortReason::kExpectedUndefinedOrCell);
- Bind(&done_checking);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ Label done_checking;
+ AssertNotSmi(object);
+ JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
+ LoadMap(scratch, object);
+ CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell);
+ Bind(&done_checking);
}
void TurboAssembler::AssertPositiveOrZero(Register value) {
- if (FLAG_debug_code) {
- Label done;
- int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
- Tbz(value, sign_bit, &done);
- Abort(AbortReason::kUnexpectedNegativeValue);
- Bind(&done);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Label done;
+ int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
+ Tbz(value, sign_bit, &done);
+ Abort(AbortReason::kUnexpectedNegativeValue);
+ Bind(&done);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// All arguments must be on the stack before this function is called.
// x0 holds the return value after the call.
@@ -1599,6 +1634,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
+ ASM_CODE_COMMENT(this);
Mov(x1, builtin);
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, builtin_exit_frame);
@@ -1611,6 +1647,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -1647,6 +1684,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_of_reg_args,
int num_of_double_args) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, function);
@@ -1657,6 +1695,7 @@ static const int kRegisterPassedArguments = 8;
void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int num_of_double_args) {
+ ASM_CODE_COMMENT(this);
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
@@ -1725,6 +1764,7 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadTaggedPointerField(
@@ -1804,7 +1844,13 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target,
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
- JumpHelper(CalculateTargetOffset(target, rmode, pc_), rmode, cond);
+ int64_t offset = CalculateTargetOffset(target, rmode, pc_);
+ if (RelocInfo::IsRuntimeEntry(rmode) && IsOnHeap()) {
+ saved_offsets_for_runtime_entries_.push_back(
+ std::make_pair(pc_offset(), offset));
+ offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
+ }
+ JumpHelper(offset, rmode, cond);
}
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
@@ -1814,11 +1860,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
// Inline the trampoline.
CHECK_EQ(cond, Condition::al); // Implement if necessary.
- TailCallBuiltin(builtin_index);
+ TailCallBuiltin(builtin);
return;
}
}
@@ -1846,9 +1892,13 @@ void TurboAssembler::Call(Register target) {
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
+ if (IsOnHeap() && RelocInfo::IsRuntimeEntry(rmode)) {
+ saved_offsets_for_runtime_entries_.push_back(
+ std::make_pair(pc_offset(), offset));
+ offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
+ }
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), rmode);
} else {
@@ -1862,10 +1912,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin)) {
// Inline the trampoline.
- CallBuiltin(builtin_index);
+ CallBuiltin(builtin);
return;
}
}
@@ -1888,6 +1938,7 @@ void TurboAssembler::Call(ExternalReference target) {
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
if (SmiValuesAre32Bits()) {
@@ -1909,55 +1960,49 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination) {
- Ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ldr(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
- Builtins::Name builtin_index) {
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index));
+ IsolateData::builtin_entry_slot_offset(builtin));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
-void TurboAssembler::CallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ RecordCommentForOffHeapTrampoline(builtin);
+ CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) {
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Call(entry, RelocInfo::RUNTIME_ENTRY);
+ Call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Ldr(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
}
- RecordComment("]");
}
-void TurboAssembler::TailCallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ RecordCommentForOffHeapTrampoline(builtin);
+ CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) {
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ Jump(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
// The control flow integrity (CFI) feature allows us to "sign" code entry
// points as a target for calls, jumps or both. Arm64 has special
// instructions for this purpose, so-called "landing pads" (see
@@ -1969,14 +2014,14 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
// (i.e. `bti j`) landing pads for the tail-called code.
Register temp = x17;
- Ldr(temp, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Ldr(temp, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(temp);
}
- RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -2025,11 +2070,13 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
Call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
@@ -2041,7 +2088,71 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
Jump(x17);
}
+void TurboAssembler::LoadCodeDataContainerEntry(
+ Register destination, Register code_data_container_object) {
+ ASM_CODE_COMMENT(this);
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ Ldr(destination, FieldMemOperand(code_data_container_object,
+ CodeDataContainer::kCodeEntryPointOffset));
+}
+
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+ Register destination, Register code_data_container_object) {
+ ASM_CODE_COMMENT(this);
+ LoadTaggedPointerField(destination,
+ FieldMemOperand(code_data_container_object,
+ CodeDataContainer::kCodeOffset));
+}
+
+void TurboAssembler::CallCodeDataContainerObject(
+ Register code_data_container_object) {
+ ASM_CODE_COMMENT(this);
+ LoadCodeDataContainerEntry(code_data_container_object,
+ code_data_container_object);
+ Call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+ Register code_data_container_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeDataContainerEntry(code_data_container_object,
+ code_data_container_object);
+ UseScratchRegisterScope temps(this);
+ if (code_data_container_object != x17) {
+ temps.Exclude(x17);
+ Mov(x17, code_data_container_object);
+ }
+ Jump(x17);
+}
+
+void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
+ ASM_CODE_COMMENT(this);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ LoadCodeDataContainerEntry(destination, code);
+ } else {
+ Add(destination, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeTObject(Register code) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ CallCodeDataContainerObject(code);
+ } else {
+ CallCodeObject(code);
+ }
+}
+
+void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ JumpCodeDataContainerObject(code, jump_mode);
+ } else {
+ JumpCodeObject(code, jump_mode);
+ }
+}
+
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ ASM_CODE_COMMENT(this);
// This generates the final instruction sequence for calls to C functions
// once an exit frame has been constructed.
//
@@ -2061,6 +2172,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Poke(x17, 0);
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Verify fp[kSPOffset]-8");
// Verify that the slot below fp[kSPOffset]-8 points to the signed return
// location.
Ldr(x16, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -2074,6 +2186,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
}
void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, Immediate(target, rmode));
@@ -2085,8 +2198,9 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
}
void TurboAssembler::CallForDeoptimization(
- Builtins::Name target, int deopt_id, Label* exit, DeoptimizeKind kind,
- Label* ret, Label* jump_deoptimization_entry_label) {
+ Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret,
+ Label* jump_deoptimization_entry_label) {
+ ASM_CODE_COMMENT(this);
BlockPoolsScope scope(this);
bl(jump_deoptimization_entry_label);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
@@ -2104,6 +2218,7 @@ void TurboAssembler::CallForDeoptimization(
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the end of destination area where we will put the arguments
@@ -2157,6 +2272,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
ExternalReference limit =
@@ -2172,6 +2288,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args,
Label* stack_overflow) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2191,6 +2308,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args,
void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register actual_argument_count, Label* done,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// x0: actual arguments count.
// x1: function (passed through to callee).
// x2: expected arguments count.
@@ -2299,6 +2417,7 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
Peek(x4, ReceiverOperand(actual_parameter_count));
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2322,6 +2441,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, x1);
@@ -2355,10 +2475,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
- CallCodeObject(code);
+ CallCodeTObject(code);
break;
case InvokeType::kJump:
- JumpCodeObject(code);
+ JumpCodeTObject(code);
break;
}
B(&done);
@@ -2381,6 +2501,7 @@ Operand MacroAssembler::ReceiverOperand(Register arg_count) {
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK(type == InvokeType::kJump || has_frame());
@@ -2410,6 +2531,7 @@ void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK(type == InvokeType::kJump || has_frame());
@@ -2428,6 +2550,7 @@ void MacroAssembler::InvokeFunction(Register function,
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
+ ASM_CODE_COMMENT(this);
// Try to convert with an FPU convert instruction. It's trivial to compute
// the modulo operation on an integer register so we convert to a 64-bit
// integer.
@@ -2454,6 +2577,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DoubleRegister double_input,
StubCallMode stub_mode,
LinkRegisterStatus lr_status) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(JSCVT)) {
Fjcvtzs(result.W(), double_input);
return;
@@ -2481,7 +2605,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
if (false) {
#endif // V8_ENABLE_WEBASSEMBLY
} else if (options().inline_offheap_trampolines) {
- CallBuiltin(Builtins::kDoubleToI);
+ CallBuiltin(Builtin::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
@@ -2502,6 +2626,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
}
void TurboAssembler::Prologue() {
+ ASM_CODE_COMMENT(this);
Push<TurboAssembler::kSignLR>(lr, fp);
mov(fp, sp);
STATIC_ASSERT(kExtraSlotClaimedByPrologue == 1);
@@ -2568,6 +2693,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(sp, fp);
@@ -2575,6 +2701,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
void MacroAssembler::ExitFramePreserveFPRegs() {
+ ASM_CODE_COMMENT(this);
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
}
@@ -2582,6 +2709,7 @@ void MacroAssembler::ExitFramePreserveFPRegs() {
void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
+ ASM_CODE_COMMENT(this);
CPURegList saved_fp_regs = kCallerSavedV;
DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
@@ -2597,6 +2725,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -2658,6 +2787,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
+ ASM_CODE_COMMENT(this);
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@@ -2688,19 +2818,23 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
+ ASM_CODE_COMMENT(this);
LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
+ ASM_CODE_COMMENT(this);
CompareAndBranch(in.W(), Operand(kClearedWeakHeapObjectLower32), eq,
target_if_cleared);
and_(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -2713,14 +2847,10 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- IncrementCounter(counter, -value, scratch1, scratch2);
-}
-
void MacroAssembler::JumpIfObjectType(Register object, Register map,
Register type_reg, InstanceType type,
Label* if_cond_pass, Condition cond) {
+ ASM_CODE_COMMENT(this);
CompareObjectType(object, map, type_reg, type);
B(cond, if_cond_pass);
}
@@ -2728,17 +2858,20 @@ void MacroAssembler::JumpIfObjectType(Register object, Register map,
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
+ ASM_CODE_COMMENT(this);
LoadMap(map, object);
CompareInstanceType(map, type_reg, type);
}
void TurboAssembler::LoadMap(Register dst, Register object) {
+ ASM_CODE_COMMENT(this);
LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
}
// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
+ ASM_CODE_COMMENT(this);
Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Cmp(type_reg, type);
}
@@ -2747,6 +2880,7 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
+ ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2756,6 +2890,7 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
}
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
+ ASM_CODE_COMMENT(this);
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
@@ -2763,6 +2898,7 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
}
void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
@@ -2786,6 +2922,7 @@ void MacroAssembler::JumpIfIsInRange(const Register& value,
unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
+ ASM_CODE_COMMENT(this);
if (lower_limit != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireW();
@@ -2840,41 +2977,38 @@ void TurboAssembler::StoreTaggedField(const Register& value,
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressTaggedSigned");
+ ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
if (FLAG_debug_code) {
// Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
Add(destination, destination,
((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
}
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
Add(destination, kPtrComprCageBaseRegister, destination);
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
- RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
- RecordComment("[ DecompressAnyTagged");
+ ASM_CODE_COMMENT(this);
Ldr(destination.W(), field_operand);
Add(destination, kPtrComprCageBaseRegister, destination);
- RecordComment("]");
}
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
@@ -2893,6 +3027,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, value));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -2907,9 +3043,11 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
+ DCHECK(!AreAliased(object, value, scratch));
Add(scratch, object, offset - kHeapObjectTag);
Tst(scratch, kTaggedSize - 1);
B(eq, &ok);
@@ -2923,8 +3061,9 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Bind(&done);
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
// If we were saving LR, we might need to sign it.
DCHECK(!regs.IncludesAliasOf(lr));
@@ -2932,8 +3071,9 @@ void TurboAssembler::SaveRegisters(RegList registers) {
PushCPURegList(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, registers);
// If we were saving LR, we might need to sign it.
DCHECK(!regs.IncludesAliasOf(lr));
@@ -2943,83 +3083,68 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
+ MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(),
+ WriteBarrierDescriptor::SlotAddressRegister(), object,
+ offset);
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
-
- MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
-
- Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
+ SaveFPRegsMode fp_mode, StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target) {
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Operand offset, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
-
- MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
-
- Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- CallBuiltin(builtin_index);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ Builtin builtin =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ CallBuiltin(builtin);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(dst_object, dst_slot);
// If `offset` is a register, it cannot overlap with `object`.
DCHECK_IMPLIES(!offset.IsImmediate(), offset.reg() != object);
@@ -3060,13 +3185,15 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, value));
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
+ DCHECK(!AreAliased(object, value, temp));
Add(temp, object, offset);
LoadTaggedPointerField(temp, MemOperand(temp));
Cmp(temp, value);
@@ -3097,10 +3224,16 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
if (lr_status == kLRHasNotBeenSaved) {
Push<TurboAssembler::kSignLR>(padreg, lr);
}
- CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ // TODO(cbruni): Turn offset into int.
+ DCHECK(offset.IsImmediate());
+ Add(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
Pop<TurboAssembler::kAuthLR>(lr, padreg);
}
+ if (FLAG_debug_code) Mov(slot_address, Operand(kZapValue));
Bind(&done);
}
@@ -3127,6 +3260,7 @@ void TurboAssembler::Trap() { Brk(0); }
void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
void TurboAssembler::Abort(AbortReason reason) {
+ ASM_CODE_COMMENT(this);
if (FLAG_code_comments) {
RecordComment("Abort message: ");
RecordComment(GetAbortReason(reason));
@@ -3183,6 +3317,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
const CPURegister& arg1,
const CPURegister& arg2,
const CPURegister& arg3) {
+ ASM_CODE_COMMENT(this);
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
DCHECK(!kCallerSaved.IncludesAliasOf(sp));
@@ -3328,6 +3463,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format,
}
void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
+ ASM_CODE_COMMENT(this);
// A call to printf needs special handling for the simulator, since the system
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
@@ -3359,6 +3495,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
void TurboAssembler::Printf(const char* format, CPURegister arg0,
CPURegister arg1, CPURegister arg2,
CPURegister arg3) {
+ ASM_CODE_COMMENT(this);
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -3495,6 +3632,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
#endif // V8_ENABLE_WEBASSEMBLY
void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
VRegister tmp1 = scope.AcquireV(kFormat2D);
Register tmp2 = scope.AcquireX();
@@ -3505,6 +3643,7 @@ void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
}
void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) {
+ ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
VRegister tmp = scope.AcquireV(kFormat2D);
Cmeq(tmp.V2D(), src.V2D(), 0);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 7bc6432c36..235b9a4b69 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -584,8 +584,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
@@ -837,18 +837,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const Register& src0, const VRegister& src1);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
void CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// For a given |object| and |offset|:
// - Move |object| to |dst_object|.
// - Compute the address of the slot pointed to by |offset| in |object| and
@@ -950,15 +953,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int shift_amount = 0);
void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode);
@@ -968,35 +970,45 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
- // Load the builtin given by the Smi in |builtin_index| into the same
+  // Load the builtin given by the Smi in |builtin| into the same
// register.
- void LoadEntryFromBuiltinIndex(Register builtin_index);
- void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination);
- MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return CallBuiltin(static_cast<int>(builtin));
- }
- void CallBuiltin(int builtin_index);
- void TailCallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return TailCallBuiltin(static_cast<int>(builtin));
- }
- void TailCallBuiltin(int builtin_index);
-
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+ void TailCallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Load code entry point from the CodeDataContainer object.
+ void LoadCodeDataContainerEntry(Register destination,
+ Register code_data_container_object);
+ // Load code entry point from the CodeDataContainer object and compute
+ // Code object pointer out of it. Must not be used for CodeDataContainers
+ // corresponding to builtins, because their entry points values point to
+ // the embedded instruction stream in .text section.
+ void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+ Register code_data_container_object);
+ void CallCodeDataContainerObject(Register code_data_container_object);
+ void JumpCodeDataContainerObject(Register code_data_container_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Helper functions that dispatch either to Call/JumpCodeObject or to
+ // Call/JumpCodeDataContainerObject.
+ void LoadCodeTEntry(Register destination, Register code);
+ void CallCodeTObject(Register code);
+ void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
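
Note on the CodeT helpers declared in the hunk above: LoadCodeTEntry, CallCodeTObject and JumpCodeTObject exist so callers do not have to care whether CodeT is backed by a Code object or by a CodeDataContainer. The sketch below only illustrates that dispatch shape; the template parameters and the kExternalCodeSpace constant are stand-ins, not the patch's actual types or flag.

    // Minimal sketch of the CodeT dispatch pattern (illustrative only).
    constexpr bool kExternalCodeSpace = false;  // assumed build-time switch

    template <typename MacroAssembler, typename Register>
    void CallCodeTObjectSketch(MacroAssembler* masm, Register code) {
      if (kExternalCodeSpace) {
        // CodeT aliases CodeDataContainer: call through its entry point field.
        masm->CallCodeDataContainerObject(code);
      } else {
        // CodeT aliases Code: use the ordinary code-object call path.
        masm->CallCodeObject(code);
      }
    }
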
@@ -1276,7 +1288,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DECLARE_FUNCTION
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
void PushRoot(RootIndex index);
inline void Ret(const Register& xn = lr);
@@ -1462,11 +1474,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
byte* pc);
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
-
- void CallRecordWriteStub(Register object, Operand offset,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
@@ -1826,6 +1833,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object,
AbortReason reason = AbortReason::kOperandIsASmi);
+ // Abort execution if argument is not a CodeT, enabled via --debug-code.
+ void AssertCodeT(Register object);
+
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
@@ -2007,9 +2017,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, -value, scratch1, scratch2);
+ }
// ---------------------------------------------------------------------------
// Stack limit utilities
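
The counter hunk above moves the --native-code-counters check into inline wrappers, so nothing is emitted when the flag is off, and DecrementCounter becomes a forward to EmitIncrementCounter with a negated value. A standalone model of that wrapper shape (the flag and the emitter below are stand-ins, not V8's):

    #include <cstdio>

    static bool FLAG_native_code_counters = false;  // stand-in for the V8 flag

    // Stand-in for the architecture-specific emitter that generates code.
    static void EmitIncrementCounter(const char* counter, int value) {
      std::printf("emit: %s += %d\n", counter, value);
    }

    static void IncrementCounter(const char* counter, int value) {
      if (!FLAG_native_code_counters) return;  // early out: emit nothing
      EmitIncrementCounter(counter, value);
    }

    static void DecrementCounter(const char* counter, int value) {
      if (!FLAG_native_code_counters) return;
      EmitIncrementCounter(counter, -value);  // decrement == increment by -value
    }

    int main() {
      FLAG_native_code_counters = true;
      IncrementCounter("calls", 1);
      DecrementCounter("calls", 1);
    }
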
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 5b234526a4..21007a5973 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index bb80d366de..8eb5ae55e2 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -34,6 +34,10 @@
#include "src/codegen/assembler.h"
+#ifdef V8_CODE_COMMENTS
+#include <iomanip>
+#endif
+#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -43,7 +47,6 @@
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"
-#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -96,9 +99,10 @@ namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
public:
explicit DefaultAssemblerBuffer(int size)
- : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {
+ : buffer_(base::OwnedVector<uint8_t>::NewForOverwrite(
+ std::max(AssemblerBase::kMinimalBufferSize, size))) {
#ifdef DEBUG
- ZapCode(reinterpret_cast<Address>(buffer_.start()), size);
+ ZapCode(reinterpret_cast<Address>(buffer_.start()), buffer_.size());
#endif
}
@@ -112,7 +116,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer {
}
private:
- OwnedVector<uint8_t> buffer_;
+ base::OwnedVector<uint8_t> buffer_;
};
class ExternalAssemblerBufferImpl : public AssemblerBuffer {
@@ -136,6 +140,35 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
const int size_;
};
+class OnHeapAssemblerBuffer : public AssemblerBuffer {
+ public:
+ OnHeapAssemblerBuffer(Handle<Code> code, int size)
+ : code_(code), size_(size) {}
+
+ byte* start() const override {
+ return reinterpret_cast<byte*>(code_->raw_instruction_start());
+ }
+
+ int size() const override { return size_; }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ DCHECK_LT(size(), new_size);
+ // We fall back to the slow path using the default assembler buffer and
+ // compile the code off the GC heap. Compiling directly on heap makes less
+ // sense now, since we will need to allocate a new Code object, copy the
+ // content generated so far and relocate.
+ return std::make_unique<DefaultAssemblerBuffer>(new_size);
+ }
+
+ bool IsOnHeap() const override { return true; }
+
+ MaybeHandle<Code> code() const override { return code_; }
+
+ private:
+ Handle<Code> code_;
+ const int size_;
+};
+
static thread_local std::aligned_storage_t<sizeof(ExternalAssemblerBufferImpl),
alignof(ExternalAssemblerBufferImpl)>
tls_singleton_storage;
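
The new OnHeapAssemblerBuffer above emits code straight into an existing Code object's instruction area. Its Grow override deliberately does not try to grow in place: it hands back an ordinary off-heap buffer, and emission continues through the usual copy-into-code-space path. A hedged standalone model of that fallback, with stand-in types instead of V8's AssemblerBuffer hierarchy:

    #include <memory>
    #include <vector>

    struct Buffer {
      virtual ~Buffer() = default;
      virtual unsigned char* start() = 0;
      virtual int size() const = 0;
      virtual std::unique_ptr<Buffer> Grow(int new_size) = 0;
    };

    // Off-heap buffer that owns its storage and can grow freely.
    struct OffHeapBuffer : Buffer {
      explicit OffHeapBuffer(int size) : storage(size) {}
      unsigned char* start() override { return storage.data(); }
      int size() const override { return static_cast<int>(storage.size()); }
      std::unique_ptr<Buffer> Grow(int new_size) override {
        return std::make_unique<OffHeapBuffer>(new_size);
      }
      std::vector<unsigned char> storage;
    };

    // Fixed-size view over space that lives somewhere else (here: the GC heap).
    struct OnHeapBuffer : Buffer {
      OnHeapBuffer(unsigned char* code_space, int size)
          : code_space(code_space), fixed_size(size) {}
      unsigned char* start() override { return code_space; }
      int size() const override { return fixed_size; }
      std::unique_ptr<Buffer> Grow(int new_size) override {
        // Growing in place would mean allocating a new on-heap object and
        // relocating what was emitted so far, so fall back to off-heap space.
        return std::make_unique<OffHeapBuffer>(new_size);
      }
      unsigned char* code_space;  // stand-in for Code::raw_instruction_start()
      int fixed_size;
    };

    int main() {
      unsigned char scratch[64];
      std::unique_ptr<Buffer> buf = std::make_unique<OnHeapBuffer>(scratch, 64);
      buf = buf->Grow(128);  // silently switches to an off-heap buffer
      return buf->size() == 128 ? 0 : 1;
    }
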
@@ -172,9 +205,24 @@ std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
return std::make_unique<DefaultAssemblerBuffer>(size);
}
+std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
+ int estimated) {
+ int size = std::max(AssemblerBase::kMinimalBufferSize, estimated);
+ MaybeHandle<Code> code =
+ isolate->factory()->NewEmptyCode(CodeKind::BASELINE, size);
+ if (code.is_null()) return {};
+ return std::make_unique<OnHeapAssemblerBuffer>(code.ToHandleChecked(), size);
+}
+
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
+// static
+constexpr int AssemblerBase::kMinimalBufferSize;
+
+// static
+constexpr int AssemblerBase::kDefaultBufferSize;
+
AssemblerBase::AssemblerBase(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: buffer_(std::move(buffer)),
@@ -192,7 +240,7 @@ AssemblerBase::~AssemblerBase() = default;
void AssemblerBase::Print(Isolate* isolate) {
StdoutStream os;
- v8::internal::Disassembler::Decode(isolate, &os, buffer_start_, pc_);
+ v8::internal::Disassembler::Decode(isolate, os, buffer_start_, pc_);
}
// -----------------------------------------------------------------------------
@@ -308,5 +356,24 @@ int Assembler::WriteCodeComments() {
return size;
}
+#ifdef V8_CODE_COMMENTS
+int Assembler::CodeComment::depth() const { return assembler_->comment_depth_; }
+void Assembler::CodeComment::Open(const std::string& comment) {
+ std::stringstream sstream;
+ sstream << std::setfill(' ') << std::setw(depth() * kIndentWidth + 2);
+ sstream << "[ " << comment;
+ assembler_->comment_depth_++;
+ assembler_->RecordComment(sstream.str());
+}
+
+void Assembler::CodeComment::Close() {
+ assembler_->comment_depth_--;
+ std::string comment = "]";
+ comment.insert(0, depth() * kIndentWidth, ' ');
+ DCHECK_LE(0, depth());
+ assembler_->RecordComment(comment);
+}
+#endif
+
} // namespace internal
} // namespace v8
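
The CodeComment helper implemented above brackets a region of emitted code with a "[ label" / "]" comment pair, indented two spaces per nesting level: Open reads the depth before incrementing it and Close decrements before printing, so the opening and closing lines of one scope line up. A standalone model of just the string building:

    #include <iostream>
    #include <string>

    struct CommentStream {
      int depth = 0;

      void Open(const std::string& comment) {
        Emit(std::string(depth * 2, ' ') + "[ " + comment);
        ++depth;  // nested scopes indent one level deeper
      }

      void Close() {
        --depth;  // back to the level of the matching Open
        Emit(std::string(depth * 2, ' ') + "]");
      }

      void Emit(const std::string& line) { std::cout << line << '\n'; }
    };

    int main() {
      CommentStream s;
      s.Open("Prologue");    // prints "[ Prologue"
      s.Open("StackCheck");  // prints "  [ StackCheck"
      s.Close();             // prints "  ]"
      s.Close();             // prints "]"
    }
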
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 98cca61a7c..ee5aef524d 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -202,6 +202,8 @@ class AssemblerBuffer {
// destructed), but not written.
virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
V8_WARN_UNUSED_RESULT = 0;
+ virtual bool IsOnHeap() const { return false; }
+ virtual MaybeHandle<Code> code() const { return MaybeHandle<Code>(); }
};
// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
@@ -214,6 +216,10 @@ std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
+V8_EXPORT_PRIVATE
+std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
+ int size);
+
class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
public:
AssemblerBase(const AssemblerOptions& options,
@@ -275,10 +281,26 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
#endif
}
+ bool IsOnHeap() const { return buffer_->IsOnHeap(); }
+
+ MaybeHandle<Code> code() const {
+ DCHECK(IsOnHeap());
+ return buffer_->code();
+ }
+
byte* buffer_start() const { return buffer_->start(); }
int buffer_size() const { return buffer_->size(); }
int instruction_size() const { return pc_offset(); }
+ std::unique_ptr<AssemblerBuffer> ReleaseBuffer() {
+ std::unique_ptr<AssemblerBuffer> buffer = std::move(buffer_);
+ DCHECK_NULL(buffer_);
+ // Reset fields to prevent accidental further modifications of the buffer.
+ buffer_start_ = nullptr;
+ pc_ = nullptr;
+ return buffer;
+ }
+
// This function is called when code generation is aborted, so that
// the assembler could clean up internal data structures.
virtual void AbortedCodeGeneration() {}
@@ -288,15 +310,48 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
- V8_INLINE void RecordComment(const char* msg) {
+ V8_INLINE void RecordComment(const char* comment) {
+ // Set explicit dependency on --code-comments for dead-code elimination in
+ // release builds.
+ if (!FLAG_code_comments) return;
+ if (options().emit_code_comments) {
+ code_comments_writer_.Add(pc_offset(), std::string(comment));
+ }
+ }
+
+ V8_INLINE void RecordComment(std::string comment) {
// Set explicit dependency on --code-comments for dead-code elimination in
// release builds.
if (!FLAG_code_comments) return;
if (options().emit_code_comments) {
- code_comments_writer_.Add(pc_offset(), std::string(msg));
+ code_comments_writer_.Add(pc_offset(), std::move(comment));
}
}
+#ifdef V8_CODE_COMMENTS
+ class CodeComment {
+ public:
+ explicit CodeComment(Assembler* assembler, const std::string& comment)
+ : assembler_(assembler) {
+ if (FLAG_code_comments) Open(comment);
+ }
+ ~CodeComment() {
+ if (FLAG_code_comments) Close();
+ }
+ static const int kIndentWidth = 2;
+
+ private:
+ int depth() const;
+ void Open(const std::string& comment);
+ void Close();
+ Assembler* assembler_;
+ };
+#else // V8_CODE_COMMENTS
+ class CodeComment {
+ explicit CodeComment(Assembler* assembler, std::string comment) {}
+ };
+#endif
+
// The minimum buffer size. Should be at least two times the platform-specific
// {Assembler::kGap}.
static constexpr int kMinimalBufferSize = 128;
@@ -354,6 +409,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
CodeCommentsWriter code_comments_writer_;
+ // Relocation information when code allocated directly on heap.
+ std::vector<std::pair<uint32_t, Address>> saved_handles_for_raw_object_ptr_;
+ std::vector<std::pair<uint32_t, uint32_t>> saved_offsets_for_runtime_entries_;
+
private:
// Before we copy code into the code space, we sometimes cannot encode
// call/jump code targets as we normally would, as the difference between the
@@ -386,6 +445,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
JumpOptimizationInfo* jump_optimization_info_;
+#ifdef V8_CODE_COMMENTS
+ int comment_depth_ = 0;
+#endif
+
// Constant pool.
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
@@ -416,6 +479,15 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#endif
};
+#ifdef V8_CODE_COMMENTS
+#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
+#define ASM_CODE_COMMENT_STRING(asm, comment) \
+ AssemblerBase::CodeComment asm_code_comment(asm, comment)
+#else
+#define ASM_CODE_COMMENT(asm)
+#define ASM_CODE_COMMENT_STRING(asm, ...)
+#endif
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_ASSEMBLER_H_
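
Usage note for the ASM_CODE_COMMENT macros defined above: placing one at the top of an emitter scopes a comment pair around everything that function emits, using __func__ as the label; ASM_CODE_COMMENT_STRING takes an explicit label instead. The snippet below is an in-tree style sketch, not standalone code: the emitter names and instructions are hypothetical, and it only makes sense inside V8 with code-comment support built in.

    // Hypothetical emitter, shown only to illustrate macro placement.
    void EmitLoadTaggedField(MacroAssembler* masm, Register dst, MemOperand src) {
      ASM_CODE_COMMENT(masm);  // brackets the body with "[ EmitLoadTaggedField" ... "]"
      masm->Ldr(dst, src);
    }

    void EmitFrameSetup(MacroAssembler* masm) {
      ASM_CODE_COMMENT_STRING(masm, "Frame setup");  // explicit label
      // ... frame setup instructions ...
    }
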
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index e8afa74e16..2a5893974f 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -49,6 +49,7 @@ namespace internal {
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
+ V(kOperandIsNotACodeT, "Operand is not a CodeT") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 854969f8cb..f3cb604478 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -70,144 +70,41 @@ Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
// static
Callable CodeFactory::ApiGetter(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallApiGetter);
+ return Builtins::CallableFor(isolate, Builtin::kCallApiGetter);
}
// static
Callable CodeFactory::CallApiCallback(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallApiCallback);
+ return Builtins::CallableFor(isolate, Builtin::kCallApiCallback);
}
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
return typeof_mode == TypeofMode::kNotInside
- ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalICTrampoline)
+ ? Builtins::CallableFor(isolate, Builtin::kLoadGlobalICTrampoline)
: Builtins::CallableFor(
- isolate, Builtins::kLoadGlobalICInsideTypeofTrampoline);
+ isolate, Builtin::kLoadGlobalICInsideTypeofTrampoline);
}
// static
Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode) {
return typeof_mode == TypeofMode::kNotInside
- ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalIC)
+ ? Builtins::CallableFor(isolate, Builtin::kLoadGlobalIC)
: Builtins::CallableFor(isolate,
- Builtins::kLoadGlobalICInsideTypeof);
+ Builtin::kLoadGlobalICInsideTypeof);
}
Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Builtins::CallableFor(isolate, Builtins::kStoreICTrampoline);
+ return Builtins::CallableFor(isolate, Builtin::kStoreICTrampoline);
}
Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
// already exist in the boilerplate therefore we can use StoreIC.
- return Builtins::CallableFor(isolate, Builtins::kStoreIC);
-}
-
-Callable CodeFactory::KeyedStoreIC_SloppyArguments(Isolate* isolate,
- KeyedAccessStoreMode mode) {
- Builtins::Name builtin_index;
- switch (mode) {
- case STANDARD_STORE:
- builtin_index = Builtins::kKeyedStoreIC_SloppyArguments_Standard;
- break;
- case STORE_AND_GROW_HANDLE_COW:
- builtin_index =
- Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW;
- break;
- case STORE_IGNORE_OUT_OF_BOUNDS:
- builtin_index =
- Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB;
- break;
- case STORE_HANDLE_COW:
- builtin_index =
- Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW;
- break;
- default:
- UNREACHABLE();
- }
- return isolate->builtins()->CallableFor(isolate, builtin_index);
-}
-
-Callable CodeFactory::ElementsTransitionAndStore(Isolate* isolate,
- KeyedAccessStoreMode mode) {
- Builtins::Name builtin_index;
- switch (mode) {
- case STANDARD_STORE:
- builtin_index = Builtins::kElementsTransitionAndStore_Standard;
- break;
- case STORE_AND_GROW_HANDLE_COW:
- builtin_index =
- Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW;
- break;
- case STORE_IGNORE_OUT_OF_BOUNDS:
- builtin_index =
- Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB;
- break;
- case STORE_HANDLE_COW:
- builtin_index =
- Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW;
- break;
- default:
- UNREACHABLE();
- }
- return isolate->builtins()->CallableFor(isolate, builtin_index);
-}
-
-Callable CodeFactory::StoreFastElementIC(Isolate* isolate,
- KeyedAccessStoreMode mode) {
- Builtins::Name builtin_index;
- switch (mode) {
- case STANDARD_STORE:
- builtin_index = Builtins::kStoreFastElementIC_Standard;
- break;
- case STORE_AND_GROW_HANDLE_COW:
- builtin_index = Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW;
- break;
- case STORE_IGNORE_OUT_OF_BOUNDS:
- builtin_index = Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB;
- break;
- case STORE_HANDLE_COW:
- builtin_index = Builtins::kStoreFastElementIC_NoTransitionHandleCOW;
- break;
- default:
- UNREACHABLE();
- }
- return isolate->builtins()->CallableFor(isolate, builtin_index);
-}
-
-// static
-Callable CodeFactory::BinaryOperation(Isolate* isolate, Operation op) {
- switch (op) {
- case Operation::kShiftRight:
- return Builtins::CallableFor(isolate, Builtins::kShiftRight);
- case Operation::kShiftLeft:
- return Builtins::CallableFor(isolate, Builtins::kShiftLeft);
- case Operation::kShiftRightLogical:
- return Builtins::CallableFor(isolate, Builtins::kShiftRightLogical);
- case Operation::kAdd:
- return Builtins::CallableFor(isolate, Builtins::kAdd);
- case Operation::kSubtract:
- return Builtins::CallableFor(isolate, Builtins::kSubtract);
- case Operation::kMultiply:
- return Builtins::CallableFor(isolate, Builtins::kMultiply);
- case Operation::kDivide:
- return Builtins::CallableFor(isolate, Builtins::kDivide);
- case Operation::kModulus:
- return Builtins::CallableFor(isolate, Builtins::kModulus);
- case Operation::kBitwiseOr:
- return Builtins::CallableFor(isolate, Builtins::kBitwiseOr);
- case Operation::kBitwiseAnd:
- return Builtins::CallableFor(isolate, Builtins::kBitwiseAnd);
- case Operation::kBitwiseXor:
- return Builtins::CallableFor(isolate, Builtins::kBitwiseXor);
- default:
- break;
- }
- UNREACHABLE();
+ return Builtins::CallableFor(isolate, Builtin::kStoreIC);
}
// static
@@ -228,18 +125,18 @@ Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate,
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags) {
switch (flags) {
case STRING_ADD_CHECK_NONE:
- return Builtins::CallableFor(isolate, Builtins::kStringAdd_CheckNone);
+ return Builtins::CallableFor(isolate, Builtin::kStringAdd_CheckNone);
case STRING_ADD_CONVERT_LEFT:
- return Builtins::CallableFor(isolate, Builtins::kStringAddConvertLeft);
+ return Builtins::CallableFor(isolate, Builtin::kStringAddConvertLeft);
case STRING_ADD_CONVERT_RIGHT:
- return Builtins::CallableFor(isolate, Builtins::kStringAddConvertRight);
+ return Builtins::CallableFor(isolate, Builtin::kStringAddConvertRight);
}
UNREACHABLE();
}
// static
Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kResumeGeneratorTrampoline);
+ return Builtins::CallableFor(isolate, Builtin::kResumeGeneratorTrampoline);
}
// static
@@ -248,10 +145,10 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
switch (scope_type) {
case ScopeType::EVAL_SCOPE:
return Builtins::CallableFor(isolate,
- Builtins::kFastNewFunctionContextEval);
+ Builtin::kFastNewFunctionContextEval);
case ScopeType::FUNCTION_SCOPE:
return Builtins::CallableFor(isolate,
- Builtins::kFastNewFunctionContextFunction);
+ Builtin::kFastNewFunctionContextFunction);
default:
UNREACHABLE();
}
@@ -268,25 +165,25 @@ Callable CodeFactory::Call_WithFeedback(Isolate* isolate,
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return Builtins::CallableFor(
- isolate, Builtins::kCall_ReceiverIsNullOrUndefined_WithFeedback);
+ isolate, Builtin::kCall_ReceiverIsNullOrUndefined_WithFeedback);
case ConvertReceiverMode::kNotNullOrUndefined:
return Builtins::CallableFor(
- isolate, Builtins::kCall_ReceiverIsNotNullOrUndefined_WithFeedback);
+ isolate, Builtin::kCall_ReceiverIsNotNullOrUndefined_WithFeedback);
case ConvertReceiverMode::kAny:
return Builtins::CallableFor(isolate,
- Builtins::kCall_ReceiverIsAny_WithFeedback);
+ Builtin::kCall_ReceiverIsAny_WithFeedback);
}
UNREACHABLE();
}
// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike);
+ return Builtins::CallableFor(isolate, Builtin::kCallWithArrayLike);
}
// static
Callable CodeFactory::CallWithSpread(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallWithSpread);
+ return Builtins::CallableFor(isolate, Builtin::kCallWithSpread);
}
// static
@@ -297,48 +194,48 @@ Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
// static
Callable CodeFactory::CallVarargs(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallVarargs);
+ return Builtins::CallableFor(isolate, Builtin::kCallVarargs);
}
// static
Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallForwardVarargs);
+ return Builtins::CallableFor(isolate, Builtin::kCallForwardVarargs);
}
// static
Callable CodeFactory::CallFunctionForwardVarargs(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kCallFunctionForwardVarargs);
+ return Builtins::CallableFor(isolate, Builtin::kCallFunctionForwardVarargs);
}
// static
Callable CodeFactory::Construct(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kConstruct);
+ return Builtins::CallableFor(isolate, Builtin::kConstruct);
}
// static
Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kConstructWithSpread);
+ return Builtins::CallableFor(isolate, Builtin::kConstructWithSpread);
}
// static
Callable CodeFactory::ConstructFunction(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kConstructFunction);
+ return Builtins::CallableFor(isolate, Builtin::kConstructFunction);
}
// static
Callable CodeFactory::ConstructVarargs(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kConstructVarargs);
+ return Builtins::CallableFor(isolate, Builtin::kConstructVarargs);
}
// static
Callable CodeFactory::ConstructForwardVarargs(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kConstructForwardVarargs);
+ return Builtins::CallableFor(isolate, Builtin::kConstructForwardVarargs);
}
// static
Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
return Builtins::CallableFor(isolate,
- Builtins::kConstructFunctionForwardVarargs);
+ Builtin::kConstructFunctionForwardVarargs);
}
// static
@@ -352,16 +249,16 @@ Callable CodeFactory::InterpreterPushArgsThenCall(
UNREACHABLE();
case InterpreterPushArgsMode::kWithFinalSpread:
return Builtins::CallableFor(
- isolate, Builtins::kInterpreterPushArgsThenCallWithFinalSpread);
+ isolate, Builtin::kInterpreterPushArgsThenCallWithFinalSpread);
case InterpreterPushArgsMode::kOther:
switch (receiver_mode) {
case ConvertReceiverMode::kNullOrUndefined:
return Builtins::CallableFor(
- isolate, Builtins::kInterpreterPushUndefinedAndArgsThenCall);
+ isolate, Builtin::kInterpreterPushUndefinedAndArgsThenCall);
case ConvertReceiverMode::kNotNullOrUndefined:
case ConvertReceiverMode::kAny:
return Builtins::CallableFor(isolate,
- Builtins::kInterpreterPushArgsThenCall);
+ Builtin::kInterpreterPushArgsThenCall);
}
}
UNREACHABLE();
@@ -373,13 +270,13 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
switch (mode) {
case InterpreterPushArgsMode::kArrayFunction:
return Builtins::CallableFor(
- isolate, Builtins::kInterpreterPushArgsThenConstructArrayFunction);
+ isolate, Builtin::kInterpreterPushArgsThenConstructArrayFunction);
case InterpreterPushArgsMode::kWithFinalSpread:
return Builtins::CallableFor(
- isolate, Builtins::kInterpreterPushArgsThenConstructWithFinalSpread);
+ isolate, Builtin::kInterpreterPushArgsThenConstructWithFinalSpread);
case InterpreterPushArgsMode::kOther:
return Builtins::CallableFor(isolate,
- Builtins::kInterpreterPushArgsThenConstruct);
+ Builtin::kInterpreterPushArgsThenConstruct);
}
UNREACHABLE();
}
@@ -401,14 +298,14 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// static
Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
return Builtins::CallableFor(isolate,
- Builtins::kInterpreterOnStackReplacement);
+ Builtin::kInterpreterOnStackReplacement);
}
// static
Callable CodeFactory::InterpreterOnStackReplacement_ToBaseline(
Isolate* isolate) {
return Builtins::CallableFor(
- isolate, Builtins::kInterpreterOnStackReplacement_ToBaseline);
+ isolate, Builtin::kInterpreterOnStackReplacement_ToBaseline);
}
// static
@@ -419,7 +316,7 @@ Callable CodeFactory::ArrayNoArgumentConstructor(
case kind_caps: \
return Builtins::CallableFor( \
isolate, \
- Builtins::kArrayNoArgumentConstructor_##kind_camel##_##mode_camel);
+ Builtin::kArrayNoArgumentConstructor_##kind_camel##_##mode_camel);
if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
DCHECK(IsSmiElementsKind(kind));
switch (kind) {
@@ -453,7 +350,7 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
case kind_caps: \
return Builtins::CallableFor( \
isolate, \
- Builtins::kArraySingleArgumentConstructor_##kind_camel##_##mode_camel)
+ Builtin::kArraySingleArgumentConstructor_##kind_camel##_##mode_camel)
if (override_mode == DONT_OVERRIDE && AllocationSite::ShouldTrack(kind)) {
DCHECK(IsSmiElementsKind(kind));
switch (kind) {
@@ -479,5 +376,43 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#undef CASE
}
+#ifdef V8_IS_TSAN
+// static
+Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore8IgnoreFP
+ : Builtin::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore16IgnoreFP
+ : Builtin::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore32IgnoreFP
+ : Builtin::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore64IgnoreFP
+ : Builtin::kTSANRelaxedStore64SaveFP;
+ }
+}
+
+// static
+Builtin CodeFactory::GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size) {
+ if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedLoad32IgnoreFP
+ : Builtin::kTSANRelaxedLoad32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedLoad64IgnoreFP
+ : Builtin::kTSANRelaxedLoad64SaveFP;
+ }
+}
+#endif // V8_IS_TSAN
+
} // namespace internal
} // namespace v8
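
The TSAN helpers added above select a builtin purely from the access size and whether FP registers must be preserved; for example (SaveFPRegsMode::kSave, kInt32Size) yields Builtin::kTSANRelaxedStore32SaveFP. A standalone model of the same name selection, using local stand-ins for the V8 enums:

    #include <cassert>
    #include <string>

    enum class SaveFP { kIgnore, kSave };  // stand-in for SaveFPRegsMode

    // size is in bytes (1, 2, 4 or 8); the stub name encodes it in bits.
    std::string TSANRelaxedStoreStub(SaveFP fp, int size) {
      assert(size == 1 || size == 2 || size == 4 || size == 8);
      std::string name = "TSANRelaxedStore" + std::to_string(size * 8);
      return name + (fp == SaveFP::kIgnore ? "IgnoreFP" : "SaveFP");
    }

    int main() {
      // Mirrors GetTSANRelaxedStoreStub(SaveFPRegsMode::kSave, kInt32Size).
      assert(TSANRelaxedStoreStub(SaveFP::kSave, 4) == "TSANRelaxedStore32SaveFP");
    }
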
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index e55de10533..4780678dad 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -40,17 +40,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StoreOwnIC(Isolate* isolate);
static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
- static Callable KeyedStoreIC_SloppyArguments(Isolate* isolate,
- KeyedAccessStoreMode mode);
- static Callable ElementsTransitionAndStore(Isolate* isolate,
- KeyedAccessStoreMode mode);
- static Callable StoreFastElementIC(Isolate* isolate,
- KeyedAccessStoreMode mode);
-
static Callable ResumeGenerator(Isolate* isolate);
- static Callable BinaryOperation(Isolate* isolate, Operation op);
-
static Callable ApiGetter(Isolate* isolate);
static Callable CallApiCallback(Isolate* isolate);
@@ -97,6 +88,11 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArraySingleArgumentConstructor(
Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode);
+
+#ifdef V8_IS_TSAN
+ static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+ static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
+#endif // V8_IS_TSAN
};
} // namespace internal
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 5995a766d1..5493ba6caa 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -169,7 +169,7 @@ void CodeStubAssembler::FailAssert(
const char* message, const std::vector<FileAndLine>& files_and_lines,
std::initializer_list<ExtraNode> extra_nodes) {
DCHECK_NOT_NULL(message);
- EmbeddedVector<char, 1024> chars;
+ base::EmbeddedVector<char, 1024> chars;
std::stringstream stream;
for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend(); ++it) {
if (it->first != nullptr) {
@@ -1394,16 +1394,29 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- DCHECK_EQ(kSystemPointerSize,
- ExternalReference::new_space_allocation_limit_address(isolate())
- .address() -
- ExternalReference::new_space_allocation_top_address(isolate())
- .address());
+
+#ifdef DEBUG
+ // New space is optional and if disabled both top and limit return
+ // kNullAddress.
+ if (ExternalReference::new_space_allocation_top_address(isolate())
+ .address() != kNullAddress) {
+ Address top_address =
+ ExternalReference::new_space_allocation_top_address(isolate())
+ .address();
+ Address limit_address =
+ ExternalReference::new_space_allocation_limit_address(isolate())
+ .address();
+
+ CHECK_EQ(kSystemPointerSize, limit_address - top_address);
+ }
+
DCHECK_EQ(kSystemPointerSize,
ExternalReference::old_space_allocation_limit_address(isolate())
.address() -
ExternalReference::old_space_allocation_top_address(isolate())
.address());
+#endif
+
TNode<IntPtrT> limit_address =
IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
IntPtrConstant(kSystemPointerSize));
@@ -1431,22 +1444,39 @@ TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes,
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
-TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
- TNode<IntPtrT> offset) {
- return UncheckedCast<HeapObject>(
- BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)));
-}
-
-TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous,
- int offset) {
- return InnerAllocate(previous, IntPtrConstant(offset));
-}
-
TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
}
+#if V8_ENABLE_WEBASSEMBLY
+TNode<HeapObject> CodeStubAssembler::AllocateWasmArray(
+ TNode<IntPtrT> size_in_bytes, int initialization) {
+ TNode<HeapObject> array =
+ Allocate(size_in_bytes, AllocationFlag::kAllowLargeObjectAllocation);
+ if (initialization == kUninitialized) return array;
+
+ TNode<IntPtrT> array_address = BitcastTaggedToWord(array);
+ TNode<IntPtrT> start = IntPtrAdd(
+ array_address, IntPtrConstant(WasmArray::kHeaderSize - kHeapObjectTag));
+ TNode<IntPtrT> limit = IntPtrAdd(
+ array_address, IntPtrSub(size_in_bytes, IntPtrConstant(kHeapObjectTag)));
+
+ TNode<Object> value;
+ if (initialization == kInitializeToZero) {
+ // A pointer-sized zero pattern is just what we need for numeric Wasm
+ // arrays (their object size is rounded up to a multiple of kPointerSize).
+ value = SmiConstant(0);
+ } else if (initialization == kInitializeToNull) {
+ value = NullConstant();
+ } else {
+ UNREACHABLE();
+ }
+ StoreFieldsNoWriteBarrier(start, limit, value);
+ return array;
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
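
AllocateWasmArray above initializes the payload (everything between the header and the rounded-up object size) with a single pointer-sized pattern: Smi zero for numeric element kinds, null for reference kinds. A standalone model of that header-to-limit fill; the word layout and the sentinel value are made up for illustration:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr uintptr_t kSmiZero = 0;          // Smi 0 is the all-zero word
    constexpr uintptr_t kNullSentinel = 0x2a;  // made-up stand-in for null

    // Mirrors StoreFieldsNoWriteBarrier(start, limit, value): fill every word
    // from the end of the header to the end of the object with one pattern.
    void FillWasmArray(std::vector<uintptr_t>& object_words, size_t header_words,
                       bool is_reference_array) {
      uintptr_t value = is_reference_array ? kNullSentinel : kSmiZero;
      for (size_t i = header_words; i < object_words.size(); ++i) {
        object_words[i] = value;
      }
    }
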
void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
Label* if_true,
Label* if_false) {
@@ -2524,7 +2554,7 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Label done(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
@@ -2535,6 +2565,9 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Label* elements_kind_labels[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ // The same labels again for RAB / GSAB. We dispatch RAB / GSAB elements
+ // kinds to the corresponding non-RAB / GSAB elements kinds.
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
@@ -3904,6 +3937,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
int base_size = array_header_size;
if (allocation_site) {
+ DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
base_size += AllocationMemento::kSize;
}
@@ -3914,6 +3948,20 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return result;
}
+namespace {
+
+// To prevent GC between the array and elements allocation, the elements
+// object allocation is folded together with the js-array allocation.
+TNode<FixedArrayBase> InnerAllocateElements(CodeStubAssembler* csa,
+ TNode<JSArray> js_array,
+ int offset) {
+ return csa->UncheckedCast<FixedArrayBase>(
+ csa->BitcastWordToTagged(csa->IntPtrAdd(
+ csa->BitcastTaggedToWord(js_array), csa->IntPtrConstant(offset))));
+}
+
+} // namespace
+
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
@@ -3956,6 +4004,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
{
int base_size = array_header_size;
if (allocation_site) {
+ DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
base_size += AllocationMemento::kSize;
}
@@ -3996,12 +4045,10 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&next);
}
-
// Fold all objects into a single new space allocation.
array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
- elements = UncheckedCast<FixedArrayBase>(
- InnerAllocate(array.value(), elements_offset));
+ elements = InnerAllocateElements(this, array.value(), elements_offset);
StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
elements.value());
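
InnerAllocateElements in the hunk above works because the JSArray and its elements store come out of one folded allocation: the elements object simply begins offset bytes into the block, so its tagged pointer is the array's tagged pointer plus that offset (the offset is pointer aligned, so the heap-object tag in the low bit survives the addition). A standalone model of the pointer arithmetic:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;  // V8 tags heap pointers in bit 0

    // Tagged pointer of an object that lives offset bytes into the same
    // allocation as outer_tagged; offset must keep the tag bit clear.
    uintptr_t InnerTaggedPointer(uintptr_t outer_tagged, uintptr_t offset) {
      assert((offset & kHeapObjectTag) == 0);
      return outer_tagged + offset;
    }

    int main() {
      uintptr_t array = 0x1000 | kHeapObjectTag;             // hypothetical JSArray
      uintptr_t elements = InnerTaggedPointer(array, 0x20);  // header (+ memento)
      assert(elements == (0x1020 | kHeapObjectTag));
    }
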
@@ -4040,6 +4087,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
RootIndex::kEmptyFixedArray);
if (allocation_site) {
+ DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize),
*allocation_site);
}
@@ -5331,11 +5379,28 @@ template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
TNode<HeapObject>, TNode<FixedArrayBase>, ElementsKind, ElementsKind,
TNode<IntPtrT>, TNode<IntPtrT>, compiler::CodeAssemblerLabel*);
+namespace {
+
+// Helper function for folded memento allocation.
+// Memento objects are designed to be put right after the objects they are
+// tracking on. So memento allocations have to be folded together with previous
+// object allocations.
+TNode<HeapObject> InnerAllocateMemento(CodeStubAssembler* csa,
+ TNode<HeapObject> previous,
+ TNode<IntPtrT> offset) {
+ return csa->UncheckedCast<HeapObject>(csa->BitcastWordToTagged(
+ csa->IntPtrAdd(csa->BitcastTaggedToWord(previous), offset)));
+}
+
+} // namespace
+
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
+ DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[Initialize AllocationMemento");
- TNode<HeapObject> memento = InnerAllocate(base, base_allocation_size);
+ TNode<HeapObject> memento =
+ InnerAllocateMemento(this, base, base_allocation_size);
StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
@@ -5351,6 +5416,28 @@ void CodeStubAssembler::InitializeAllocationMemento(
Comment("]");
}
+TNode<IntPtrT> CodeStubAssembler::TryTaggedToInt32AsIntPtr(
+ TNode<Object> acc, Label* if_not_possible) {
+ TVARIABLE(IntPtrT, acc_intptr);
+ Label is_not_smi(this), have_int32(this);
+
+ GotoIfNot(TaggedIsSmi(acc), &is_not_smi);
+ acc_intptr = SmiUntag(CAST(acc));
+ Goto(&have_int32);
+
+ BIND(&is_not_smi);
+ GotoIfNot(IsHeapNumber(CAST(acc)), if_not_possible);
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(acc));
+ TNode<Int32T> value32 = RoundFloat64ToInt32(value);
+ TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
+ GotoIfNot(Float64Equal(value, value64), if_not_possible);
+ acc_intptr = ChangeInt32ToIntPtr(value32);
+ Goto(&have_int32);
+
+ BIND(&have_int32);
+ return acc_intptr.value();
+}
+
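
TryTaggedToInt32AsIntPtr above accepts a Smi directly, and accepts a HeapNumber only when its value survives a round trip through int32 (RoundFloat64ToInt32 followed by a Float64Equal check); everything else jumps to if_not_possible. The same check in plain C++, with std::optional standing in for the bailout label:

    #include <cstdint>
    #include <limits>
    #include <optional>

    // Returns the value as a pointer-sized integer only if it is exactly an
    // int32 (in range, no fractional part, not NaN); otherwise bails out.
    std::optional<intptr_t> TryToInt32(double value) {
      if (!(value >= std::numeric_limits<int32_t>::min() &&
            value <= std::numeric_limits<int32_t>::max())) {
        return std::nullopt;  // out of range or NaN
      }
      int32_t as_int32 = static_cast<int32_t>(value);
      if (static_cast<double>(as_int32) != value) return std::nullopt;  // fractional
      return static_cast<intptr_t>(as_int32);
    }
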
TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
TNode<Object> value, Label* if_valueisnotnumber) {
return Select<Float64T>(
@@ -5383,7 +5470,7 @@ TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
BIND(&if_valueisnotnumber);
{
// Convert the {value} to a Number first.
- var_value = CallBuiltin(Builtins::kNonNumberToNumber, context, value);
+ var_value = CallBuiltin(Builtin::kNonNumberToNumber, context, value);
Goto(&loop);
}
}
@@ -5468,8 +5555,8 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
// Not an oddball either -> convert.
auto builtin = conversion == Object::Conversion::kToNumeric
- ? Builtins::kNonNumberToNumeric
- : Builtins::kNonNumberToNumber;
+ ? Builtin::kNonNumberToNumeric
+ : Builtin::kNonNumberToNumber;
var_value = CallBuiltin(builtin, context, value);
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&loop);
@@ -5734,7 +5821,7 @@ TNode<String> CodeStubAssembler::ToThisString(TNode<Context> context,
Label if_valueisnullorundefined(this, Label::kDeferred);
GotoIf(IsNullOrUndefined(value), &if_valueisnullorundefined);
// Convert the {value} to a String.
- var_value = CallBuiltin(Builtins::kToString, context, value);
+ var_value = CallBuiltin(Builtin::kToString, context, value);
Goto(&if_valueisstring);
BIND(&if_valueisnullorundefined);
@@ -5748,7 +5835,7 @@ TNode<String> CodeStubAssembler::ToThisString(TNode<Context> context,
BIND(&if_valueissmi);
{
// The {value} is a Smi, convert it to a String.
- var_value = CallBuiltin(Builtins::kNumberToString, context, value);
+ var_value = CallBuiltin(Builtin::kNumberToString, context, value);
Goto(&if_valueisstring);
}
BIND(&if_valueisstring);
@@ -5800,7 +5887,7 @@ TNode<Int32T> CodeStubAssembler::ChangeTaggedNonSmiToInt32(
},
[=] {
return TruncateNumberToWord32(
- CAST(CallBuiltin(Builtins::kNonNumberToNumber, context, input)));
+ CAST(CallBuiltin(Builtin::kNonNumberToNumber, context, input)));
});
}
@@ -5819,7 +5906,7 @@ TNode<Float64T> CodeStubAssembler::ChangeTaggedToFloat64(TNode<Context> context,
[=] { return LoadHeapNumberValue(CAST(input)); },
[=] {
return ChangeNumberToFloat64(
- CAST(CallBuiltin(Builtins::kNonNumberToNumber, context, input)));
+ CAST(CallBuiltin(Builtin::kNonNumberToNumber, context, input)));
});
Goto(&end);
@@ -7387,8 +7474,7 @@ TNode<Number> CodeStubAssembler::ToNumber_Inline(TNode<Context> context,
var_result = Select<Number>(
IsHeapNumber(CAST(input)), [=] { return CAST(input); },
[=] {
- return CAST(
- CallBuiltin(Builtins::kNonNumberToNumber, context, input));
+ return CAST(CallBuiltin(Builtin::kNonNumberToNumber, context, input));
});
Goto(&end);
}
@@ -7535,7 +7621,7 @@ void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
// {heap_object_value} is not a Numeric yet.
GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
*var_numeric = CAST(
- CallBuiltin(Builtins::kNonNumberToNumeric, context, heap_object_value));
+ CallBuiltin(Builtin::kNonNumberToNumeric, context, heap_object_value));
OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny);
Goto(&done);
@@ -7676,7 +7762,7 @@ TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
Branch(IsString(CAST(input)), &out, &stub_call);
BIND(&stub_call);
- var_result = CallBuiltin(Builtins::kToString, context, input);
+ var_result = CallBuiltin(Builtin::kToString, context, input);
Goto(&out);
BIND(&out);
@@ -7685,7 +7771,7 @@ TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
TNode<JSReceiver> CodeStubAssembler::ToObject(TNode<Context> context,
TNode<Object> input) {
- return CAST(CallBuiltin(Builtins::kToObject, context, input));
+ return CAST(CallBuiltin(Builtin::kToObject, context, input));
}
TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
@@ -7717,7 +7803,7 @@ TNode<Number> CodeStubAssembler::ToLength_Inline(TNode<Context> context,
TNode<Smi> smi_zero = SmiConstant(0);
return Select<Number>(
TaggedIsSmi(input), [=] { return SmiMax(CAST(input), smi_zero); },
- [=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
+ [=] { return CAST(CallBuiltin(Builtin::kToLength, context, input)); });
}
TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
@@ -9273,6 +9359,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
TNode<Uint32T> representation =
DecodeWord32<PropertyDetails::RepresentationField>(details);
+ // TODO(ishell): support WasmValues.
+ CSA_ASSERT(this, Word32NotEqual(representation,
+ Int32Constant(Representation::kWasmValue)));
field_index =
IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(map));
TNode<IntPtrT> instance_size_in_words = LoadMapInstanceSizeInWords(map);
@@ -9438,7 +9527,7 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
TNode<NativeContext> creation_context =
GetCreationContext(CAST(holder), if_bailout);
var_value = CallBuiltin(
- Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
+ Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
creation_context, getter, IntPtrConstant(0), receiver);
Goto(&done);
@@ -9776,8 +9865,6 @@ void CodeStubAssembler::TryPrototypeChainLookup(
TNode<Uint16T> instance_type = LoadMapInstanceType(map);
{
Label if_objectisreceiver(this);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- STATIC_ASSERT(FIRST_JS_RECEIVER_TYPE == JS_PROXY_TYPE);
Branch(IsJSReceiverInstanceType(instance_type), &if_objectisreceiver,
if_bailout);
BIND(&if_objectisreceiver);
@@ -10611,7 +10698,7 @@ TNode<Word32T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Word32T>(
BIND(&convert);
{
- var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
+ var_input = CallBuiltin(Builtin::kNonNumberToNumber, context, input);
Goto(&loop);
}
@@ -10659,7 +10746,7 @@ TNode<Float32T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Float32T>(
BIND(&convert);
{
- var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
+ var_input = CallBuiltin(Builtin::kNonNumberToNumber, context, input);
Goto(&loop);
}
@@ -10706,7 +10793,7 @@ TNode<Float64T> CodeStubAssembler::PrepareValueForWriteToTypedArray<Float64T>(
BIND(&convert);
{
- var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input);
+ var_input = CallBuiltin(Builtin::kNonNumberToNumber, context, input);
Goto(&loop);
}
@@ -10834,6 +10921,14 @@ void CodeStubAssembler::EmitElementStoreTypedArray(
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
Label done(this), update_value_and_bailout(this, Label::kDeferred);
+ bool is_rab_gsab = false;
+ if (IsRabGsabTypedArrayElementsKind(elements_kind)) {
+ is_rab_gsab = true;
+ // For the rest of the function, use the corresponding non-RAB/GSAB
+ // ElementsKind.
+ elements_kind = GetCorrespondingNonRabGsabElementsKind(elements_kind);
+ }
+
TNode<TValue> converted_value =
PrepareValueForWriteToTypedArray<TValue>(value, elements_kind, context);
@@ -10842,16 +10937,23 @@ void CodeStubAssembler::EmitElementStoreTypedArray(
// the buffer is not alive or move the elements.
// TODO(ishell): introduce DisallowGarbageCollectionCode scope here.
- // Check if buffer has been detached.
+ // Check if buffer has been detached. (For RAB / GSAB this is part of loading
+ // the length, so no additional check is needed.)
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
- if (maybe_converted_value) {
+ if (!is_rab_gsab) {
GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout);
- } else {
- GotoIf(IsDetachedBuffer(buffer), bailout);
}
// Bounds check.
- TNode<UintPtrT> length = LoadJSTypedArrayLength(typed_array);
+ TNode<UintPtrT> length;
+ if (is_rab_gsab) {
+ length = LoadVariableLengthJSTypedArrayLength(
+ typed_array, buffer,
+ store_mode == STORE_IGNORE_OUT_OF_BOUNDS ? &done
+ : &update_value_and_bailout);
+ } else {
+ length = LoadJSTypedArrayLength(typed_array);
+ }
if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
// Skip the store if we write beyond the length or
@@ -10866,19 +10968,21 @@ void CodeStubAssembler::EmitElementStoreTypedArray(
StoreElement(data_ptr, elements_kind, key, converted_value);
Goto(&done);
- BIND(&update_value_and_bailout);
- // We already prepared the incoming value for storing into a typed array.
- // This might involve calling ToNumber in some cases. We shouldn't call
- // ToNumber again in the runtime so pass the converted value to the runtime.
- // The prepared value is an untagged value. Convert it to a tagged value
- // to pass it to runtime. It is not possible to do the detached buffer check
- // before we prepare the value, since ToNumber can detach the ArrayBuffer.
- // The spec specifies the order of these operations.
- if (maybe_converted_value != nullptr) {
- EmitElementStoreTypedArrayUpdateValue(value, elements_kind, converted_value,
- maybe_converted_value);
+ if (!is_rab_gsab || store_mode != STORE_IGNORE_OUT_OF_BOUNDS) {
+ BIND(&update_value_and_bailout);
+ // We already prepared the incoming value for storing into a typed array.
+ // This might involve calling ToNumber in some cases. We shouldn't call
+ // ToNumber again in the runtime so pass the converted value to the runtime.
+ // The prepared value is an untagged value. Convert it to a tagged value
+ // to pass it to runtime. It is not possible to do the detached buffer check
+ // before we prepare the value, since ToNumber can detach the ArrayBuffer.
+ // The spec specifies the order of these operations.
+ if (maybe_converted_value != nullptr) {
+ EmitElementStoreTypedArrayUpdateValue(
+ value, elements_kind, converted_value, maybe_converted_value);
+ }
+ Goto(bailout);
}
- Goto(bailout);
BIND(&done);
}
@@ -10889,12 +10993,6 @@ void CodeStubAssembler::EmitElementStore(
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
- // TODO(v8:11111): Fast path for RAB / GSAB backed TypedArrays.
- if (IsRabGsabTypedArrayElementsKind(elements_kind)) {
- GotoIf(Int32TrueConstant(), bailout);
- return;
- }
-
TNode<FixedArrayBase> elements = LoadElements(object);
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
@@ -10909,7 +11007,7 @@ void CodeStubAssembler::EmitElementStore(
 // TODO(rmcilroy): TNodify the converted value once this function and
// StoreElement are templated based on the type elements_kind type.
- if (IsTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayOrRabGsabTypedArrayElementsKind(elements_kind)) {
TNode<JSTypedArray> typed_array = CAST(object);
switch (elements_kind) {
case UINT8_ELEMENTS:
@@ -10919,22 +11017,33 @@ void CodeStubAssembler::EmitElementStore(
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
+ case RAB_GSAB_UINT8_ELEMENTS:
+ case RAB_GSAB_INT8_ELEMENTS:
+ case RAB_GSAB_UINT16_ELEMENTS:
+ case RAB_GSAB_INT16_ELEMENTS:
+ case RAB_GSAB_UINT32_ELEMENTS:
+ case RAB_GSAB_INT32_ELEMENTS:
+ case RAB_GSAB_UINT8_CLAMPED_ELEMENTS:
EmitElementStoreTypedArray<Word32T>(typed_array, intptr_key, value,
elements_kind, store_mode, bailout,
context, maybe_converted_value);
break;
case FLOAT32_ELEMENTS:
+ case RAB_GSAB_FLOAT32_ELEMENTS:
EmitElementStoreTypedArray<Float32T>(typed_array, intptr_key, value,
elements_kind, store_mode, bailout,
context, maybe_converted_value);
break;
case FLOAT64_ELEMENTS:
+ case RAB_GSAB_FLOAT64_ELEMENTS:
EmitElementStoreTypedArray<Float64T>(typed_array, intptr_key, value,
elements_kind, store_mode, bailout,
context, maybe_converted_value);
break;
case BIGINT64_ELEMENTS:
case BIGUINT64_ELEMENTS:
+ case RAB_GSAB_BIGINT64_ELEMENTS:
+ case RAB_GSAB_BIGUINT64_ELEMENTS:
EmitElementStoreTypedArray<BigInt>(typed_array, intptr_key, value,
elements_kind, store_mode, bailout,
context, maybe_converted_value);
@@ -11129,6 +11238,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
Label* memento_found) {
+ DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[ TrapAllocationMemento");
Label no_memento_found(this);
Label top_check(this), map_check(this);
@@ -11152,8 +11262,8 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
IntPtrConstant(MemoryChunk::kIsInYoungGenerationMask)),
IntPtrConstant(0)),
&no_memento_found);
- // TODO(ulan): Support allocation memento for a large object by allocating
- // additional word for the memento after the large object.
+ // TODO(v8:11799): Support allocation memento for a large object by
+ // allocating additional word for the memento after the large object.
GotoIf(WordNotEqual(WordAnd(page_flags,
IntPtrConstant(MemoryChunk::kIsLargePageMask)),
IntPtrConstant(0)),
@@ -11200,7 +11310,7 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
}
TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
- if (FLAG_enable_third_party_heap) Unreachable();
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
}
@@ -11684,8 +11794,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// dedicated ToPrimitive(right, hint Number) operation, as the
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
- var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
+ var_right = CallBuiltin(Builtin::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11730,8 +11839,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// dedicated ToPrimitive(left, hint Number) operation, as the
// ToNumeric(left) will by itself already invoke ToPrimitive with
// a Number hint.
- var_left =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
+ var_left = CallBuiltin(Builtin::kNonNumberToNumeric, context(), left);
Goto(&loop);
}
}
@@ -11787,7 +11895,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
+ CallBuiltin(Builtin::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11842,7 +11950,7 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// ToNumeric(right) will by itself already invoke ToPrimitive with
// a Number hint.
var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), right);
+ CallBuiltin(Builtin::kNonNumberToNumeric, context(), right);
Goto(&loop);
}
}
@@ -11857,19 +11965,19 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
// Both {left} and {right} are strings.
CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
- Builtins::Name builtin;
+ Builtin builtin;
switch (op) {
case Operation::kLessThan:
- builtin = Builtins::kStringLessThan;
+ builtin = Builtin::kStringLessThan;
break;
case Operation::kLessThanOrEqual:
- builtin = Builtins::kStringLessThanOrEqual;
+ builtin = Builtin::kStringLessThanOrEqual;
break;
case Operation::kGreaterThan:
- builtin = Builtins::kStringGreaterThan;
+ builtin = Builtin::kStringGreaterThan;
break;
case Operation::kGreaterThanOrEqual:
- builtin = Builtins::kStringGreaterThanOrEqual;
+ builtin = Builtin::kStringGreaterThanOrEqual;
break;
default:
UNREACHABLE();
@@ -11893,8 +12001,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
&if_right_receiver);
var_left =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
- var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
+ CallBuiltin(Builtin::kNonNumberToNumeric, context(), left);
+ var_right = CallBuiltin(Builtin::kToNumeric, context(), right);
Goto(&loop);
BIND(&if_right_bigint);
@@ -11957,9 +12065,8 @@ TNode<Oddball> CodeStubAssembler::RelationalComparison(
GotoIf(IsJSReceiverInstanceType(left_instance_type),
&if_left_receiver);
- var_right = CallBuiltin(Builtins::kToNumeric, context(), right);
- var_left =
- CallBuiltin(Builtins::kNonNumberToNumeric, context(), left);
+ var_right = CallBuiltin(Builtin::kToNumeric, context(), right);
+ var_left = CallBuiltin(Builtin::kNonNumberToNumeric, context(), left);
Goto(&loop);
BIND(&if_left_receiver);
@@ -12268,7 +12375,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
{
GotoIfNot(IsStringInstanceType(right_type), &use_symmetry);
result =
- CAST(CallBuiltin(Builtins::kStringEqual, context(), left, right));
+ CAST(CallBuiltin(Builtin::kStringEqual, context(), left, right));
CombineFeedback(var_type_feedback,
SmiOr(CollectFeedbackForString(left_type),
CollectFeedbackForString(right_type)));
@@ -12528,7 +12635,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
CombineFeedback(var_type_feedback,
CollectFeedbackForString(right_type));
}
- var_right = CallBuiltin(Builtins::kStringToNumber, context(), right);
+ var_right = CallBuiltin(Builtin::kStringToNumber, context(), right);
Goto(&loop);
}
@@ -12733,7 +12840,7 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
CollectFeedbackForString(rhs_instance_type);
*var_type_feedback = SmiOr(lhs_feedback, rhs_feedback);
}
- result = CAST(CallBuiltin(Builtins::kStringEqual,
+ result = CAST(CallBuiltin(Builtin::kStringEqual,
NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -13010,7 +13117,7 @@ void CodeStubAssembler::BranchIfSameValue(TNode<Object> lhs, TNode<Object> rhs,
// with the same sequence of characters.
GotoIfNot(IsString(CAST(rhs)), if_false);
const TNode<Object> result = CallBuiltin(
- Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
+ Builtin::kStringEqual, NoContextConstant(), lhs, rhs);
Branch(IsTrue(result), if_true, if_false);
}
@@ -13095,13 +13202,13 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
BIND(&if_proxy);
{
- TNode<Name> name = CAST(CallBuiltin(Builtins::kToName, context, key));
+ TNode<Name> name = CAST(CallBuiltin(Builtin::kToName, context, key));
switch (mode) {
case kHasProperty:
GotoIf(IsPrivateSymbol(name), &return_false);
result = CAST(
- CallBuiltin(Builtins::kProxyHasProperty, context, object, name));
+ CallBuiltin(Builtin::kProxyHasProperty, context, object, name));
Goto(&end);
break;
case kForInHasProperty:
@@ -13392,7 +13499,7 @@ TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
// Use the OrdinaryHasInstance algorithm.
var_result = CAST(
- CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object));
+ CallBuiltin(Builtin::kOrdinaryHasInstance, context, callable, object));
Goto(&return_result);
}
@@ -13608,8 +13715,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Smi> length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
- TNode<FixedArray> elements = UncheckedCast<FixedArray>(
- Allocate(elements_size + JSArray::kHeaderSize + JSIteratorResult::kSize));
+ TNode<FixedArray> elements =
+ UncheckedCast<FixedArray>(Allocate(elements_size));
StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
RootIndex::kFixedArrayMap);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
@@ -13617,7 +13724,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreFixedArrayElement(elements, 1, value);
TNode<Map> array_map = CAST(LoadContextElement(
native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX));
- TNode<HeapObject> array = InnerAllocate(elements, elements_size);
+ TNode<HeapObject> array = Allocate(JSArray::kHeaderSize);
StoreMapNoWriteBarrier(array, array_map);
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -13625,7 +13732,7 @@ TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResultForEntry(
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
TNode<Map> iterator_map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
- TNode<HeapObject> result = InnerAllocate(array, JSArray::kHeaderSize);
+ TNode<HeapObject> result = Allocate(JSIteratorResult::kSize);
StoreMapNoWriteBarrier(result, iterator_map);
StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
@@ -13687,9 +13794,31 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSArrayBufferViewByteOffset(
JSArrayBufferView::kByteOffsetOffset);
}
-TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
- TNode<JSTypedArray> typed_array) {
- return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
+TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached(
+ TNode<JSTypedArray> typed_array, Label* detached) {
+ TVARIABLE(UintPtrT, result);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
+
+ Label variable_length(this), fixed_length(this), end(this);
+ Branch(IsVariableLengthTypedArray(typed_array), &variable_length,
+ &fixed_length);
+ BIND(&variable_length);
+ {
+ result =
+ LoadVariableLengthJSTypedArrayLength(typed_array, buffer, detached);
+ Goto(&end);
+ }
+
+ BIND(&fixed_length);
+ {
+ Label not_detached(this);
+ Branch(IsDetachedBuffer(buffer), detached, &not_detached);
+ BIND(&not_detached);
+ result = LoadJSTypedArrayLength(typed_array);
+ Goto(&end);
+ }
+ BIND(&end);
+ return result.value();
}
// ES #sec-integerindexedobjectlength
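The new LoadJSTypedArrayLengthAndCheckDetached helper replaces the plain length load: it dispatches on whether the typed array is length-tracking (backed by a resizable/growable buffer) and jumps to the caller-supplied detached label when the backing buffer is gone. A minimal stand-alone C++ sketch of that control flow, using simplified stand-in types rather than the real V8 object layouts:

#include <cstddef>
#include <optional>

// Simplified stand-ins for illustration; not the real V8 object layouts.
struct ArrayBuffer {
  bool detached = false;
  size_t byte_length = 0;
};

struct TypedArray {
  const ArrayBuffer* buffer = nullptr;
  bool length_tracking = false;  // "variable length": follows the buffer's size
  size_t fixed_length = 0;       // used when !length_tracking
  size_t element_size = 1;
};

// Returns nullopt where the CSA helper would jump to the `detached` label.
// (The real helper performs the detachment check inside the variable-length
// load path; it is folded up front here for brevity.)
std::optional<size_t> LengthOrDetached(const TypedArray& ta) {
  if (ta.buffer->detached) return std::nullopt;
  if (ta.length_tracking) return ta.buffer->byte_length / ta.element_size;
  return ta.fixed_length;
}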
@@ -14091,7 +14220,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
- CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::builtin_count)));
+ CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
@@ -14118,7 +14247,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
*data_type_out = Uint16Constant(0);
}
if (if_compile_lazy) {
- GotoIf(SmiEqual(CAST(sfi_data), SmiConstant(Builtins::kCompileLazy)),
+ GotoIf(SmiEqual(CAST(sfi_data), SmiConstant(Builtin::kCompileLazy)),
if_compile_lazy);
}
sfi_code = LoadBuiltin(CAST(sfi_data));
@@ -14138,22 +14267,20 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
#if V8_ENABLE_WEBASSEMBLY
+ WASM_CAPI_FUNCTION_DATA_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
- ASM_WASM_DATA_TYPE,
WASM_JS_FUNCTION_DATA_TYPE,
- WASM_CAPI_FUNCTION_DATA_TYPE,
+ ASM_WASM_DATA_TYPE,
#endif // V8_ENABLE_WEBASSEMBLY
};
Label check_is_bytecode_array(this);
Label check_is_baseline_data(this);
- Label check_is_exported_function_data(this);
Label check_is_asm_wasm_data(this);
Label check_is_uncompiled_data_without_preparse_data(this);
Label check_is_uncompiled_data_with_preparse_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
- Label check_is_wasm_js_function_data(this);
- Label check_is_wasm_capi_function_data(this);
+ Label check_is_wasm_function_data(this);
Label* case_labels[] = {
&check_is_bytecode_array,
&check_is_baseline_data,
@@ -14161,10 +14288,10 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
&check_is_uncompiled_data_with_preparse_data,
&check_is_function_template_info,
#if V8_ENABLE_WEBASSEMBLY
- &check_is_exported_function_data,
+ &check_is_wasm_function_data,
+ &check_is_wasm_function_data,
+ &check_is_wasm_function_data,
&check_is_asm_wasm_data,
- &check_is_wasm_js_function_data,
- &check_is_wasm_capi_function_data
#endif // V8_ENABLE_WEBASSEMBLY
};
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
@@ -14178,11 +14305,11 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
- TNode<BaselineData> baseline_data = CAST(sfi_data);
- TNode<Code> baseline_code =
- CAST(LoadObjectField(baseline_data, BaselineData::kBaselineCodeOffset));
- sfi_code = baseline_code;
- Goto(&done);
+ {
+ TNode<CodeT> baseline_code = LoadBaselineDataBaselineCode(CAST(sfi_data));
+ sfi_code = FromCodeT(baseline_code);
+ Goto(&done);
+ }
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
@@ -14202,13 +14329,16 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// This is the default branch, so assert that we have the expected data type.
CSA_ASSERT(this,
Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
- sfi_code = CAST(LoadObjectField(
- CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset));
+ {
+ TNode<CodeT> trampoline =
+ LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
+ sfi_code = FromCodeT(trampoline);
+ }
Goto(&done);
#if V8_ENABLE_WEBASSEMBLY
- // IsWasmExportedFunctionData: Use the wrapper code
- BIND(&check_is_exported_function_data);
+ // IsWasmFunctionData: Use the wrapper code
+ BIND(&check_is_wasm_function_data);
sfi_code = CAST(LoadObjectField(
CAST(sfi_data), WasmExportedFunctionData::kWrapperCodeOffset));
Goto(&done);
@@ -14217,18 +14347,6 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
BIND(&check_is_asm_wasm_data);
sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
Goto(&done);
-
- // IsWasmJSFunctionData: Use the wrapper code.
- BIND(&check_is_wasm_js_function_data);
- sfi_code = CAST(
- LoadObjectField(CAST(sfi_data), WasmJSFunctionData::kWrapperCodeOffset));
- Goto(&done);
-
- // IsWasmCapiFunctionData: Use the wrapper code.
- BIND(&check_is_wasm_capi_function_data);
- sfi_code = CAST(LoadObjectField(CAST(sfi_data),
- WasmCapiFunctionData::kWrapperCodeOffset));
- Goto(&done);
#endif // V8_ENABLE_WEBASSEMBLY
BIND(&done);
@@ -14238,6 +14356,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context) {
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
@@ -14257,7 +14376,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
- StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code);
+ StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, ToCodeT(code));
return CAST(fun);
}
@@ -14578,7 +14697,7 @@ void CodeStubAssembler::RemoveFinalizationRegistryCellFromUnregisterTokenMap(
PrototypeCheckAssembler::PrototypeCheckAssembler(
compiler::CodeAssemblerState* state, Flags flags,
TNode<NativeContext> native_context, TNode<Map> initial_prototype_map,
- Vector<DescriptorIndexNameValue> properties)
+ base::Vector<DescriptorIndexNameValue> properties)
: CodeStubAssembler(state),
flags_(flags),
native_context_(native_context),
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 9b54b5014e..a6970a0a00 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -15,8 +15,11 @@
#include "src/compiler/code-assembler.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/cell.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/js-function.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/js-promise.h"
#include "src/objects/objects.h"
#include "src/objects/promise.h"
#include "src/objects/shared-function-info.h"
@@ -333,6 +336,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Signed(value);
}
+ enum InitializationMode {
+ kUninitialized,
+ kInitializeToZero,
+ kInitializeToNull
+ };
+
TNode<Smi> ParameterToTagged(TNode<Smi> value) { return value; }
TNode<Smi> ParameterToTagged(TNode<IntPtrT> value) { return SmiTag(value); }
@@ -748,9 +757,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags = kNone);
TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
- TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous, int offset);
- TNode<HeapObject> InnerAllocate(TNode<HeapObject> previous,
- TNode<IntPtrT> offset);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
@@ -783,6 +789,49 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void FastCheck(TNode<BoolT> condition);
+ TNode<BoolT> IsCodeTMap(TNode<Map> map) {
+ return V8_EXTERNAL_CODE_SPACE_BOOL ? IsCodeDataContainerMap(map)
+ : IsCodeMap(map);
+ }
+ TNode<BoolT> IsCodeT(TNode<HeapObject> object) {
+ return IsCodeTMap(LoadMap(object));
+ }
+
+ TNode<Code> FromCodeT(TNode<CodeT> code) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return LoadObjectField<Code>(code, CodeDataContainer::kCodeOffset);
+#else
+ return code;
+#endif
+ }
+
+ TNode<CodeDataContainer> CodeDataContainerFromCodeT(TNode<CodeT> code) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return code;
+#else
+ return LoadObjectField<CodeDataContainer>(code,
+ Code::kCodeDataContainerOffset);
+#endif
+ }
+
+ TNode<CodeT> ToCodeT(TNode<Code> code) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return LoadObjectField<CodeDataContainer>(code,
+ Code::kCodeDataContainerOffset);
+#else
+ return code;
+#endif
+ }
+
+ TNode<CodeT> ToCodeT(TNode<Code> code,
+ TNode<CodeDataContainer> code_data_container) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return code_data_container;
+#else
+ return code;
+#endif
+ }
+
// The following Call wrappers call an object according to the semantics that
// one finds in the EcmaScript spec, operating on a Callable (e.g. a
// JSFunction or proxy) rather than a Code object.
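The CodeT helpers above let CSA code serve both build configurations: with V8_EXTERNAL_CODE_SPACE the canonical reference is the CodeDataContainer and FromCodeT pays one extra field load, while without it CodeT is simply Code and the conversions are no-ops. A minimal sketch of the same compile-time selection pattern, with stand-in types rather than the real heap objects:

// Simplified stand-ins for illustration; EXTERNAL_CODE_SPACE plays the role of
// V8_EXTERNAL_CODE_SPACE and the structs are not the real heap object layouts.
struct CodeDataContainer;
struct Code { CodeDataContainer* code_data_container; };
struct CodeDataContainer { Code* code; };

#ifdef EXTERNAL_CODE_SPACE
using CodeT = CodeDataContainer;   // callers hold the container...
inline Code* FromCodeT(CodeT* t) { return t->code; }             // ...one load to reach Code
inline CodeT* ToCodeT(Code* c) { return c->code_data_container; }
#else
using CodeT = Code;                // no external code space: CodeT is Code itself
inline Code* FromCodeT(CodeT* t) { return t; }
inline CodeT* ToCodeT(Code* c) { return c; }
#endif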
@@ -1942,6 +1991,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<PropertyArray> AllocatePropertyArray(TNode<IntPtrT> capacity);
+ TNode<HeapObject> AllocateWasmArray(TNode<IntPtrT> size_in_bytes,
+ int initialization);
+
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(TNode<Context> context,
TNode<Object> value,
@@ -2254,6 +2306,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site);
+ TNode<IntPtrT> TryTaggedToInt32AsIntPtr(TNode<Object> value,
+ Label* if_not_possible);
TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
Label* if_valueisnotnumber);
TNode<Float64T> TruncateTaggedToFloat64(TNode<Context> context,
@@ -3025,19 +3079,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> GetProperty(TNode<Context> context, TNode<Object> receiver,
TNode<Object> name) {
- return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ return CallBuiltin(Builtin::kGetProperty, context, receiver, name);
}
TNode<Object> SetPropertyStrict(TNode<Context> context,
TNode<Object> receiver, TNode<Object> key,
TNode<Object> value) {
- return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value);
+ return CallBuiltin(Builtin::kSetProperty, context, receiver, key, value);
}
TNode<Object> SetPropertyInLiteral(TNode<Context> context,
TNode<JSObject> receiver,
TNode<Object> key, TNode<Object> value) {
- return CallBuiltin(Builtins::kSetPropertyInLiteral, context, receiver, key,
+ return CallBuiltin(Builtin::kSetPropertyInLiteral, context, receiver, key,
value);
}
@@ -3052,15 +3106,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> sync_iterator);
template <class... TArgs>
- TNode<Object> CallBuiltin(Builtins::Name id, TNode<Object> context,
- TArgs... args) {
+ TNode<Object> CallBuiltin(Builtin id, TNode<Object> context, TArgs... args) {
return CallStub<Object>(Builtins::CallableFor(isolate(), id), context,
args...);
}
template <class... TArgs>
- void TailCallBuiltin(Builtins::Name id, TNode<Object> context,
- TArgs... args) {
+ void TailCallBuiltin(Builtin id, TNode<Object> context, TArgs... args) {
return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
@@ -3503,7 +3555,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
const char* method_name);
// JSTypedArray helpers
- TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+ TNode<UintPtrT> LoadJSTypedArrayLengthAndCheckDetached(
+ TNode<JSTypedArray> typed_array, Label* detached);
// Helper for length tracking JSTypedArrays and JSTypedArrays backed by
// ResizableArrayBuffer.
TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
@@ -4154,7 +4207,7 @@ class PrototypeCheckAssembler : public CodeStubAssembler {
PrototypeCheckAssembler(compiler::CodeAssemblerState* state, Flags flags,
TNode<NativeContext> native_context,
TNode<Map> initial_prototype_map,
- Vector<DescriptorIndexNameValue> properties);
+ base::Vector<DescriptorIndexNameValue> properties);
void CheckAndBranch(TNode<HeapObject> prototype, Label* if_unmodified,
Label* if_modified);
@@ -4163,7 +4216,7 @@ class PrototypeCheckAssembler : public CodeStubAssembler {
const Flags flags_;
const TNode<NativeContext> native_context_;
const TNode<Map> initial_prototype_map_;
- const Vector<DescriptorIndexNameValue> properties_;
+ const base::Vector<DescriptorIndexNameValue> properties_;
};
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 9de4ae24a3..99f6d725f9 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -43,7 +43,9 @@
#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
+#include "src/logging/counters.h"
#include "src/logging/log-inl.h"
+#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/map.h"
@@ -628,7 +630,6 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
DCHECK_EQ(shared_info.language_mode(), literal->language_mode());
shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
- shared_info.set_is_oneshot_iife(literal->is_oneshot_iife());
shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
shared_info.DisableOptimization(literal->dont_optimize_reason());
@@ -642,57 +643,6 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
-bool CompileSharedWithBaseline(Isolate* isolate,
- Handle<SharedFunctionInfo> shared,
- Compiler::ClearExceptionFlag flag,
- IsCompiledScope* is_compiled_scope) {
- // We shouldn't be passing uncompiled functions into this function.
- DCHECK(is_compiled_scope->is_compiled());
-
- // Early return for already baseline-compiled functions.
- if (shared->HasBaselineData()) return true;
-
- // Check if we actually can compile with baseline.
- if (!CanCompileWithBaseline(isolate, *shared)) return false;
-
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
- if (flag == Compiler::KEEP_EXCEPTION) {
- isolate->StackOverflow();
- }
- return false;
- }
-
- CompilerTracer::TraceStartBaselineCompile(isolate, shared);
- Handle<Code> code;
- base::TimeDelta time_taken;
- {
- ScopedTimer timer(&time_taken);
- if (!GenerateBaselineCode(isolate, shared).ToHandle(&code)) {
- // TODO(leszeks): This can only fail because of an OOM. Do we want to
- // report these somehow, or silently ignore them?
- return false;
- }
-
- Handle<HeapObject> function_data =
- handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
- Handle<BaselineData> baseline_data =
- isolate->factory()->NewBaselineData(code, function_data);
- shared->set_baseline_data(*baseline_data);
- }
- double time_taken_ms = time_taken.InMillisecondsF();
-
- CompilerTracer::TraceFinishBaselineCompile(isolate, shared, time_taken_ms);
-
- if (shared->script().IsScript()) {
- Compiler::LogFunctionCompilation(
- isolate, CodeEventListener::FUNCTION_TAG, shared,
- handle(Script::cast(shared->script()), isolate),
- Handle<AbstractCode>::cast(code), CodeKind::BASELINE, time_taken_ms);
- }
- return true;
-}
-
// Finalize a single compilation job. This function can return
// RETRY_ON_MAIN_THREAD if the job cannot be finalized off-thread, in which case
// it should be safe to call it again on the main thread with the same job.
@@ -1362,8 +1312,8 @@ void CompileAllWithBaseline(Isolate* isolate,
IsCompiledScope is_compiled_scope(*shared_info, isolate);
if (!is_compiled_scope.is_compiled()) continue;
if (!CanCompileWithBaseline(isolate, *shared_info)) continue;
- CompileSharedWithBaseline(isolate, shared_info, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope);
+ Compiler::CompileSharedWithBaseline(
+ isolate, shared_info, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
}
}
@@ -1533,7 +1483,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate, ScriptType type)
: flags_(UnoptimizedCompileFlags::ForToplevelCompile(
isolate, true, construct_language_mode(FLAG_use_strict),
- REPLMode::kNo, type)),
+ REPLMode::kNo, type, FLAG_lazy_streaming)),
compile_state_(isolate),
info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
isolate_for_local_isolate_(isolate),
@@ -1659,7 +1609,7 @@ void BackgroundCompileTask::Run() {
// Save the language mode.
language_mode_ = info_->language_mode();
- if (!FLAG_finalize_streaming_on_background || info_->flags().is_module()) {
+ if (!FLAG_finalize_streaming_on_background) {
if (info_->literal() != nullptr) {
CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
&compilation_jobs_);
@@ -1773,9 +1723,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// Set up parse info.
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
- flags.set_is_lazy_compile(true);
flags.set_collect_source_positions(true);
- flags.set_allow_natives_syntax(FLAG_allow_natives_syntax);
UnoptimizedCompileState compile_state(isolate);
ParseInfo parse_info(isolate, flags, &compile_state);
@@ -1850,7 +1798,6 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
// Set up parse info.
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
- flags.set_is_lazy_compile(true);
UnoptimizedCompileState compile_state(isolate);
ParseInfo parse_info(isolate, flags, &compile_state);
@@ -1980,6 +1927,63 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
// static
+bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Compiler::ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope) {
+ // We shouldn't be passing uncompiled functions into this function.
+ DCHECK(is_compiled_scope->is_compiled());
+
+ // Early return for already baseline-compiled functions.
+ if (shared->HasBaselineData()) return true;
+
+ // Check if we actually can compile with baseline.
+ if (!CanCompileWithBaseline(isolate, *shared)) return false;
+
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
+ if (flag == Compiler::KEEP_EXCEPTION) {
+ isolate->StackOverflow();
+ }
+ return false;
+ }
+
+ CompilerTracer::TraceStartBaselineCompile(isolate, shared);
+ Handle<Code> code;
+ base::TimeDelta time_taken;
+ {
+ ScopedTimer timer(&time_taken);
+ if (!GenerateBaselineCode(isolate, shared).ToHandle(&code)) {
+ // TODO(leszeks): This can only fail because of an OOM. Do we want to
+ // report these somehow, or silently ignore them?
+ return false;
+ }
+
+ Handle<HeapObject> function_data =
+ handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
+ Handle<BaselineData> baseline_data =
+ isolate->factory()->NewBaselineData(code, function_data);
+ shared->set_baseline_data(*baseline_data);
+ if (V8_LIKELY(FLAG_use_osr)) {
+ // Arm back edges for OSR
+ shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
+ AbstractCode::kMaxLoopNestingMarker);
+ }
+ }
+ double time_taken_ms = time_taken.InMillisecondsF();
+
+ CompilerTracer::TraceFinishBaselineCompile(isolate, shared, time_taken_ms);
+
+ if (shared->script().IsScript()) {
+ Compiler::LogFunctionCompilation(
+ isolate, CodeEventListener::FUNCTION_TAG, shared,
+ handle(Script::cast(shared->script()), isolate),
+ Handle<AbstractCode>::cast(code), CodeKind::BASELINE, time_taken_ms);
+ }
+ return true;
+}
+
+// static
bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope) {
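The relocated CompileSharedWithBaseline also gains OSR arming: when FLAG_use_osr is set, the bytecode's osr_loop_nesting_level is raised to the maximum marker so every loop back edge becomes a candidate for on-stack replacement into the newly generated baseline code. A rough sketch of the arming idea, assuming a simplified counter rather than V8's BytecodeArray field:

// Simplified stand-ins; V8 keeps the marker on the BytecodeArray and the
// check lives in the JumpLoop bytecode handling, not in a helper like this.
struct FunctionState {
  int osr_loop_nesting_level = 0;                   // 0 = back edges disarmed
  static constexpr int kMaxLoopNestingMarker = 6;   // assumed value
};

void ArmBackEdgesForOsr(FunctionState& f) {
  f.osr_loop_nesting_level = FunctionState::kMaxLoopNestingMarker;
}

// A back edge at static loop depth `loop_depth` asks whether to attempt OSR.
bool ShouldTryOsr(const FunctionState& f, int loop_depth) {
  // Once armed to the maximum marker, every loop depth qualifies.
  return loop_depth <= f.osr_loop_nesting_level;
}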
@@ -2142,7 +2146,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
allow_eval_cache = true;
} else {
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForToplevelCompile(
- isolate, true, language_mode, REPLMode::kNo);
+ isolate, true, language_mode, REPLMode::kNo, ScriptType::kClassic,
+ FLAG_lazy_eval);
flags.set_is_eval(true);
DCHECK(!flags.is_module());
flags.set_parse_restriction(restriction);
@@ -2873,7 +2878,8 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
isolate, natives == NOT_NATIVES_CODE, language_mode,
script_details.repl_mode,
origin_options.IsModule() ? ScriptType::kModule
- : ScriptType::kClassic);
+ : ScriptType::kClassic,
+ FLAG_lazy);
flags.set_is_eager(compile_options == ScriptCompiler::kEagerCompile);
@@ -2941,7 +2947,8 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
IsCompiledScope is_compiled_scope;
if (!maybe_result.ToHandle(&wrapped)) {
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForToplevelCompile(
- isolate, true, language_mode, script_details.repl_mode);
+ isolate, true, language_mode, script_details.repl_mode,
+ ScriptType::kClassic, FLAG_lazy);
flags.set_is_eval(true); // Use an eval scope as declaration scope.
flags.set_function_syntax_kind(FunctionSyntaxKind::kWrapped);
// TODO(delphick): Remove this and instead make the wrapped and wrapper
@@ -3027,7 +3034,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
DCHECK_EQ(task->flags().is_module(), origin_options.IsModule());
Handle<Script> script;
- if (FLAG_finalize_streaming_on_background && !origin_options.IsModule()) {
+ if (FLAG_finalize_streaming_on_background) {
RCS_SCOPE(isolate,
RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index e7d05b3ba3..78b5bea7bb 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -71,6 +71,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
+ static bool CompileSharedWithBaseline(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ ClearExceptionFlag flag,
+ IsCompiledScope* is_compiled_scope);
static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 4f8a6286a4..71d0d4d419 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -353,7 +353,17 @@ void ConstantPool::Emit(const ConstantPoolKey& key) {
if (key.is_value32()) {
assm_->dd(key.value32());
} else {
- assm_->dq(key.value64());
+ if (assm_->IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(key.rmode())) {
+ assm_->saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(assm_->pc_offset(), key.value64()));
+ Handle<Object> handle = assm_->GetEmbeddedObject(key.value64());
+ assm_->dq(handle->ptr());
+ // We must ensure that `dq` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(assm_->IsOnHeap());
+ } else {
+ assm_->dq(key.value64());
+ }
}
}
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index fb4147507c..47346d3455 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -7,10 +7,10 @@
#include <map>
+#include "src/base/numbers/double.h"
#include "src/codegen/label.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
-#include "src/numbers/double.h"
namespace v8 {
namespace internal {
@@ -29,7 +29,7 @@ class ConstantPoolEntry {
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value),
rmode_(rmode) {}
- ConstantPoolEntry(int position, Double value,
+ ConstantPoolEntry(int position, base::Double value,
RelocInfo::Mode rmode = RelocInfo::NONE)
: position_(position),
merged_index_(SHARING_ALLOWED),
@@ -106,14 +106,14 @@ class ConstantPoolBuilder {
}
// Add double constant to the embedded constant pool
- ConstantPoolEntry::Access AddEntry(int position, Double value) {
+ ConstantPoolEntry::Access AddEntry(int position, base::Double value) {
ConstantPoolEntry entry(position, value);
return AddEntry(&entry, ConstantPoolEntry::DOUBLE);
}
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, double value) {
- return AddEntry(position, Double(value));
+ return AddEntry(position, base::Double(value));
}
// Previews the access type required for the next new entry to be added.
diff --git a/deps/v8/src/codegen/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc
index 6c109861a2..0a22fbdd75 100644
--- a/deps/v8/src/codegen/external-reference-table.cc
+++ b/deps/v8/src/codegen/external-reference-table.cc
@@ -6,6 +6,7 @@
#include "src/builtins/accessors.h"
#include "src/codegen/external-reference.h"
+#include "src/execution/isolate.h"
#include "src/ic/stub-cache.h"
#include "src/logging/counters.h"
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 3e91306b15..d10f8e398e 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -7,7 +7,6 @@
#include "src/api/api.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
-#include "src/compiler/code-assembler.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -23,6 +22,7 @@
#include "src/numbers/hash-seed-inl.h"
#include "src/numbers/math-random.h"
#include "src/objects/elements.h"
+#include "src/objects/object-type.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/regexp/experimental/experimental.h"
@@ -37,6 +37,7 @@
#ifdef V8_INTL_SUPPORT
#include "src/base/platform/wrappers.h"
+#include "src/base/strings.h"
#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
@@ -183,7 +184,8 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
}
ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
- return ExternalReference(isolate->heap()->builtin_address(0));
+ return ExternalReference(
+ isolate->heap()->builtin_address(Builtins::FromInt(0)));
}
ExternalReference ExternalReference::handle_scope_implementer_address(
@@ -736,6 +738,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
isolate->regexp_stack()->memory_top_address_address());
}
+ExternalReference ExternalReference::javascript_execution_assert(
+ Isolate* isolate) {
+ return ExternalReference(isolate->javascript_execution_assert_address());
+}
+
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos,
BUILTIN_FP_CALL)
FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh,
@@ -786,7 +793,7 @@ void* libc_memchr(void* string, int character, size_t search_length) {
FUNCTION_REFERENCE(libc_memchr_function, libc_memchr)
void* libc_memcpy(void* dest, const void* src, size_t n) {
- return base::Memcpy(dest, src, n);
+ return memcpy(dest, src, n);
}
FUNCTION_REFERENCE(libc_memcpy_function, libc_memcpy)
@@ -804,6 +811,13 @@ void* libc_memset(void* dest, int value, size_t n) {
FUNCTION_REFERENCE(libc_memset_function, libc_memset)
+void relaxed_memcpy(volatile base::Atomic8* dest,
+ volatile const base::Atomic8* src, size_t n) {
+ base::Relaxed_Memcpy(dest, src, n);
+}
+
+FUNCTION_REFERENCE(relaxed_memcpy_function, relaxed_memcpy)
+
ExternalReference ExternalReference::printf_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(std::printf)));
}
@@ -827,15 +841,15 @@ ExternalReference ExternalReference::search_string_raw_one_one() {
}
ExternalReference ExternalReference::search_string_raw_one_two() {
- return search_string_raw<const uint8_t, const uc16>();
+ return search_string_raw<const uint8_t, const base::uc16>();
}
ExternalReference ExternalReference::search_string_raw_two_one() {
- return search_string_raw<const uc16, const uint8_t>();
+ return search_string_raw<const base::uc16, const uint8_t>();
}
ExternalReference ExternalReference::search_string_raw_two_two() {
- return search_string_raw<const uc16, const uc16>();
+ return search_string_raw<const base::uc16, const base::uc16>();
}
namespace {
@@ -953,11 +967,11 @@ ExternalReference ExternalReference::intl_to_latin1_lower_table() {
template ExternalReference
ExternalReference::search_string_raw<const uint8_t, const uint8_t>();
template ExternalReference
-ExternalReference::search_string_raw<const uint8_t, const uc16>();
+ExternalReference::search_string_raw<const uint8_t, const base::uc16>();
template ExternalReference
-ExternalReference::search_string_raw<const uc16, const uint8_t>();
+ExternalReference::search_string_raw<const base::uc16, const uint8_t>();
template ExternalReference
-ExternalReference::search_string_raw<const uc16, const uc16>();
+ExternalReference::search_string_raw<const base::uc16, const base::uc16>();
ExternalReference ExternalReference::FromRawAddress(Address address) {
return ExternalReference(address);
@@ -1153,6 +1167,82 @@ static uint64_t atomic_pair_compare_exchange(intptr_t address,
FUNCTION_REFERENCE(atomic_pair_compare_exchange_function,
atomic_pair_compare_exchange)
+#ifdef V8_IS_TSAN
+namespace {
+// Mimics the store in generated code by having a relaxed store to the same
+// address, with the same value. This is done in order for TSAN to see these
+// stores from generated code.
+// Note that {value} is an int64_t irrespective of the store size. This is on
+// purpose to keep the function signatures the same accross stores. The
+// static_cast inside the method will ignore the bits which will not be stored.
+void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(addr),
+ static_cast<base::Atomic8>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_relaxed_store_16_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::Relaxed_Store(reinterpret_cast<base::Atomic16*>(addr),
+ static_cast<base::Atomic16>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_relaxed_store_32_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::Relaxed_Store(reinterpret_cast<base::Atomic32*>(addr),
+ static_cast<base::Atomic32>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::Relaxed_Store(reinterpret_cast<base::Atomic64*>(addr),
+ static_cast<base::Atomic64>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+base::Atomic64 tsan_relaxed_load_64_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ return base::Relaxed_Load(reinterpret_cast<base::Atomic64*>(addr));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+} // namespace
+#endif // V8_IS_TSAN
+
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_8_bits,
+ tsan_relaxed_store_8_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_16_bits,
+ tsan_relaxed_store_16_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
+ tsan_relaxed_store_32_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
+ tsan_relaxed_store_64_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
+ tsan_relaxed_load_32_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
+ tsan_relaxed_load_64_bits)
+
static int EnterMicrotaskContextWrapper(HandleScopeImplementer* hsi,
Address raw_context) {
Context context = Context::cast(Object(raw_context));
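The tsan_relaxed_* references above exist so that, under ThreadSanitizer, stores and loads performed by JIT-generated x64 code can be replayed through C++ relaxed atomics on the same address and thereby become visible to the race detector. A minimal sketch of that shape using <atomic> (V8's actual helpers use base::Atomic types and are only wired up on x64):

#include <atomic>
#include <cstdint>

// Repeats a 32-bit store that generated code already performed, so TSAN sees it.
void tsan_relaxed_store_32(uintptr_t addr, int64_t value) {
  auto* slot = reinterpret_cast<std::atomic<uint32_t>*>(addr);
  slot->store(static_cast<uint32_t>(value), std::memory_order_relaxed);
}

// Mirrors a 32-bit load from generated code as a relaxed atomic load.
uint32_t tsan_relaxed_load_32(uintptr_t addr) {
  return reinterpret_cast<std::atomic<uint32_t>*>(addr)->load(
      std::memory_order_relaxed);
}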
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index f75a5c694a..169050e4ac 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -83,6 +83,7 @@ class StatsCounter;
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map") \
+ V(javascript_execution_assert, "javascript_execution_assert") \
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
#ifdef V8_HEAP_SANDBOX
@@ -175,6 +176,7 @@ class StatsCounter;
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
V(libc_memset_function, "libc_memset") \
+ V(relaxed_memcpy_function, "relaxed_memcpy") \
V(mod_two_doubles_operation, "mod_two_doubles") \
V(mutable_big_int_absolute_add_and_canonicalize_function, \
"MutableBigInt_AbsoluteAddAndCanonicalize") \
@@ -265,6 +267,18 @@ class StatsCounter;
V(atomic_pair_exchange_function, "atomic_pair_exchange_function") \
V(atomic_pair_compare_exchange_function, \
"atomic_pair_compare_exchange_function") \
+ IF_TSAN(V, tsan_relaxed_store_function_8_bits, \
+ "tsan_relaxed_store_function_8_bits") \
+ IF_TSAN(V, tsan_relaxed_store_function_16_bits, \
+ "tsan_relaxed_store_function_16_bits") \
+ IF_TSAN(V, tsan_relaxed_store_function_32_bits, \
+ "tsan_relaxed_store_function_32_bits") \
+ IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
+ "tsan_relaxed_store_function_64_bits") \
+ IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
+ "tsan_relaxed_load_function_32_bits") \
+ IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
+ "tsan_relaxed_load_function_64_bits") \
V(js_finalization_registry_remove_cell_from_unregister_token_map, \
"JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap") \
V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index 7bede6aa9b..f2db944baa 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -209,6 +209,8 @@ int HandlerTable::LookupReturn(int pc_offset) {
bool operator==(const Iterator& other) const {
return index == other.index;
}
+  // GLIBCXX_DEBUG checks use the <= comparator.
+ bool operator<=(const Iterator& other) { return index <= other.index; }
Iterator& operator++() {
index++;
return *this;
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index 5b83bf4a69..36b418f90f 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -40,10 +40,6 @@ class V8_EXPORT_PRIVATE HandlerTable {
UNCAUGHT, // The handler will (likely) rethrow the exception.
CAUGHT, // The exception will be caught by the handler.
PROMISE, // The exception will be caught and cause a promise rejection.
- DESUGARING, // The exception will be caught, but both the exception and
- // the catching are part of a desugaring and should therefore
- // not be visible to the user (we won't notify the debugger of
- // such exceptions).
ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
// in the desugaring of an async function, so special
// async/await handling in the debugger can take place.
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 1585f970e8..a5829e77d1 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -185,6 +185,16 @@ void Assembler::emit(Handle<HeapObject> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode);
+ if (rmode == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
+ Handle<HeapObject> object(reinterpret_cast<Address*>(x));
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(pc_offset(), x));
+ emit(object->ptr());
+ // We must ensure that `emit` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ return;
+ }
}
emit(x);
}
@@ -203,9 +213,18 @@ void Assembler::emit(const Immediate& x) {
if (x.is_heap_object_request()) {
RequestHeapObject(x.heap_object_request());
emit(0);
- } else {
- emit(x.immediate());
+ return;
+ }
+ if (x.is_embedded_object() && IsOnHeap()) {
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(pc_offset(), x.immediate()));
+ emit(x.embedded_object()->ptr());
+ // We must ensure that `emit` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ return;
}
+ emit(x.immediate());
}
void Assembler::emit_code_relative_offset(Label* label) {
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 688b038e91..1880ee1ad7 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -3345,6 +3345,8 @@ void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
DCHECK_EQ(buffer_start_, buffer_->start());
+ bool previously_on_heap = buffer_->IsOnHeap();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = 2 * old_size;
@@ -3384,13 +3386,22 @@ void Assembler::GrowBuffer() {
// Relocate pc-relative references.
int mode_mask = RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
DCHECK_EQ(mode_mask, RelocInfo::kApplyMask & mode_mask);
- Vector<byte> instructions{buffer_start_, static_cast<size_t>(pc_offset())};
- Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ base::Vector<byte> instructions{buffer_start_,
+ static_cast<size_t>(pc_offset())};
+ base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
for (RelocIterator it(instructions, reloc_info, 0, mode_mask); !it.done();
it.next()) {
it.rinfo()->apply(pc_delta);
}
+ // Patch on-heap references to handles.
+ if (previously_on_heap && !buffer_->IsOnHeap()) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ }
+ }
+
DCHECK(!buffer_overflow());
}
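This GrowBuffer hook and the on-heap paths added to emit() and ConstantPool::Emit() earlier in the diff follow one record-and-patch scheme: while assembling on the heap, the raw immediate is remembered together with its pc offset, and if the code buffer later moves off heap the recorded slots are rewritten with the original values. A minimal sketch of that bookkeeping, with hypothetical names standing in for the assembler internals:

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Hypothetical stand-in for the assembler's saved_handles_for_raw_object_ptr_.
struct PatchLog {
  std::vector<std::pair<int, uint32_t>> slots;  // (pc offset, original value)

  void RecordOnHeapSlot(int pc_offset, uint32_t original_value) {
    slots.emplace_back(pc_offset, original_value);
  }

  // Called when the code buffer falls back to an off-heap allocation.
  void RepatchAll(uint8_t* buffer_start) const {
    for (const auto& [offset, value] : slots) {
      std::memcpy(buffer_start + offset, &value, sizeof(value));  // unaligned-safe write
    }
  }
};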
diff --git a/deps/v8/src/codegen/ia32/cpu-ia32.cc b/deps/v8/src/codegen/ia32/cpu-ia32.cc
index 5e6d8a6207..0ebfa27f6d 100644
--- a/deps/v8/src/codegen/ia32/cpu-ia32.cc
+++ b/deps/v8/src/codegen/ia32/cpu-ia32.cc
@@ -4,7 +4,7 @@
// CPU specific code for ia32 independent of OS goes here.
-#ifdef __GNUC__
+#if defined(__GNUC__) && !defined(GOOGLE3)
#include "src/third_party/valgrind/valgrind.h"
#endif
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
index d079dfd725..b383b5df76 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -18,19 +18,35 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data,
+ int nof_expected_args) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (nof_expected_args >= 1) DCHECK(allocatable_regs | esi.bit());
+ if (nof_expected_args >= 2) DCHECK(allocatable_regs | edi.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(edi, ecx, edx, esi, kReturnRegister0);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(esi == kContextRegister);
+ STATIC_ASSERT(eax == kReturnRegister0);
return RegisterArray(eax, ecx, edx, edi, esi);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(esi == kContextRegister);
+ STATIC_ASSERT(eax == kReturnRegister0);
+ return RegisterArray(eax, ecx, edx, edi, esi);
}
// static
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 7c8af3fde0..16298ed536 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -54,7 +54,6 @@
#include "src/roots/roots-inl.h"
#include "src/roots/roots.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/utils.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
@@ -77,11 +76,13 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
// MacroAssembler implementation.
void TurboAssembler::InitializeRootRegister() {
+ ASM_CODE_COMMENT(this);
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, Immediate(isolate_root));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ ASM_CODE_COMMENT(this);
if (root_array_available()) {
mov(destination,
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
@@ -108,6 +109,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
void TurboAssembler::CompareRoot(Register with, Register scratch,
RootIndex index) {
+ ASM_CODE_COMMENT(this);
if (root_array_available()) {
CompareRoot(with, index);
} else {
@@ -119,6 +121,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
}
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+ ASM_CODE_COMMENT(this);
if (root_array_available()) {
cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
return;
@@ -134,6 +137,7 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
}
void MacroAssembler::PushRoot(RootIndex index) {
+ ASM_CODE_COMMENT(this);
if (root_array_available()) {
DCHECK(RootsTable::IsImmortalImmovable(index));
push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
@@ -165,6 +169,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
void TurboAssembler::PushArray(Register array, Register size, Register scratch,
PushArrayOrder order) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(array, size, scratch));
Register counter = scratch;
Label loop, entry;
@@ -226,17 +231,15 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
DCHECK(root_array_available());
- int builtin_index;
+ Builtin builtin;
RootIndex root_index;
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
return Operand(kRootRegister, RootRegisterOffsetForRootIndex(root_index));
- } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
- return Operand(kRootRegister,
- RootRegisterOffsetForBuiltinIndex(builtin_index));
+ } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
+ return Operand(kRootRegister, RootRegisterOffsetForBuiltin(builtin));
} else if (object.is_identical_to(code_object_) &&
- Builtins::IsBuiltinId(maybe_builtin_index_)) {
- return Operand(kRootRegister,
- RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
+ Builtins::IsBuiltinId(maybe_builtin_)) {
+ return Operand(kRootRegister, RootRegisterOffsetForBuiltin(maybe_builtin_));
} else {
// Objects in the constants table need an additional indirection, which
// cannot be represented as a single Operand.
@@ -246,6 +249,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
+ ASM_CODE_COMMENT(this);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
@@ -254,6 +258,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
+ ASM_CODE_COMMENT(this);
DCHECK(is_int32(offset));
DCHECK(root_array_available());
if (offset == 0) {
@@ -264,6 +269,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
mov(destination, Operand(kRootRegister, offset));
}
@@ -296,7 +302,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
if (fp_mode == SaveFPRegsMode::kSave) {
// Count all XMM registers except XMM0.
- bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ bytes += kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
}
return bytes;
@@ -304,6 +310,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
@@ -318,11 +325,15 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
if (fp_mode == SaveFPRegsMode::kSave) {
// Save all XMM registers except XMM0.
- int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ const int delta = kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
AllocateStackSpace(delta);
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
+#if V8_ENABLE_WEBASSEMBLY
+ Movdqu(Operand(esp, (i - 1) * kStackSavedSavedFPSize), reg);
+#else
+ Movsd(Operand(esp, (i - 1) * kStackSavedSavedFPSize), reg);
+#endif // V8_ENABLE_WEBASSEMBLY
}
bytes += delta;
}
@@ -332,13 +343,18 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
// Restore all XMM registers except XMM0.
- int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
+ const int delta = kStackSavedSavedFPSize * (XMMRegister::kNumRegisters - 1);
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
+#if V8_ENABLE_WEBASSEMBLY
+ Movdqu(reg, Operand(esp, (i - 1) * kStackSavedSavedFPSize));
+#else
+ Movsd(reg, Operand(esp, (i - 1) * kStackSavedSavedFPSize));
+#endif // V8_ENABLE_WEBASSEMBLY
}
add(esp, Immediate(delta));
bytes += delta;
@@ -356,10 +372,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
+ Register value, Register slot_address,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -373,16 +390,16 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- lea(dst, FieldOperand(object, offset));
+ lea(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) {
Label ok;
- test_b(dst, Immediate(kTaggedSize - 1));
+ test_b(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
}
- RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
SmiCheck::kOmit);
bind(&done);
@@ -391,12 +408,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
+ mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
push(Register::from_code(i));
@@ -404,8 +422,9 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ ASM_CODE_COMMENT(this);
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
if ((registers >> i) & 1u) {
pop(Register::from_code(i));
@@ -413,102 +432,92 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
}
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
push(object);
- push(address);
-
- pop(slot_parameter);
+ push(slot_address);
+ pop(slot_address_parameter);
pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
push(object);
- push(address);
-
- pop(slot_parameter);
+ push(slot_address);
+ pop(slot_address_parameter);
pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
// Use {wasm_call} for direct Wasm call within a module.
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- CallBuiltin(builtin_index);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ Builtin builtin =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ CallBuiltin(builtin);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK(object != value);
- DCHECK(object != address);
- DCHECK(value != address);
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, value, slot_address));
AssertNotSmi(object);
if ((remembered_set_action == RememberedSetAction::kOmit &&
@@ -518,8 +527,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
Label ok;
- cmp(value, Operand(address, 0));
+ cmp(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -542,15 +552,17 @@ void MacroAssembler::RecordWrite(Register object, Register address,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
+ RecordComment("CheckPageFlag]");
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+ ASM_CODE_COMMENT_STRING(this, "Clobber slot_address and value");
+ mov(slot_address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -631,6 +643,7 @@ void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// k = i16x8.splat(0x8000)
Pcmpeqd(scratch, scratch);
Psllw(scratch, scratch, byte{15});
@@ -643,6 +656,7 @@ void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
XMMRegister tmp1, XMMRegister tmp2,
Register scratch) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp1);
DCHECK_NE(src, tmp1);
DCHECK_NE(dst, tmp2);
@@ -731,6 +745,7 @@ void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
XMMRegister scratch,
Register tmp) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
XMMRegister original_dst = dst;
@@ -771,6 +786,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
XMMRegister scratch,
Register tmp) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vxorpd(scratch, scratch, scratch);
@@ -859,6 +875,7 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// src = |a|b|c|d|e|f|g|h| (low)
@@ -936,6 +953,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
}
void TurboAssembler::ShlPair_cl(Register high, Register low) {
+ ASM_CODE_COMMENT(this);
shld_cl(high, low);
shl_cl(low);
Label done;
@@ -959,6 +977,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
}
void TurboAssembler::ShrPair_cl(Register high, Register low) {
+ ASM_CODE_COMMENT(this);
shrd_cl(low, high);
shr_cl(high);
Label done;
@@ -970,6 +989,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) {
}
void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
+ ASM_CODE_COMMENT(this);
DCHECK_GE(63, shift);
if (shift >= 32) {
mov(low, high);
@@ -982,6 +1002,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
}
void TurboAssembler::SarPair_cl(Register high, Register low) {
+ ASM_CODE_COMMENT(this);
shrd_cl(low, high);
sar_cl(high);
Label done;
@@ -998,6 +1019,7 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
+ ASM_CODE_COMMENT(this);
LoadMap(map, heap_object);
CmpInstanceType(map, type);
}
@@ -1009,6 +1031,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
InstanceType lower_limit,
InstanceType higher_limit) {
+ ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
movzx_w(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
lea(scratch, Operand(scratch, 0u - lower_limit));
@@ -1017,6 +1040,7 @@ void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(equal, AbortReason::kOperandIsNotASmi);
}
@@ -1024,6 +1048,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
@@ -1037,6 +1062,7 @@ void MacroAssembler::AssertConstructor(Register object) {
void MacroAssembler::AssertFunction(Register object, Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
@@ -1050,6 +1076,7 @@ void MacroAssembler::AssertFunction(Register object, Register scratch) {
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
@@ -1061,6 +1088,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -1093,6 +1121,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, scratch, RootIndex::kUndefinedValue);
@@ -1106,18 +1135,21 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
void MacroAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT(this);
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmi);
}
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
+ ASM_CODE_COMMENT(this);
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(kContextRegister); // Callee's context.
@@ -1126,6 +1158,7 @@ void TurboAssembler::Prologue() {
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
push(ebp);
mov(ebp, esp);
if (!StackFrame::IsJavaScript(type)) {
@@ -1134,6 +1167,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
@@ -1144,6 +1178,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
#ifdef V8_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+ ASM_CODE_COMMENT(this);
// In windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
// page. Check this:
@@ -1165,6 +1200,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) {
sub(esp, Immediate(kStackPageSize));
@@ -1178,6 +1214,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
Register scratch) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -1211,6 +1248,7 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
}
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+ ASM_CODE_COMMENT(this);
// Optionally save all XMM registers.
if (save_doubles) {
int space =
@@ -1238,6 +1276,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
EnterExitFramePrologue(frame_type, edi);
// Set up argc and argv in callee-saved registers.
@@ -1255,6 +1294,7 @@ void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
+ ASM_CODE_COMMENT(this);
// Optionally restore all XMM registers.
if (save_doubles) {
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
@@ -1283,6 +1323,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
}
void MacroAssembler::LeaveExitFrameEpilogue() {
+ ASM_CODE_COMMENT(this);
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
@@ -1301,6 +1342,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
void MacroAssembler::LeaveApiExitFrame() {
+ ASM_CODE_COMMENT(this);
mov(esp, ebp);
pop(ebp);
@@ -1308,6 +1350,7 @@ void MacroAssembler::LeaveApiExitFrame() {
}
void MacroAssembler::PushStackHandler(Register scratch) {
+ ASM_CODE_COMMENT(this);
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -1324,6 +1367,7 @@ void MacroAssembler::PushStackHandler(Register scratch) {
}
void MacroAssembler::PopStackHandler(Register scratch) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
@@ -1333,6 +1377,7 @@ void MacroAssembler::PopStackHandler(Register scratch) {
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -1359,7 +1404,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// For runtime functions with variable arguments:
// -- eax : number of arguments
// -----------------------------------
-
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -1374,6 +1419,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
+ ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
@@ -1388,6 +1434,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void TurboAssembler::PrepareForTailCall(
Register callee_args_count, Register caller_args_count, Register scratch0,
Register scratch1, int number_of_temp_values_after_return_address) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the destination address where we will put the return address
@@ -1439,6 +1486,7 @@ void TurboAssembler::PrepareForTailCall(
}
void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
DCHECK(root_array_available());
Isolate* isolate = this->isolate();
// Address through the root register. No load is needed.
@@ -1456,6 +1504,7 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
Label* stack_overflow,
bool include_receiver) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(num_args, scratch);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1482,55 +1531,56 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
- if (expected_parameter_count != actual_parameter_count) {
- DCHECK_EQ(actual_parameter_count, eax);
- DCHECK_EQ(expected_parameter_count, ecx);
- Label regular_invoke;
-
- // If the expected parameter count is equal to the adaptor sentinel, no need
- // to push undefined value as arguments.
- cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kFar);
-
- // If overapplication or if the actual argument count is equal to the
- // formal parameter count, no need to push extra undefined values.
- sub(expected_parameter_count, actual_parameter_count);
- j(less_equal, &regular_invoke, Label::kFar);
-
- // We need to preserve edx, edi, esi and ebx.
- movd(xmm0, edx);
- movd(xmm1, edi);
- movd(xmm2, esi);
- movd(xmm3, ebx);
-
- Label stack_overflow;
- StackOverflowCheck(expected_parameter_count, edx, &stack_overflow);
-
- Register scratch = esi;
-
- // Underapplication. Move the arguments already in the stack, including the
- // receiver and the return address.
- {
- Label copy, check;
- Register src = edx, dest = esp, num = edi, current = ebx;
- mov(src, esp);
- lea(scratch,
- Operand(expected_parameter_count, times_system_pointer_size, 0));
- AllocateStackSpace(scratch);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
- lea(num, Operand(eax, extra_words)); // Number of words to copy.
- Move(current, 0);
- // Fall-through to the loop body because there are non-zero words to copy.
- bind(&copy);
- mov(scratch, Operand(src, current, times_system_pointer_size, 0));
- mov(Operand(dest, current, times_system_pointer_size, 0), scratch);
- inc(current);
- bind(&check);
- cmp(current, num);
- j(less, &copy);
- lea(edx, Operand(esp, num, times_system_pointer_size, 0));
- }
+ if (expected_parameter_count == actual_parameter_count) return;
+ ASM_CODE_COMMENT(this);
+ DCHECK_EQ(actual_parameter_count, eax);
+ DCHECK_EQ(expected_parameter_count, ecx);
+ Label regular_invoke;
+
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub(expected_parameter_count, actual_parameter_count);
+ j(less_equal, &regular_invoke, Label::kFar);
+
+ // We need to preserve edx, edi, esi and ebx.
+ movd(xmm0, edx);
+ movd(xmm1, edi);
+ movd(xmm2, esi);
+ movd(xmm3, ebx);
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, edx, &stack_overflow);
+
+ Register scratch = esi;
+
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy, check;
+ Register src = edx, dest = esp, num = edi, current = ebx;
+ mov(src, esp);
+ lea(scratch,
+ Operand(expected_parameter_count, times_system_pointer_size, 0));
+ AllocateStackSpace(scratch);
+ // Extra words are the receiver and the return address (if a jump).
+ int extra_words = type == InvokeType::kCall ? 1 : 2;
+ lea(num, Operand(eax, extra_words)); // Number of words to copy.
+ Move(current, 0);
+ // Fall-through to the loop body because there are non-zero words to copy.
+ bind(&copy);
+ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
+ mov(Operand(dest, current, times_system_pointer_size, 0), scratch);
+ inc(current);
+ bind(&check);
+ cmp(current, num);
+ j(less, &copy);
+ lea(edx, Operand(esp, num, times_system_pointer_size, 0));
+ }
// Fill remaining expected arguments with undefined values.
movd(ebx, xmm3); // Restore root.
@@ -1560,12 +1610,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
}
bind(&regular_invoke);
- }
}
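
Restructured this way, InvokePrologue returns immediately when the expected and actual counts match; otherwise it grows the stack by the number of missing arguments, copies the words already on the stack (return address, receiver, actual arguments) down to the new bottom, and fills the remaining slots with undefined. A rough standalone model of that under-application path, with a plain vector standing in for the machine stack and -1 standing in for undefined:

    #include <cstdio>
    #include <vector>

    std::vector<int> PadArguments(std::vector<int> stack, int actual, int expected,
                                  int undefined_marker = -1) {
      if (expected <= actual) return stack;  // over-application: nothing to do
      int missing = expected - actual;
      std::vector<int> grown(stack.size() + missing, undefined_marker);
      // Move the words that were already on the stack to the new frame bottom.
      for (size_t i = 0; i < stack.size(); ++i) grown[i] = stack[i];
      return grown;
    }

    int main() {
      // Return address, receiver, one actual argument; two more are expected.
      auto padded = PadArguments({100, 7, 1}, /*actual=*/1, /*expected=*/3);
      for (int w : padded) std::printf("%d ", w);  // 100 7 1 -1 -1
    }
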
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1598,6 +1648,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, edi);
@@ -1651,6 +1702,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK(type == InvokeType::kJump || has_frame());
@@ -1668,6 +1720,7 @@ void MacroAssembler::LoadGlobalProxy(Register dst) {
}
void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
+ ASM_CODE_COMMENT(this);
// Load the native context from the current context.
LoadMap(destination, esi);
mov(destination,
@@ -1992,16 +2045,18 @@ void TurboAssembler::Popcnt(Register dst, Operand src) {
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
+ ASM_CODE_COMMENT(this);
cmp(in_out, Immediate(kClearedWeakHeapObjectLower32));
j(equal, target_if_cleared);
and_(in_out, Immediate(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Operand operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
if (value == 1) {
@@ -2012,10 +2067,11 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Operand operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
if (value == 1) {
@@ -2043,6 +2099,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
}
void TurboAssembler::CheckStackAlignment() {
+ ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kSystemPointerSize) {
@@ -2094,6 +2151,7 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
@@ -2116,6 +2174,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
@@ -2174,13 +2233,14 @@ void TurboAssembler::PushPC() {
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ ASM_CODE_COMMENT(this);
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
// Inline the trampoline.
- CallBuiltin(builtin_index);
+ CallBuiltin(builtin);
return;
}
}
@@ -2189,6 +2249,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
@@ -2204,27 +2265,26 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ ASM_CODE_COMMENT(this);
LoadEntryFromBuiltinIndex(builtin_index);
call(builtin_index);
}
-void TurboAssembler::CallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- call(entry, RelocInfo::OFF_HEAP_TARGET);
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
}
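
CallBuiltin and the IsBuiltinHandle probes now traffic in the Builtin enum instead of a raw int index, so an unrelated integer can no longer be handed to a call site that expects a builtin id. A tiny illustration of what the stronger typing buys; the enum values are placeholders, not V8's real ids:

    #include <cstdint>

    enum class Builtin : int32_t { kNoBuiltinId = -1, kRecordWrite, kAbort };

    void CallBuiltin(Builtin builtin) {
      (void)builtin;  // resolve the off-heap entry and call it
    }

    int main() {
      CallBuiltin(Builtin::kRecordWrite);  // fine
      // CallBuiltin(7);                   // no longer compiles with an enum class
    }
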
-Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
- Builtins::Name builtin_index) {
+Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
return Operand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index));
+ IsolateData::builtin_entry_slot_offset(builtin));
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -2267,11 +2327,13 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
}
void TurboAssembler::CallCodeObject(Register code_object) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
call(code_object);
}
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ ASM_CODE_COMMENT(this);
LoadCodeObjectEntry(code_object, code_object);
switch (jump_mode) {
case JumpMode::kJump:
@@ -2294,14 +2356,11 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- jmp(entry, RelocInfo::OFF_HEAP_TARGET);
+ RecordCommentForOffHeapTrampoline(builtin);
+ jmp(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
return;
}
}
@@ -2310,6 +2369,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
void TurboAssembler::RetpolineCall(Register reg) {
+ ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
@@ -2330,6 +2390,7 @@ void TurboAssembler::RetpolineCall(Register reg) {
}
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
+ ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
@@ -2350,6 +2411,7 @@ void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::RetpolineJump(Register reg) {
+ ASM_CODE_COMMENT(this);
Label setup_target, capture_spec;
call(&setup_target);
@@ -2366,6 +2428,7 @@ void TurboAssembler::RetpolineJump(Register reg) {
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
+ ASM_CODE_COMMENT(this);
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
and_(scratch, Immediate(~kPageAlignmentMask));
@@ -2382,6 +2445,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ ASM_CODE_COMMENT(this);
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
// the stack and then popping it into the register.
@@ -2395,9 +2459,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ ASM_CODE_COMMENT(this);
CallBuiltin(target);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index a21a355568..cfec105d87 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -139,7 +139,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
- Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
+ Operand EntryFromBuiltinAsOperand(Builtin builtin);
void Call(Register reg) { call(reg); }
void Call(Operand op) { call(op); }
@@ -149,14 +149,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallBuiltin(int builtin_index);
+ void CallBuiltinByIndex(Register builtin_index);
+ void CallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
- void Jump(const ExternalReference& reference) override;
+ JumpMode jump_mode = JumpMode::kJump);
+ void Jump(const ExternalReference& reference);
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
@@ -167,10 +167,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void RetpolineJump(Register reg);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -269,13 +269,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void InitializeRootRegister();
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
// Indirect root-relative loads.
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
void PushPC();
@@ -437,18 +436,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
movd(dst, scratch);
}
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -488,11 +490,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void ExceptionHandler() {}
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
-
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -712,8 +709,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void IncrementCounter(StatsCounter* counter, int value, Register scratch);
- void DecrementCounter(StatsCounter* counter, int value, Register scratch);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch);
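
IncrementCounter and DecrementCounter become thin inline wrappers that test FLAG_native_code_counters and only then defer to the out-of-line Emit* bodies, so call sites pay nothing when native code counters are disabled. A small standalone sketch of that wrapper/Emit split; the flag and counter here are mocks:

    #include <cstdio>

    static bool flag_native_code_counters = false;

    void EmitIncrementCounter(const char* name, int value) {
      std::printf("emit: add [%s], %d\n", name, value);  // out-of-line "emission"
    }

    inline void IncrementCounter(const char* name, int value) {
      if (!flag_native_code_counters) return;  // cheap early out in the header
      EmitIncrementCounter(name, value);
    }

    int main() {
      IncrementCounter("compile_count", 1);  // no output: the flag is off
      flag_native_code_counters = true;
      IncrementCounter("compile_count", 1);  // now emits
    }
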
// ---------------------------------------------------------------------------
// Stack limit utilities
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
index 1b06aab68c..cf4ff5b0e6 100644
--- a/deps/v8/src/codegen/interface-descriptors-inl.h
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -69,6 +69,8 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::Initialize(
if (DerivedDescriptor::kRestrictAllocatableRegisters) {
data->RestrictAllocatableRegisters(registers.data(), registers.size());
+ } else {
+ DCHECK(!DerivedDescriptor::kCalleeSaveRegisters);
}
data->InitializeRegisters(
@@ -82,8 +84,10 @@ void StaticCallInterfaceDescriptor<DerivedDescriptor>::Initialize(
DCHECK(data->IsInitialized());
DCHECK(this->CheckFloatingPointParameters(data));
+#if DEBUG
+ DerivedDescriptor::Verify(data);
+#endif
}
-
// static
template <typename DerivedDescriptor>
constexpr int
@@ -194,6 +198,42 @@ constexpr Register FastNewObjectDescriptor::NewTargetRegister() {
}
// static
+constexpr Register WriteBarrierDescriptor::ObjectRegister() {
+ return std::get<kObject>(registers());
+}
+// static
+constexpr Register WriteBarrierDescriptor::SlotAddressRegister() {
+ return std::get<kSlotAddress>(registers());
+}
+
+// static
+constexpr Register WriteBarrierDescriptor::ValueRegister() {
+ return std::get<kSlotAddress + 1>(registers());
+}
+
+// static
+constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters(
+ Register object, Register slot_address) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList saved_registers = 0;
+#if V8_TARGET_ARCH_X64
+ // Only push clobbered registers.
+ if (object != ObjectRegister()) saved_registers |= ObjectRegister().bit();
+ if (slot_address != no_reg && slot_address != SlotAddressRegister()) {
+ saved_registers |= SlotAddressRegister().bit();
+ }
+#else
+ // TODO(cbruni): Enable callee-saved registers for other platforms.
+ // This is a temporary workaround to prepare code for callee-saved registers.
+ constexpr auto allocated_registers = registers();
+ for (size_t i = 0; i < allocated_registers.size(); ++i) {
+ saved_registers |= allocated_registers[i].bit();
+ }
+#endif
+ return saved_registers;
+}
+
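
ComputeSavedRegisters builds the spill set for a write barrier call: on x64 only the parameter registers that do not already hold the right value are pushed, while the other targets still conservatively save every register the descriptor allocates. A bitmask model of the x64 branch, with placeholder register numbers:

    #include <cstdint>

    using RegList = uint32_t;
    constexpr int kObjectReg = 3, kSlotAddressReg = 4, kNoReg = -1;

    RegList ComputeSavedRegisters(int object, int slot_address) {
      RegList saved = 0;
      // Only push registers whose current contents would be clobbered.
      if (object != kObjectReg) saved |= 1u << kObjectReg;
      if (slot_address != kNoReg && slot_address != kSlotAddressReg) {
        saved |= 1u << kSlotAddressReg;
      }
      return saved;
    }

    int main() {
      // Object is already in place, the slot address is not: one register saved.
      return ComputeSavedRegisters(3, 7) == (1u << kSlotAddressReg) ? 0 : 1;
    }
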
+// static
constexpr Register ApiGetterDescriptor::ReceiverRegister() {
return LoadDescriptor::ReceiverRegister();
}
@@ -278,13 +318,18 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
kInterpreterBytecodeArrayRegister);
+#elif V8_TARGET_ARCH_IA32
+ STATIC_ASSERT(kJSFunctionRegister == kInterpreterBytecodeArrayRegister);
+ return RegisterArray(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister);
#else
return DefaultRegisterArray();
#endif
@@ -293,9 +338,10 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// static
constexpr auto BaselineLeaveFrameDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+ V8_TARGET_ARCH_MIPS
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
@@ -334,11 +380,6 @@ constexpr auto FastNewObjectDescriptor::registers() {
}
// static
-constexpr auto TailCallOptimizedCodeSlotDescriptor::registers() {
- return RegisterArray(kJavaScriptCallCodeStartRegister);
-}
-
-// static
constexpr auto LoadNoFeedbackDescriptor::registers() {
return RegisterArray(LoadDescriptor::ReceiverRegister(),
LoadDescriptor::NameRegister(), ICKindRegister());
@@ -469,7 +510,7 @@ constexpr Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, DescriptorName) \
template <> \
- struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ struct CallInterfaceDescriptorFor<Builtin::k##Name> { \
using type = DescriptorName##Descriptor; \
};
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN,
@@ -479,7 +520,7 @@ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN,
#undef DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER
#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, ...) \
template <> \
- struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ struct CallInterfaceDescriptorFor<Builtin::k##Name> { \
using type = Name##Descriptor; \
};
BUILTIN_LIST_TFS(DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER)
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 2cafcae344..a30299011d 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -125,5 +125,31 @@ bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
#endif
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::Verify(
+ CallInterfaceDescriptorData* data) {}
+// static
+void WriteBarrierDescriptor::Verify(CallInterfaceDescriptorData* data) {
+ DCHECK(!AreAliased(ObjectRegister(), SlotAddressRegister(), ValueRegister()));
+ // The default parameters should not clobber vital registers in order to
+ // reduce code size:
+ DCHECK(!AreAliased(ObjectRegister(), kContextRegister,
+ kInterpreterAccumulatorRegister));
+ DCHECK(!AreAliased(SlotAddressRegister(), kContextRegister,
+ kInterpreterAccumulatorRegister));
+ DCHECK(!AreAliased(ValueRegister(), kContextRegister,
+ kInterpreterAccumulatorRegister));
+ DCHECK(!AreAliased(SlotAddressRegister(), kJavaScriptCallNewTargetRegister));
+ // Coincidental: to make calling from various builtins easier.
+ DCHECK_EQ(ObjectRegister(), kJSFunctionRegister);
+ // We need a certain set of registers by default:
+ RegList allocatable_regs = data->allocatable_registers();
+  DCHECK(allocatable_regs & kContextRegister.bit());
+  DCHECK(allocatable_regs & kReturnRegister0.bit());
+ VerifyArgumentRegisterCount(data, 4);
+}
+#endif // DEBUG
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 9a526370ca..cf4840bfd7 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -67,7 +67,7 @@ namespace internal {
V(ContextOnly) \
V(CppBuiltinAdaptor) \
V(DynamicCheckMaps) \
- V(EphemeronKeyBarrier) \
+ V(DynamicCheckMapsWithFeedbackVector) \
V(FastNewObject) \
V(ForInPrepare) \
V(GetIteratorStackParameter) \
@@ -78,7 +78,6 @@ namespace internal {
V(InterpreterCEntry1) \
V(InterpreterCEntry2) \
V(InterpreterDispatch) \
- V(TailCallOptimizedCodeSlot) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(JSTrampoline) \
@@ -96,7 +95,6 @@ namespace internal {
V(LoadWithReceiverBaseline) \
V(LookupBaseline) \
V(NoContext) \
- V(RecordWrite) \
V(ResumeGenerator) \
V(SuspendGeneratorBaseline) \
V(ResumeGeneratorBaseline) \
@@ -113,6 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
+ IF_TSAN(V, TSANRelaxedStore) \
+ IF_TSAN(V, TSANRelaxedLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
@@ -124,6 +124,7 @@ namespace internal {
V(WasmFloat64ToNumber) \
V(WasmI32AtomicWait32) \
V(WasmI64AtomicWait32) \
+ V(WriteBarrier) \
BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
@@ -149,6 +150,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// passed on the stack.
// This does not indicate if arguments adaption is used or not.
kAllowVarArgs = 1u << 2,
+ // Callee save allocatable_registers.
+ kCalleeSaveRegisters = 1u << 3,
};
using Flags = base::Flags<Flag>;
@@ -321,6 +324,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return flags() & CallInterfaceDescriptorData::kAllowVarArgs;
}
+ bool CalleeSaveRegisters() const {
+ return flags() & CallInterfaceDescriptorData::kCalleeSaveRegisters;
+ }
+
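
kCalleeSaveRegisters is one more bit in the descriptor's Flags word, surfaced through this accessor so code generators can ask whether a stub preserves the registers() set. A minimal model of the flag-bit-plus-accessor pattern; the values are illustrative:

    #include <cstdint>

    struct DescriptorData {
      enum Flag : uint32_t {
        kNoFlags = 0,
        kNoContext = 1u << 0,
        kAllowVarArgs = 1u << 2,
        kCalleeSaveRegisters = 1u << 3,  // the newly added bit
      };
      uint32_t flags = 0;
      bool CalleeSaveRegisters() const {
        return (flags & kCalleeSaveRegisters) != 0;
      }
    };

    int main() {
      DescriptorData d;
      d.flags |= DescriptorData::kCalleeSaveRegisters;
      return d.CalleeSaveRegisters() ? 0 : 1;
    }
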
int GetReturnCount() const { return data()->return_count(); }
MachineType GetReturnType(int index) const {
@@ -431,6 +438,9 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
// the first kParameterCount registers() are the parameters of the builtin.
static constexpr bool kRestrictAllocatableRegisters = false;
+ // If set to true, builtins will callee save the set returned by registers().
+ static constexpr bool kCalleeSaveRegisters = false;
+
// End of customization points.
// ===========================================================================
@@ -443,6 +453,9 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
: 0) |
(DerivedDescriptor::kNoStackScan
? CallInterfaceDescriptorData::kNoStackScan
+ : 0) |
+ (DerivedDescriptor::kCalleeSaveRegisters
+ ? CallInterfaceDescriptorData::kCalleeSaveRegisters
: 0));
}
static constexpr inline bool AllowVarArgs() {
@@ -462,6 +475,15 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
explicit StaticCallInterfaceDescriptor(CallDescriptors::Key key)
: CallInterfaceDescriptor(key) {}
+#if DEBUG
+ // Overwritten in DerivedDescriptor.
+ static void Verify(CallInterfaceDescriptorData* data);
+ // Verify that the CallInterfaceDescriptorData contains the default
+ // argument registers for {argc} arguments.
+ static inline void VerifyArgumentRegisterCount(
+ CallInterfaceDescriptorData* data, int nof_expected_args);
+#endif
+
private:
// {CallDescriptors} is allowed to call the private {Initialize} method.
friend class CallDescriptors;
@@ -487,7 +509,7 @@ class StaticJSCallInterfaceDescriptor
Descriptor>::StaticCallInterfaceDescriptor;
};
-template <Builtins::Name kBuiltin>
+template <Builtin kBuiltin>
struct CallInterfaceDescriptorFor;
// Stub class replacing std::array<Register, 0>, as a workaround for MSVC's
@@ -498,11 +520,12 @@ struct EmptyRegisterArray {
Register operator[](size_t i) const { UNREACHABLE(); }
};
-// Helper method for defining an array of registers for the various
+// Helper method for defining an array of unique registers for the various
// Descriptor::registers() methods.
template <typename... Registers>
constexpr std::array<Register, 1 + sizeof...(Registers)> RegisterArray(
Register first_reg, Registers... regs) {
+ DCHECK(!AreAliased(first_reg, regs...));
return {first_reg, regs...};
}
constexpr EmptyRegisterArray RegisterArray() { return {}; }
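
RegisterArray now promises, and debug-checks, that the registers it is handed are pairwise distinct. A standalone C++17 sketch of such a uniqueness-checked variadic helper, with plain ints standing in for Register:

    #include <array>
    #include <cassert>

    constexpr bool AllDistinct() { return true; }
    template <typename... Rest>
    constexpr bool AllDistinct(int first, Rest... rest) {
      return ((first != rest) && ...) && AllDistinct(rest...);
    }

    template <typename... Regs>
    constexpr std::array<int, 1 + sizeof...(Regs)> RegisterArray(int first,
                                                                 Regs... regs) {
      assert(AllDistinct(first, regs...));  // mirrors DCHECK(!AreAliased(...))
      return {first, regs...};
    }

    int main() {
      constexpr auto regs = RegisterArray(1, 2, 3);  // {1, 2, 1} would assert
      return regs[0] == 1 ? 0 : 1;
    }
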
@@ -975,6 +998,24 @@ class DynamicCheckMapsDescriptor final
static constexpr bool kRestrictAllocatableRegisters = true;
};
+class DynamicCheckMapsWithFeedbackVectorDescriptor final
+ : public StaticCallInterfaceDescriptor<
+ DynamicCheckMapsWithFeedbackVectorDescriptor> {
+ public:
+ DEFINE_PARAMETERS(kMap, kFeedbackVector, kSlot, kHandler)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::Int32(), // return val
+ MachineType::TaggedPointer(), // kMap
+ MachineType::TaggedPointer(), // kFeedbackVector
+ MachineType::IntPtr(), // kSlot
+ MachineType::TaggedSigned()) // kHandler
+
+ DECLARE_DESCRIPTOR(DynamicCheckMapsWithFeedbackVectorDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
+};
+
class FastNewObjectDescriptor
: public StaticCallInterfaceDescriptor<FastNewObjectDescriptor> {
public:
@@ -989,35 +1030,56 @@ class FastNewObjectDescriptor
static constexpr auto registers();
};
-class RecordWriteDescriptor final
- : public StaticCallInterfaceDescriptor<RecordWriteDescriptor> {
+class WriteBarrierDescriptor final
+ : public StaticCallInterfaceDescriptor<WriteBarrierDescriptor> {
public:
- DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlot, kRememberedSet, kFPMode)
+ DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
- MachineType::Pointer(), // kSlot
- MachineType::TaggedSigned(), // kRememberedSet
- MachineType::TaggedSigned()) // kFPMode
+ MachineType::Pointer()) // kSlotAddress
+
+ DECLARE_DESCRIPTOR(WriteBarrierDescriptor)
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
+ static constexpr bool kCalleeSaveRegisters = true;
+ static constexpr inline Register ObjectRegister();
+ static constexpr inline Register SlotAddressRegister();
+ // A temporary register used in helpers.
+ static constexpr inline Register ValueRegister();
+ static constexpr inline RegList ComputeSavedRegisters(
+ Register object, Register slot_address = no_reg);
+#if DEBUG
+ static void Verify(CallInterfaceDescriptorData* data);
+#endif
+};
+
+#ifdef V8_IS_TSAN
+class TSANRelaxedStoreDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+ public:
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress
+ MachineType::AnyTagged()) // kValue
- DECLARE_DESCRIPTOR(RecordWriteDescriptor)
+ DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
-class EphemeronKeyBarrierDescriptor final
- : public StaticCallInterfaceDescriptor<EphemeronKeyBarrierDescriptor> {
+class TSANRelaxedLoadDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
public:
- DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress, kFPMode)
- DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
- MachineType::Pointer(), // kSlotAddress
- MachineType::TaggedSigned()) // kFPMode
+ DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
+ DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress
- DECLARE_DESCRIPTOR(EphemeronKeyBarrierDescriptor)
+ DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
+#endif // V8_IS_TSAN
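
The two TSAN descriptors back out-of-line helpers that perform relaxed atomic stores and loads so ThreadSanitizer can observe accesses the generated code would otherwise make as plain moves. In portable C++ the operations they model look roughly like this:

    #include <atomic>
    #include <cstdio>

    std::atomic<int> cell{0};

    int main() {
      cell.store(42, std::memory_order_relaxed);     // TSANRelaxedStore analogue
      int v = cell.load(std::memory_order_relaxed);  // TSANRelaxedLoad analogue
      std::printf("%d\n", v);
    }
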
+
class TypeConversionDescriptor final
: public StaticCallInterfaceDescriptor<TypeConversionDescriptor> {
public:
@@ -1519,17 +1581,6 @@ class GrowArrayElementsDescriptor
static constexpr auto registers();
};
-class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
- : public StaticCallInterfaceDescriptor<
- TailCallOptimizedCodeSlotDescriptor> {
- public:
- DEFINE_PARAMETERS(kOptimizedCodeEntry)
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) // kAccumulator
- DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor)
-
- static constexpr auto registers();
-};
-
class BaselineOutOfLinePrologueDescriptor
: public StaticCallInterfaceDescriptor<
BaselineOutOfLinePrologueDescriptor> {
diff --git a/deps/v8/src/codegen/machine-type.cc b/deps/v8/src/codegen/machine-type.cc
index 1972c41b24..064e17d2f0 100644
--- a/deps/v8/src/codegen/machine-type.cc
+++ b/deps/v8/src/codegen/machine-type.cc
@@ -93,7 +93,6 @@ std::ostream& operator<<(std::ostream& os, MachineType type) {
} else {
return os << type.representation() << "|" << type.semantic();
}
- return os;
}
} // namespace internal
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 484ec9e4b2..cfa7a4d341 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -28,7 +28,11 @@ enum AllocationFlags {
PRETENURE = 1 << 3,
};
-enum class RememberedSetAction { kOmit, kEmit };
+enum class JumpMode {
+ kJump, // Does a direct jump to the given address
+ kPushAndReturn // Pushes the given address as the current return address and
+ // does a return
+};
enum class SmiCheck { kOmit, kInline };
@@ -79,7 +83,13 @@ static constexpr int kMaxCParameters = 256;
class V8_NODISCARD FrameScope {
public:
explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
- : tasm_(tasm), type_(type), old_has_frame_(tasm->has_frame()) {
+ :
+#ifdef V8_CODE_COMMENTS
+ comment_(tasm, frame_name(type)),
+#endif
+ tasm_(tasm),
+ type_(type),
+ old_has_frame_(tasm->has_frame()) {
tasm->set_has_frame(true);
if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
tasm->EnterFrame(type);
@@ -94,9 +104,30 @@ class V8_NODISCARD FrameScope {
}
private:
+#ifdef V8_CODE_COMMENTS
+ const char* frame_name(StackFrame::Type type) {
+ switch (type) {
+ case StackFrame::NONE:
+ return "Frame: NONE";
+ case StackFrame::MANUAL:
+ return "Frame: MANUAL";
+#define FRAME_TYPE_CASE(type, field) \
+ case StackFrame::type: \
+ return "Frame: " #type;
+ STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
+#undef FRAME_TYPE_CASE
+ case StackFrame::NUMBER_OF_TYPES:
+ break;
+ }
+ return "Frame";
+ }
+
+ Assembler::CodeComment comment_;
+#endif // V8_CODE_COMMENTS
+
TurboAssembler* tasm_;
- StackFrame::Type type_;
- bool old_has_frame_;
+ StackFrame::Type const type_;
+ bool const old_has_frame_;
};
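
With the CodeComment member in place, every frame a FrameScope sets up is bracketed by a named comment in the emitted code whenever V8_CODE_COMMENTS is enabled. A compact model of that RAII bracketing, with a printing Assembler standing in for the real one:

    #include <cstdio>
    #include <string>

    struct Assembler {
      void RecordComment(const std::string& s) { std::printf("; %s\n", s.c_str()); }
      struct CodeComment {
        CodeComment(Assembler* assm, const std::string& name) : assm_(assm) {
          assm_->RecordComment("[ " + name);
        }
        ~CodeComment() { assm_->RecordComment("]"); }
        Assembler* assm_;
      };
    };

    struct FrameScope {
      FrameScope(Assembler* assm, const char* frame_name)
          : comment_(assm, frame_name) { /* EnterFrame(type) would go here */ }
      ~FrameScope() { /* LeaveFrame(type) */ }
      Assembler::CodeComment comment_;
    };

    int main() {
      Assembler assm;
      FrameScope scope(&assm, "Frame: INTERNAL");  // prints "[ Frame: INTERNAL", then "]"
    }
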
class V8_NODISCARD FrameAndConstantPoolScope {
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index c254860b14..2948dbd18a 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -3570,8 +3570,9 @@ void Assembler::GrowBuffer() {
reloc_info_writer.last_pc() + pc_delta);
// Relocate runtime entries.
- Vector<byte> instructions{buffer_start_, pc_offset()};
- Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ base::Vector<byte> instructions{buffer_start_,
+ static_cast<size_t>(pc_offset())};
+ base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 47bdf26d55..2eced6363b 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -1918,6 +1918,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
+
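
The new Include/Exclude helpers let callers temporarily donate registers to, or reserve registers from, the scratch pool; the scope still restores the original set when it is destroyed. A standalone sketch with a plain bitmask for RegList:

    #include <cstdint>
    #include <cstdio>

    using RegList = uint32_t;

    struct UseScratchRegisterScope {
      explicit UseScratchRegisterScope(RegList* available)
          : available_(available), old_available_(*available) {}
      ~UseScratchRegisterScope() { *available_ = old_available_; }  // restore on exit
      void Include(RegList list) { *available_ |= list; }
      void Exclude(RegList list) { *available_ &= ~list; }
      RegList* available_;
      RegList old_available_;
    };

    int main() {
      RegList available = 0b0110;
      {
        UseScratchRegisterScope temps(&available);
        temps.Include(0b1000);  // donate a register for this scope
        temps.Exclude(0b0010);  // reserve one the surrounding code is using
        std::printf("inside: 0x%x\n", available);  // 0xc
      }
      std::printf("after:  0x%x\n", available);    // back to 0x6
    }
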
private:
RegList* available_;
RegList old_available_;
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
index edea1b3844..e1b7451eda 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -19,19 +19,34 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+  if (argc >= 1) DCHECK(allocatable_regs & a0.bit());
+  if (argc >= 2) DCHECK(allocatable_regs & a1.bit());
+  if (argc >= 3) DCHECK(allocatable_regs & a2.bit());
+  if (argc >= 4) DCHECK(allocatable_regs & a3.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, v0, a0, a2, a3);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == v0);
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
}
// static
@@ -77,14 +92,13 @@ constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- return a3;
+ return a2;
}
// static
constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
// TODO(v8:11421): Implement on this platform.
- return t0;
+ return a3;
}
// static
@@ -192,6 +206,9 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index d48b441c7b..c197d8e6f3 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -22,7 +22,6 @@
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -211,8 +210,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -222,8 +221,8 @@ void TurboAssembler::SaveRegisters(RegList registers) {
MultiPush(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -233,97 +232,82 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- // Inline the trampoline.
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(t9);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ Builtin builtin =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(t9);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
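The split is now explicit: CallRecordWriteStub assumes |object| and |slot_address| already sit in the WriteBarrierDescriptor registers (hence the DCHECK_EQs), while CallRecordWriteStubSaveRegisters accepts arbitrary registers and routes them into place with a Push/Push/Pop/Pop sequence that stays correct even if sources and destinations alias. A throwaway sketch (not V8 code) of that LIFO shuffle:

    #include <cstdio>
    #include <stack>
    #include <string>

    int main() {
      std::string object = "value in a3", slot_address = "value in a4";
      std::stack<std::string> s;
      s.push(object);                                          // Push(object);
      s.push(slot_address);                                    // Push(slot_address);
      std::string slot_address_parameter = s.top(); s.pop();   // Pop(slot_address_parameter);
      std::string object_parameter = s.top(); s.pop();         // Pop(object_parameter);
      std::printf("object reg gets: %s\nslot reg gets: %s\n",
                  object_parameter.c_str(), slot_address_parameter.c_str());
      return 0;
    }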
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
@@ -340,6 +324,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ DCHECK(!AreAliased(object, value, scratch));
lw(scratch, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
@@ -371,7 +356,12 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ mov(slot_address, address);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
+
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
@@ -383,6 +373,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) {
li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
+ li(slot_address, Operand(bit_cast<int32_t>(kZapValue + 20)));
}
}
@@ -3782,10 +3773,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
@@ -3807,11 +3798,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t9, 0, cond, rs, rt, bd);
return;
}
@@ -3924,10 +3912,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
@@ -3947,11 +3935,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9, 0, cond, rs, rt, bd);
return;
}
@@ -3973,11 +3958,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
lw(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Lw(destination, EntryFromBuiltinAsOperand(builtin));
+}
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin));
+}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
+ if (FLAG_code_comments) RecordComment("]");
+}
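EntryFromBuiltinAsOperand addresses a table of code entry addresses at a fixed offset from the root register, one pointer-sized slot per builtin. A sketch of the arithmetic under stated assumptions; the constants are placeholders, not IsolateData's real layout:

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kBuiltinEntryTableOffset = 0x100;  // hypothetical
    constexpr intptr_t kSystemPointerSize = 8;

    intptr_t BuiltinEntrySlotOffset(int builtin_id) {
      return kBuiltinEntryTableOffset + builtin_id * kSystemPointerSize;
    }

    int main() {
      // Corresponds to MemOperand(kRootRegister, builtin_entry_slot_offset(builtin)).
      std::printf("slot offset for builtin 3: %ld\n",
                  static_cast<long>(BuiltinEntrySlotOffset(3)));
      return 0;
    }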
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips32r6) {
@@ -4711,8 +4710,9 @@ void MacroAssembler::LoadWeakValue(Register out, Register in,
And(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, ExternalReference::Create(counter));
@@ -4722,8 +4722,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, ExternalReference::Create(counter));
@@ -4826,19 +4827,12 @@ void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- int stack_offset = -3 * kPointerSize;
- const int fp_offset = 1 * kPointerSize;
- addiu(sp, sp, stack_offset);
- stack_offset = -stack_offset - kPointerSize;
- sw(ra, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- sw(fp, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- li(t9, Operand(StackFrame::TypeToMarker(type)));
- sw(t9, MemOperand(sp, stack_offset));
- // Adjust FP to point to saved FP.
- DCHECK_EQ(stack_offset, 0);
- Addu(fp, sp, Operand(fp_offset));
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+ Push(kScratchReg);
+ }
}
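The rewritten EnterFrame drops the hand-computed stack offsets: it pushes ra and fp, points fp at the saved fp, and only non-JavaScript frames get a type marker. A throwaway sketch of the resulting layout, with the stack modeled as a growing vector (the real stack grows downward):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> slots;
      slots.push_back("saved ra");
      slots.push_back("saved fp");  // Move(fp, sp): fp now points at this slot
      bool is_javascript_frame = false;
      if (!is_javascript_frame) slots.push_back("StackFrame::TypeToMarker(type)");
      for (auto i = slots.size(); i-- > 0;) {
        std::printf("sp[%zu]: %s\n", slots.size() - 1 - i, slots[i].c_str());
      }
      return 0;
    }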
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -5549,9 +5543,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Lw(t9,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
@@ -5567,6 +5561,60 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
}
}
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, out;
+
+ Register scratch = kScratchReg;
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+
+ // Not an off-heap trampoline object, the entry point is at
+ // Code::raw_instruction_start().
+ Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_off_heap);
+ Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ Lsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
+ Lw(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
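The newly implemented LoadCodeObjectEntry picks between two entry points depending on the Code object's flags. A minimal sketch of that decision, assuming a hypothetical flag mask and header size (not V8's real field layout):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kIsOffHeapTrampolineMask = 1u << 7;  // hypothetical
    constexpr intptr_t kCodeHeaderSize = 0x40;               // hypothetical

    intptr_t CodeEntry(intptr_t code_object, uint32_t flags,
                       const intptr_t* builtin_entry_table, int builtin_index) {
      if ((flags & kIsOffHeapTrampolineMask) != 0) {
        return builtin_entry_table[builtin_index];  // off-heap: builtin entry table
      }
      return code_object + kCodeHeaderSize;  // on-heap: entry follows the header
    }

    int main() {
      intptr_t table[] = {0x1000, 0x2000};
      std::printf("0x%lx\n", static_cast<long>(
          CodeEntry(0x5000, kIsOffHeapTrampolineMask, table, 1)));
      std::printf("0x%lx\n", static_cast<long>(CodeEntry(0x5000, 0, table, 1)));
      return 0;
    }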
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 8a82eea6fa..cb362da51d 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -13,6 +13,7 @@
#include "src/codegen/mips/assembler-mips.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -107,8 +108,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// -------------------------------------------------------------------------
// Debugging.
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
@@ -188,10 +189,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void li(Register dst, const StringConstantBase* string,
LiFlags mode = OPTIMIZE_SIZE);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
+
+ inline void Move(Register output, MemOperand operand) { Lw(output, operand); }
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
@@ -209,7 +211,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// patching.
void PatchAndJump(Address target);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
@@ -218,32 +220,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target);
void LoadAddress(Register dst, Label* target);
- // Load the builtin given by the Smi in |builtin_index| into the same
+ // Load the builtin given by the Smi in |builtin| into the same
// register.
- void LoadEntryFromBuiltinIndex(Register builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+
+ void CallBuiltinByIndex(Register builtin_index);
+ void CallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
- void LoadCodeObjectEntry(Register destination,
- Register code_object) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
- void CallCodeObject(Register code_object) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
+ JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -322,18 +319,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order = kNormal);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
@@ -782,7 +782,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Register destination, RootIndex index, Condition cond,
Register src1, const Operand& src2);
@@ -802,8 +802,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);
  // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
- BranchDelaySlot bd = PROTECT);
+ void JumpIfSmi(Register value, Label* smi_label,
+ Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
@@ -905,11 +905,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
-
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -1086,9 +1081,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support.
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
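IncrementCounter/DecrementCounter are now thin wrappers that bail out before emitting anything when native code counters are disabled. A sketch of the pattern; the flag and the Emit* body are stand-ins:

    #include <cstdio>

    bool FLAG_native_code_counters = false;

    void EmitIncrementCounter(int value) { std::printf("emit: counter += %d\n", value); }

    void IncrementCounter(int value) {
      if (!FLAG_native_code_counters) return;  // no code emitted
      EmitIncrementCounter(value);
    }

    int main() {
      IncrementCounter(1);                // prints nothing
      FLAG_native_code_counters = true;
      IncrementCounter(1);                // prints the emit line
      return 0;
    }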
// -------------------------------------------------------------------------
// Stack limit utilities
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 70a02ddb6f..73fbe4ce4d 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -3769,8 +3769,9 @@ void Assembler::GrowBuffer() {
reloc_info_writer.last_pc() + pc_delta);
// Relocate runtime entries.
- Vector<byte> instructions{buffer_start_, pc_offset()};
- Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ base::Vector<byte> instructions{buffer_start_,
+ static_cast<size_t>(pc_offset())};
+ base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 41ebea8e5b..ae3a2a2819 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -1948,6 +1948,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
+
private:
RegList* available_;
RegList old_available_;
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
index 62e32776ef..3bb84dfa37 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -19,19 +19,38 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs & a0.bit());
+ if (argc >= 2) DCHECK(allocatable_regs & a1.bit());
+ if (argc >= 3) DCHECK(allocatable_regs & a2.bit());
+ if (argc >= 4) DCHECK(allocatable_regs & a3.bit());
+ if (argc >= 5) DCHECK(allocatable_regs & a4.bit());
+ if (argc >= 6) DCHECK(allocatable_regs & a5.bit());
+ if (argc >= 7) DCHECK(allocatable_regs & a6.bit());
+ if (argc >= 8) DCHECK(allocatable_regs & a7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
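VerifyArgumentRegisterCount checks that each argument register is part of the descriptor's allocatable set, which is a bitwise containment test. A sketch of that test with placeholder bit values:

    #include <cassert>
    #include <cstdint>

    using RegList = uint64_t;

    bool AllocatableContains(RegList allocatable_regs, RegList reg_bit) {
      return (allocatable_regs & reg_bit) != 0;
    }

    int main() {
      const RegList a0_bit = 1ull << 4, a1_bit = 1ull << 5, a2_bit = 1ull << 6;
      const RegList allocatable = a0_bit | a1_bit;
      assert(AllocatableContains(allocatable, a0_bit));
      assert(!AllocatableContains(allocatable, a2_bit));
      return 0;
    }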
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, a5, a4, a0, a2, v0, a3);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == v0);
return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == v0);
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
}
// static
@@ -77,15 +96,11 @@ constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- return a3;
+ return a2;
}
// static
-constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- return a4;
-}
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
// static
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -192,7 +207,9 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
- // TODO(v8:11421): Implement on this platform.
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
@@ -201,6 +218,9 @@ constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a1896624e5..a2e37bd9af 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -22,7 +22,6 @@
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -209,8 +208,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -220,8 +219,8 @@ void TurboAssembler::SaveRegisters(RegList registers) {
MultiPush(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -231,97 +230,81 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- // Inline the trampoline.
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(t9);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(t9);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
@@ -338,6 +321,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ DCHECK(!AreAliased(object, value, scratch));
Ld(scratch, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
@@ -369,7 +353,12 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ mov(slot_address, address);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
+
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
@@ -381,6 +370,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
+ li(slot_address, Operand(bit_cast<int64_t>(kZapValue + 20)));
}
}
@@ -2158,7 +2148,7 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
neg_d(fd, fs); // In delay slot.
bind(&is_nan);
dmfc1(scratch1, fs);
- li(scratch2, Double::kSignMask);
+ li(scratch2, base::Double::kSignMask);
Xor(scratch1, scratch1, scratch2);
dmtc1(scratch1, fd);
bind(&done);
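The NaN path of Neg_d negates by XOR-ing the IEEE-754 sign bit, which is what the dmfc1 / li(kSignMask) / Xor / dmtc1 sequence does; base::Double::kSignMask is the top bit of the 64-bit pattern. A standalone sketch of the same bit manipulation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    constexpr uint64_t kSignMask = 1ull << 63;

    double NegateViaSignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // dmfc1: move FP bits to GPR
      bits ^= kSignMask;                         // Xor with the sign mask
      double result;
      std::memcpy(&result, &bits, sizeof(result));  // dmtc1: move back to FP
      return result;
    }

    int main() {
      std::printf("%f\n", NegateViaSignBit(1.5));   // -1.5
      std::printf("%f\n", NegateViaSignBit(-0.0));  // 0.0
      return 0;
    }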
@@ -4353,15 +4343,12 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(t9, cond, rs, rt, bd);
return;
} else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin)) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t9, cond, rs, rt, bd);
return;
}
@@ -4432,15 +4419,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(t9, cond, rs, rt, bd);
return;
} else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin)) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t9, cond, rs, rt, bd);
return;
}
@@ -4462,11 +4446,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
Ld(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ld(destination, EntryFromBuiltinAsOperand(builtin));
+}
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin));
+}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
+ if (FLAG_code_comments) RecordComment("]");
+}
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips64r6) {
@@ -5226,8 +5224,9 @@ void MacroAssembler::LoadWeakValue(Register out, Register in,
And(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -5240,8 +5239,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -5347,19 +5347,12 @@ void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- int stack_offset = -3 * kPointerSize;
- const int fp_offset = 1 * kPointerSize;
- daddiu(sp, sp, stack_offset);
- stack_offset = -stack_offset - kPointerSize;
- Sd(ra, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- Sd(fp, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- li(t9, Operand(StackFrame::TypeToMarker(type)));
- Sd(t9, MemOperand(sp, stack_offset));
- // Adjust FP to point to saved FP.
- DCHECK_EQ(stack_offset, 0);
- Daddu(fp, sp, Operand(fp_offset));
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+ Push(kScratchReg);
+ }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -5473,11 +5466,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ld(cp, MemOperand(t8));
-#ifdef DEBUG
- li(t8,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(a3, MemOperand(t8));
-#endif
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch = temp.Acquire();
+ li(scratch, Operand(Context::kInvalidContext));
+ Sd(scratch, MemOperand(t8));
+ }
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
@@ -6078,9 +6072,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t9,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
@@ -6097,6 +6091,61 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
}
}
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, out;
+
+ Register scratch = kScratchReg;
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+
+ // Not an off-heap trampoline object, the entry point is at
+ // Code::raw_instruction_start().
+ Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_off_heap);
+ Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ Dlsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
+ Ld(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 054f3345d1..a71f09a67d 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64.h"
#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -128,8 +129,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// -------------------------------------------------------------------------
// Debugging.
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
@@ -215,10 +216,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void li(Register dst, const StringConstantBase* string,
LiFlags mode = OPTIMIZE_SIZE);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
+
+ inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
@@ -234,7 +236,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// patching.
void PatchAndJump(Address target);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
@@ -244,30 +246,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
- void LoadEntryFromBuiltinIndex(Register builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
- void LoadCodeObjectEntry(Register destination,
- Register code_object) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
- void CallCodeObject(Register code_object) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override {
- // TODO(mips): Implement.
- UNIMPLEMENTED();
- }
+ JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -345,18 +341,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PushArray(Register array, Register size, Register scratch,
Register scratch2, PushArrayOrder order = kNormal);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
@@ -746,7 +745,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Register destination, RootIndex index, Condition cond,
Register src1, const Operand& src2);
@@ -923,11 +922,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
-
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -1141,9 +1135,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support.
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// -------------------------------------------------------------------------
// Stack limit utilities
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 4d5c7a1d57..e3ca07a3c9 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -53,13 +53,13 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
}
OptimizedCompilationInfo::OptimizedCompilationInfo(
- Vector<const char> debug_name, Zone* zone, CodeKind code_kind)
+ base::Vector<const char> debug_name, Zone* zone, CodeKind code_kind)
: code_kind_(code_kind),
zone_(zone),
optimization_id_(kNoOptimizationId),
debug_name_(debug_name) {
SetTracingFlags(
- PassesFilter(debug_name, CStrVector(FLAG_trace_turbo_filter)));
+ PassesFilter(debug_name, base::CStrVector(FLAG_trace_turbo_filter)));
ConfigureFlags();
}
@@ -165,10 +165,10 @@ std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
if (!shared_info().is_null()) {
return shared_info()->DebugNameCStr();
}
- Vector<const char> name_vec = debug_name_;
- if (name_vec.empty()) name_vec = ArrayVector("unknown");
+ base::Vector<const char> name_vec = debug_name_;
+ if (name_vec.empty()) name_vec = base::ArrayVector("unknown");
std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
- base::Memcpy(name.get(), name_vec.begin(), name_vec.length());
+ memcpy(name.get(), name_vec.begin(), name_vec.length());
name[name_vec.length()] = '\0';
return name;
}
@@ -193,7 +193,6 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
#endif // V8_ENABLE_WEBASSEMBLY
default:
UNIMPLEMENTED();
- return StackFrame::NONE;
}
}
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index b5ad1c9816..b7ed0d29c4 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "src/base/vector.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/source-position-table.h"
#include "src/codegen/tick-counter.h"
@@ -18,7 +19,6 @@
#include "src/objects/objects.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
-#include "src/utils/vector.h"
namespace v8 {
@@ -114,7 +114,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
: OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
BytecodeOffset::None(), nullptr) {}
// Construct a compilation info for stub compilation, Wasm, and testing.
- OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
+ OptimizedCompilationInfo(base::Vector<const char> debug_name, Zone* zone,
CodeKind code_kind);
OptimizedCompilationInfo(const OptimizedCompilationInfo&) = delete;
@@ -131,8 +131,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
CodeKind code_kind() const { return code_kind_; }
- int32_t builtin_index() const { return builtin_index_; }
- void set_builtin_index(int32_t index) { builtin_index_ = index; }
+ Builtin builtin() const { return builtin_; }
+ void set_builtin(Builtin builtin) { builtin_ = builtin; }
BytecodeOffset osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
void SetNodeObserver(compiler::NodeObserver* observer) {
@@ -273,7 +273,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
PoisoningMitigationLevel::kDontPoison;
const CodeKind code_kind_;
- int32_t builtin_index_ = -1;
+ Builtin builtin_ = Builtin::kNoBuiltinId;
// We retain a reference to the bytecode array specifically to ensure it doesn't
// get flushed while we are optimizing the code.
@@ -311,7 +311,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
- Vector<const char> debug_name_;
+ base::Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
TickCounter tick_counter_;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 437f5f96c6..e0ecfffd9d 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -226,6 +226,9 @@ MemOperand::MemOperand(Register rn, int32_t offset)
MemOperand::MemOperand(Register ra, Register rb)
: ra_(ra), offset_(0), rb_(rb) {}
+MemOperand::MemOperand(Register ra, Register rb, int32_t offset)
+ : ra_(ra), offset_(offset), rb_(rb) {}
+
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
@@ -1823,72 +1826,84 @@ void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
}
void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
TX);
}
void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
TX);
}
void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
TX);
}
void Assembler::lxsibzx(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXSIBZX | rt.code() * B21 | src.ra().code() * B16 |
src.rb().code() * B11 | TX);
}
void Assembler::lxsihzx(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXSIHZX | rt.code() * B21 | src.ra().code() * B16 |
src.rb().code() * B11 | TX);
}
void Assembler::lxsiwzx(const Simd128Register rt, const MemOperand& src) {
+ CHECK(src.rb().is_valid());
int TX = 1;
emit(LXSIWZX | rt.code() * B21 | src.ra().code() * B16 |
src.rb().code() * B11 | TX);
}
-void Assembler::stxsdx(const Simd128Register rs, const MemOperand& src) {
+void Assembler::stxsdx(const Simd128Register rs, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
- emit(STXSDX | rs.code() * B21 | src.ra().code() * B16 |
- src.rb().code() * B11 | SX);
+ emit(STXSDX | rs.code() * B21 | dst.ra().code() * B16 |
+ dst.rb().code() * B11 | SX);
}
-void Assembler::stxsibx(const Simd128Register rs, const MemOperand& src) {
+void Assembler::stxsibx(const Simd128Register rs, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
- emit(STXSIBX | rs.code() * B21 | src.ra().code() * B16 |
- src.rb().code() * B11 | SX);
+ emit(STXSIBX | rs.code() * B21 | dst.ra().code() * B16 |
+ dst.rb().code() * B11 | SX);
}
-void Assembler::stxsihx(const Simd128Register rs, const MemOperand& src) {
+void Assembler::stxsihx(const Simd128Register rs, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
- emit(STXSIHX | rs.code() * B21 | src.ra().code() * B16 |
- src.rb().code() * B11 | SX);
+ emit(STXSIHX | rs.code() * B21 | dst.ra().code() * B16 |
+ dst.rb().code() * B11 | SX);
}
-void Assembler::stxsiwx(const Simd128Register rs, const MemOperand& src) {
+void Assembler::stxsiwx(const Simd128Register rs, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
- emit(STXSIWX | rs.code() * B21 | src.ra().code() * B16 |
- src.rb().code() * B11 | SX);
+ emit(STXSIWX | rs.code() * B21 | dst.ra().code() * B16 |
+ dst.rb().code() * B11 | SX);
}
void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
SX);
}
void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
+ CHECK(dst.rb().is_valid());
int SX = 1;
emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
SX);
@@ -1897,7 +1912,8 @@ void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
int TX = 1;
CHECK(is_uint8(imm.immediate()));
- emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
+ emit(XXSPLTIB | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0xFF) * B11 |
+ TX);
}
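The added masking clamps each operand to its field width (5 bits for a register code, 8 bits for the xxspltib immediate) before shifting it into place, so an out-of-range input cannot corrupt neighbouring fields. A sketch of the encoding arithmetic, with a placeholder opcode value:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t B11 = 1u << 11;
    constexpr uint32_t B21 = 1u << 21;
    constexpr uint32_t kOpcode = 0xF0000000u;  // hypothetical opcode bits

    uint32_t EncodeXxspltib(int rt, int imm) {
      return kOpcode | (rt & 0x1F) * B21 | (imm & 0xFF) * B11 | 1 /* TX */;
    }

    int main() {
      std::printf("0x%08x\n", EncodeXxspltib(3, 0x7F));
      return 0;
    }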
// Pseudo instructions.
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 1d7ecf76d7..37d5674078 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -41,16 +41,17 @@
#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#include <stdio.h>
+
#include <memory>
#include <vector>
+#include "src/base/numbers/double.h"
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/register-ppc.h"
-#include "src/numbers/double.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -136,6 +137,8 @@ class V8_EXPORT_PRIVATE MemOperand {
explicit MemOperand(Register ra, Register rb);
+ explicit MemOperand(Register ra, Register rb, int32_t offset);
+
int32_t offset() const { return offset_; }
// PowerPC - base register
@@ -485,17 +488,26 @@ class Assembler : public AssemblerBase {
inline void name(const Simd128Register rt, const Simd128Register rb) { \
vx_form(instr_name, rt, rb); \
}
+#define DECLARE_PPC_VX_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Operand& imm) { \
+ vx_form(instr_name, rt, imm); \
+ }
inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
const Operand& imm) {
- emit(instr | rt.code() * B21 | imm.immediate() * B16 | rb.code() * B11);
+ emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16 |
+ (rb.code() & 0x1F) * B11);
}
inline void vx_form(Instr instr, Simd128Register rt, Simd128Register ra,
Simd128Register rb) {
- emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+ emit(instr | (rt.code() & 0x1F) * B21 | ra.code() * B16 |
+ (rb.code() & 0x1F) * B11);
}
inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb) {
- emit(instr | rt.code() * B21 | rb.code() * B11);
+ emit(instr | (rt.code() & 0x1F) * B21 | (rb.code() & 0x1F) * B11);
+ }
+ inline void vx_form(Instr instr, Simd128Register rt, const Operand& imm) {
+ emit(instr | (rt.code() & 0x1F) * B21 | (imm.immediate() & 0x1F) * B16);
}
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
@@ -504,9 +516,11 @@ class Assembler : public AssemblerBase {
PPC_VX_OPCODE_D_FORM_LIST(
DECLARE_PPC_VX_INSTRUCTIONS_C_FORM) /* OPCODE_D_FORM can use
INSTRUCTIONS_C_FORM */
+ PPC_VX_OPCODE_E_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_E_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_E_FORM
#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register ra, \
@@ -516,8 +530,8 @@ class Assembler : public AssemblerBase {
inline void va_form(Instr instr, Simd128Register rt, Simd128Register ra,
Simd128Register rb, Simd128Register rc) {
- emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- rc.code() * B6);
+ emit(instr | (rt.code() & 0x1F) * B21 | (ra.code() & 0x1F) * B16 |
+ (rb.code() & 0x1F) * B11 | (rc.code() & 0x1F) * B6);
}
PPC_VA_OPCODE_A_FORM_LIST(DECLARE_PPC_VA_INSTRUCTIONS_A_FORM)
@@ -531,8 +545,8 @@ class Assembler : public AssemblerBase {
inline void vc_form(Instr instr, Simd128Register rt, Simd128Register ra,
Simd128Register rb, int rc) {
- emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
- rc * B10);
+ emit(instr | (rt.code() & 0x1F) * B21 | (ra.code() & 0x1F) * B16 |
+ (rb.code() & 0x1F) * B11 | rc * B10);
}
PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
@@ -1037,12 +1051,12 @@ class Assembler : public AssemblerBase {
void lxsibzx(const Simd128Register rt, const MemOperand& src);
void lxsihzx(const Simd128Register rt, const MemOperand& src);
void lxsiwzx(const Simd128Register rt, const MemOperand& src);
- void stxsdx(const Simd128Register rs, const MemOperand& src);
- void stxsibx(const Simd128Register rs, const MemOperand& src);
- void stxsihx(const Simd128Register rs, const MemOperand& src);
- void stxsiwx(const Simd128Register rs, const MemOperand& src);
- void stxvd(const Simd128Register rt, const MemOperand& src);
- void stxvx(const Simd128Register rt, const MemOperand& src);
+ void stxsdx(const Simd128Register rs, const MemOperand& dst);
+ void stxsibx(const Simd128Register rs, const MemOperand& dst);
+ void stxsihx(const Simd128Register rs, const MemOperand& dst);
+ void stxsiwx(const Simd128Register rs, const MemOperand& dst);
+ void stxvd(const Simd128Register rt, const MemOperand& dst);
+ void stxvx(const Simd128Register rt, const MemOperand& dst);
void xxspltib(const Simd128Register rt, const Operand& imm);
// Pseudo instructions
@@ -1240,7 +1254,7 @@ class Assembler : public AssemblerBase {
!RelocInfo::IsWasmCall(rmode) && !RelocInfo::IsWasmStubCall(rmode));
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
- ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
+ ConstantPoolEntry::Access ConstantPoolAddEntry(base::Double value) {
return constant_pool_builder_.AddEntry(pc_offset(), value);
}
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 56732b7f8b..7dfb3d427d 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -86,6 +86,9 @@ const int kLoadDoubleMaxReachBits = 15;
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 128;
+// sign-extend the least significant 5-bits of value <imm>
+#define SIGN_EXT_IMM5(imm) ((static_cast<int>(imm) << 27) >> 27)
+
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
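
A quick worked example of the new 5-bit variant (a sketch, not part of the patch; it inherits the same shift behaviour the existing 16-bit macro already relies on):

// Illustrative only: bit 4 of the immediate becomes the sign.
int SignExtImm5(int imm) { return (imm << 27) >> 27; }
// SignExtImm5(0x0F) == 15, SignExtImm5(0x10) == -16, SignExtImm5(0x1F) == -1.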
@@ -1237,6 +1240,12 @@ using Instr = uint32_t;
V(stfsux, STFSUX, 0x7C00056E) \
/* Store Floating-Point Single Indexed */ \
V(stfsx, STFSX, 0x7C00052E) \
+ /* Store Doubleword Byte-Reverse Indexed */ \
+ V(stdbrx, STDBRX, 0x7C000528) \
+ /* Store Word Byte-Reverse Indexed */ \
+ V(stwbrx, STWBRX, 0x7C00052C) \
+ /* Store Halfword Byte-Reverse Indexed */ \
+ V(sthbrx, STHBRX, 0x7C00072C) \
/* Load Vector Indexed */ \
V(lvx, LVX, 0x7C0000CE) \
/* Store Vector Indexed */ \
@@ -1283,8 +1292,6 @@ using Instr = uint32_t;
V(lwax, LWAX, 0x7C0002AA) \
/* Parity Doubleword */ \
V(prtyd, PRTYD, 0x7C000174) \
- /* Store Doubleword Byte-Reverse Indexed */ \
- V(stdbrx, STDBRX, 0x7C000528) \
/* Trap Doubleword */ \
V(td, TD, 0x7C000088) \
/* Branch Conditional to Branch Target Address Register */ \
@@ -1309,10 +1316,6 @@ using Instr = uint32_t;
V(nand, NAND, 0x7C0003B8) \
/* Parity Word */ \
V(prtyw, PRTYW, 0x7C000134) \
- /* Store Halfword Byte-Reverse Indexed */ \
- V(sthbrx, STHBRX, 0x7C00072C) \
- /* Store Word Byte-Reverse Indexed */ \
- V(stwbrx, STWBRX, 0x7C00052C) \
/* Synchronize */ \
V(sync, SYNC, 0x7C0004AC) \
/* Trap Word */ \
@@ -2443,6 +2446,14 @@ using Instr = uint32_t;
/* Vector Negate Doubleword */ \
V(vnegd, VNEGD, 0x10070602)
+#define PPC_VX_OPCODE_E_FORM_LIST(V) \
+ /* Vector Splat Immediate Signed Byte */ \
+ V(vspltisb, VSPLTISB, 0x1000030C) \
+ /* Vector Splat Immediate Signed Halfword */ \
+ V(vspltish, VSPLTISH, 0x1000034C) \
+ /* Vector Splat Immediate Signed Word */ \
+ V(vspltisw, VSPLTISW, 0x1000038C)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2548,12 +2559,6 @@ using Instr = uint32_t;
V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
/* Vector Shift Left */ \
V(vsl, VSL, 0x100001C4) \
- /* Vector Splat Immediate Signed Byte */ \
- V(vspltisb, VSPLTISB, 0x1000030C) \
- /* Vector Splat Immediate Signed Halfword */ \
- V(vspltish, VSPLTISH, 0x1000034C) \
- /* Vector Splat Immediate Signed Word */ \
- V(vspltisw, VSPLTISW, 0x1000038C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
/* Vector Subtract & write Carry Unsigned Quadword */ \
@@ -2600,6 +2605,7 @@ using Instr = uint32_t;
PPC_VX_OPCODE_B_FORM_LIST(V) \
PPC_VX_OPCODE_C_FORM_LIST(V) \
PPC_VX_OPCODE_D_FORM_LIST(V) \
+ PPC_VX_OPCODE_E_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
@@ -2945,6 +2951,7 @@ class Instruction {
PPC_VX_OPCODE_A_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_E_FORM_LIST(OPCODE_CASES)
PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index 69529a3ce6..adc36e2407 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -19,19 +19,38 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs & r3.bit());
+ if (argc >= 2) DCHECK(allocatable_regs & r4.bit());
+ if (argc >= 3) DCHECK(allocatable_regs & r5.bit());
+ if (argc >= 4) DCHECK(allocatable_regs & r6.bit());
+ if (argc >= 5) DCHECK(allocatable_regs & r7.bit());
+ if (argc >= 6) DCHECK(allocatable_regs & r8.bit());
+ if (argc >= 7) DCHECK(allocatable_regs & r9.bit());
+ if (argc >= 8) DCHECK(allocatable_regs & r10.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
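
A minimal sketch (not V8 code) of the membership test those DCHECKs are meant to express; a RegList is assumed here to be a plain bitmask indexed by register code:

#include <cstdint>
// Illustrative only: a register belongs to the allocatable set iff its bit is
// set, i.e. a bitwise-AND test rather than a bitwise-OR.
constexpr bool ContainsReg(uint64_t reg_list, int reg_code) {
  return (reg_list & (uint64_t{1} << reg_code)) != 0;
}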
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(r4, r8, r7, r5, r3);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r3);
return RegisterArray(r3, r4, r5, r6, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r3);
+ return RegisterArray(r3, r4, r5, r6, cp);
}
// static
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index e9bce8411f..03a197f9fb 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -21,7 +21,6 @@
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -37,6 +36,22 @@
namespace v8 {
namespace internal {
+namespace {
+
+// Simd and floating point registers are not shared. For WebAssembly we save
+// both sets of registers; if we are not running Wasm, we can get away with
+// only saving FP registers.
+#if V8_ENABLE_WEBASSEMBLY
+constexpr int kStackSavedSavedFPSizeInBytes =
+ (kNumCallerSavedDoubles * kSimd128Size) +
+ (kNumCallerSavedDoubles * kDoubleSize);
+#else
+constexpr int kStackSavedSavedFPSizeInBytes =
+ kNumCallerSavedDoubles * kDoubleSize;
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
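
As a rough size check for the constant defined above (illustrative, with a hypothetical register count; kDoubleSize is 8 and kSimd128Size is 16 bytes):

// Illustrative only: each caller-saved double costs 8 bytes, and when Wasm is
// enabled every one of them also gets a 16-byte Simd slot.
constexpr int SavedFPAreaBytes(int num_caller_saved_doubles, bool wasm_enabled) {
  return wasm_enabled ? num_caller_saved_doubles * (16 + 8)
                      : num_caller_saved_doubles * 8;
}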
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -57,7 +72,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += kNumCallerSavedDoubles * kDoubleSize;
+ bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
@@ -82,8 +97,8 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPushDoubles(kCallerSavedDoubles);
- bytes += kNumCallerSavedDoubles * kDoubleSize;
+ MultiPushF64AndV128(kCallerSavedDoubles, kCallerSavedDoubles);
+ bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
@@ -93,8 +108,8 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPopDoubles(kCallerSavedDoubles);
- bytes += kNumCallerSavedDoubles * kDoubleSize;
+ MultiPopF64AndV128(kCallerSavedDoubles, kCallerSavedDoubles);
+ bytes += kStackSavedSavedFPSizeInBytes;
}
RegList exclusions = 0;
@@ -141,11 +156,8 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mr(destination, kRootRegister);
- } else if (is_int16(offset)) {
- addi(destination, kRootRegister, Operand(offset));
} else {
- mov(destination, Operand(offset));
- add(destination, kRootRegister, destination);
+ AddS64(destination, kRootRegister, Operand(offset), destination);
}
}
@@ -176,14 +188,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin_index = Builtin::kNoBuiltinId;
bool target_is_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
Register scratch = ip;
- int offset = code->builtin_index() * kSystemPointerSize +
+ int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadU64(scratch, MemOperand(kRootRegister, offset), r0);
if (cond != al) b(NegateCondition(cond), &skip, cr);
@@ -194,11 +206,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// Inline the trampoline.
Label skip;
RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(ip);
bind(&skip);
@@ -265,13 +275,13 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin_index = Builtin::kNoBuiltinId;
bool target_is_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
- int offset = code->builtin_index() * kSystemPointerSize +
+ int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadU64(ip, MemOperand(kRootRegister, offset));
if (cond != al) b(NegateCondition(cond), &skip);
@@ -281,11 +291,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
// not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
Label skip;
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
@@ -299,7 +307,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Drop(int count) {
if (count > 0) {
- Add(sp, sp, count * kSystemPointerSize, r0);
+ AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
}
}
@@ -332,8 +340,8 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
mtctr(size);
bind(&loop);
- LoadPU(scratch2, MemOperand(scratch, -kSystemPointerSize));
- StorePU(scratch2, MemOperand(sp, -kSystemPointerSize));
+ LoadU64WithUpdate(scratch2, MemOperand(scratch, -kSystemPointerSize));
+ StoreU64WithUpdate(scratch2, MemOperand(sp, -kSystemPointerSize));
bdnz(&loop);
bind(&done);
@@ -345,8 +353,8 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
subi(scratch, array, Operand(kSystemPointerSize));
bind(&loop);
- LoadPU(scratch2, MemOperand(scratch, kSystemPointerSize));
- StorePU(scratch2, MemOperand(sp, -kSystemPointerSize));
+ LoadU64WithUpdate(scratch2, MemOperand(scratch, kSystemPointerSize));
+ StoreU64WithUpdate(scratch2, MemOperand(sp, -kSystemPointerSize));
bdnz(&loop);
bind(&done);
}
@@ -402,7 +410,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) {
for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kSystemPointerSize;
- StoreP(ToRegister(i), MemOperand(location, stack_offset));
+ StoreU64(ToRegister(i), MemOperand(location, stack_offset));
}
}
}
@@ -475,6 +483,70 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
+void TurboAssembler::MultiPushF64AndV128(RegList dregs, RegList simd_regs,
+ Register location) {
+ MultiPushDoubles(dregs);
+#if V8_ENABLE_WEBASSEMBLY
+ bool generating_builtins =
+ isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
+ if (generating_builtins) {
+ // V8 uses the same set of fp param registers as Simd param registers.
+ // As these are two different register sets on PPC, we must make sure to
+ // also save the Simd registers when Simd support is enabled.
+ // Check the comments under crrev.com/c/2645694 for more details.
+ Label push_empty_simd, simd_pushed;
+ Move(ip, ExternalReference::supports_wasm_simd_128_address());
+ LoadU8(ip, MemOperand(ip), r0);
+ cmpi(ip, Operand::Zero()); // If > 0 then simd is available.
+ ble(&push_empty_simd);
+ MultiPushV128(simd_regs);
+ b(&simd_pushed);
+ bind(&push_empty_simd);
+ // We still need to allocate empty space on the stack even if we
+ // are not pushing Simd registers (see kFixedFrameSizeFromFp).
+ addi(sp, sp,
+ Operand(-static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ bind(&simd_pushed);
+ } else {
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ MultiPushV128(simd_regs);
+ } else {
+ addi(sp, sp,
+ Operand(-static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ }
+ }
+#endif
+}
+
+void TurboAssembler::MultiPopF64AndV128(RegList dregs, RegList simd_regs,
+ Register location) {
+#if V8_ENABLE_WEBASSEMBLY
+ bool generating_builtins =
+ isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
+ if (generating_builtins) {
+ Label pop_empty_simd, simd_popped;
+ Move(ip, ExternalReference::supports_wasm_simd_128_address());
+ LoadU8(ip, MemOperand(ip), r0);
+ cmpi(ip, Operand::Zero()); // If > 0 then simd is available.
+ ble(&pop_empty_simd);
+ MultiPopV128(simd_regs);
+ b(&simd_popped);
+ bind(&pop_empty_simd);
+ addi(sp, sp,
+ Operand(static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ bind(&simd_popped);
+ } else {
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ MultiPopV128(simd_regs);
+ } else {
+ addi(sp, sp,
+ Operand(static_cast<int8_t>(NumRegs(simd_regs)) * kSimd128Size));
+ }
+ }
+#endif
+ MultiPopDoubles(dregs);
+}
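
For orientation, a sketch of the stack area these two helpers manage (higher addresses at the top; the pop above simply unwinds the push in reverse order):

// Illustrative layout only:
//   [ caller-saved double regs   ]  <- pushed first by MultiPushF64AndV128
//   [ Simd regs or reserved gap  ]  <- pushed (or merely reserved) second
//   [ sp after the push          ]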
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
@@ -502,42 +574,26 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
}
}
-void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
+ Register scratch) {
if (SmiValuesAre31Bits()) {
- lwz(dst, src);
+ LoadU32(dst, src, scratch);
} else {
- LoadU64(dst, src);
+ LoadU64(dst, src, scratch);
}
SmiUntag(dst, rc);
}
-void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src,
- RCBit rc) {
- SmiUntag(dst, src, rc);
-}
-
-void TurboAssembler::StoreTaggedFieldX(const Register& value,
- const MemOperand& dst_field_operand,
- const Register& scratch) {
- if (COMPRESS_POINTERS_BOOL) {
- RecordComment("[ StoreTagged");
- stwx(value, dst_field_operand);
- RecordComment("]");
- } else {
- StorePX(value, dst_field_operand);
- }
-}
-
void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
RecordComment("[ StoreTagged");
- StoreWord(value, dst_field_operand, scratch);
+ StoreU32(value, dst_field_operand, scratch);
RecordComment("]");
} else {
- StoreP(value, dst_field_operand, scratch);
+ StoreU64(value, dst_field_operand, scratch);
}
}
@@ -588,7 +644,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
}
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
+ Register value, Register slot_address,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
@@ -606,17 +662,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- Add(dst, object, offset - kHeapObjectTag, r0);
+ AddS64(slot_address, object, Operand(offset - kHeapObjectTag), r0);
if (FLAG_debug_code) {
Label ok;
- andi(r0, dst, Operand(kTaggedSize - 1));
+ andi(r0, slot_address, Operand(kTaggedSize - 1));
beq(&ok, cr0);
stop();
bind(&ok);
}
- RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- SmiCheck::kOmit);
+ RecordWrite(object, slot_address, value, lr_status, save_fp,
+ remembered_set_action, SmiCheck::kOmit);
bind(&done);
@@ -624,24 +680,23 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
- mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+ mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
regs |= Register::from_code(i).bit();
}
}
-
MultiPush(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -651,111 +706,99 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
push(object);
- push(address);
-
- pop(slot_parameter);
+ push(slot_address);
+ pop(slot_address_parameter);
pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
push(object);
- push(address);
-
- pop(slot_parameter);
+ push(slot_address);
+ pop(slot_address_parameter);
pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ // Use {near_call} for direct Wasm call within a module.
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ auto builtin_index =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+ } else {
+ Handle<Code> code_target =
+ isolate()->builtins()->code_handle(builtin_index);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK(object != value);
+ DCHECK(!AreAliased(object, value, slot_address));
if (FLAG_debug_code) {
- LoadTaggedPointerField(r0, MemOperand(address));
- cmp(r0, value);
+ LoadTaggedPointerField(r0, MemOperand(slot_address));
+ CmpS64(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -785,18 +828,21 @@ void MacroAssembler::RecordWrite(Register object, Register address,
mflr(r0);
push(r0);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStubSaveRegisters(object, slot_address, remembered_set_action,
+ fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
pop(r0);
mtlr(r0);
}
+ if (FLAG_debug_code) mov(slot_address, Operand(kZapValue));
+
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
}
@@ -1183,7 +1229,9 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
}
mtlr(r0);
frame_ends = pc_offset();
- Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
+ AddS64(sp, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment),
+ r0);
mr(fp, ip);
return frame_ends;
}
@@ -1224,20 +1272,20 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
if (FLAG_debug_code) {
li(r8, Operand::Zero());
- StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
if (FLAG_enable_embedded_constant_pool) {
- StoreP(kConstantPoolRegister,
- MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ StoreU64(kConstantPoolRegister,
+ MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
// Save the frame pointer and the context in top.
Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
- StoreP(fp, MemOperand(r8));
+ StoreU64(fp, MemOperand(r8));
Move(r8,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- StoreP(cp, MemOperand(r8));
+ StoreU64(cp, MemOperand(r8));
// Optionally save all volatile double registers.
if (save_doubles) {
@@ -1248,7 +1296,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// since the sp slot and code slot were pushed after the fp.
}
- addi(sp, sp, Operand(-stack_space * kSystemPointerSize));
+ AddS64(sp, sp, Operand(-stack_space * kSystemPointerSize));
// Allocate and align the frame preparing for calling the runtime
// function.
@@ -1259,13 +1307,14 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
}
li(r0, Operand::Zero());
- StorePU(r0,
- MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
+ StoreU64WithUpdate(
+ r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
// Set the exit frame sp value to point just before the return address
// location.
- addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
- StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ AddS64(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize),
+ r0);
+ StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
int TurboAssembler::ActivationFrameAlignment() {
@@ -1293,7 +1342,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
const int kNumRegs = kNumCallerSavedDoubles;
const int offset =
(ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
- addi(r6, fp, Operand(-offset));
+ AddS64(r6, fp, Operand(-offset), r0);
MultiPopDoubles(kCallerSavedDoubles, r6);
}
@@ -1301,7 +1350,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
li(r6, Operand::Zero());
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
- StoreP(r6, MemOperand(ip));
+ StoreU64(r6, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
Move(ip,
@@ -1312,7 +1361,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
mov(r6, Operand(Context::kInvalidContext));
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- StoreP(r6, MemOperand(ip));
+ StoreU64(r6, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return.
@@ -1346,17 +1395,18 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register dst_reg = scratch0;
ShiftLeftImm(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
add(dst_reg, fp, dst_reg);
- addi(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ AddS64(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize),
+ scratch0);
Register src_reg = caller_args_count;
// Calculate the end of source area. +kSystemPointerSize is for the receiver.
ShiftLeftImm(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
add(src_reg, sp, src_reg);
- addi(src_reg, src_reg, Operand(kSystemPointerSize));
+ AddS64(src_reg, src_reg, Operand(kSystemPointerSize), scratch0);
if (FLAG_debug_code) {
- cmpl(src_reg, dst_reg);
+ CmpU64(src_reg, dst_reg);
Check(lt, AbortReason::kStackAccessBelowStackPointer);
}
@@ -1374,8 +1424,8 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
addi(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
mtctr(tmp_reg);
bind(&loop);
- LoadPU(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
- StorePU(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
+ LoadU64WithUpdate(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
+ StoreU64WithUpdate(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
bdnz(&loop);
// Leave current frame.
@@ -1408,7 +1458,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
ShiftLeftImm(r0, num_args, Operand(kSystemPointerSizeLog2));
- cmp(scratch, r0);
+ CmpS64(scratch, r0);
ble(stack_overflow); // Signed comparison.
}
@@ -1427,7 +1477,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
mov(r0, Operand(kDontAdaptArgumentsSentinel));
- cmp(expected_parameter_count, r0);
+ CmpS64(expected_parameter_count, r0);
beq(&regular_invoke);
// If overapplication or if the actual argument count is equal to the
@@ -1454,8 +1504,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
mtctr(r0);
bind(&copy);
- LoadPU(r0, MemOperand(src, kSystemPointerSize));
- StorePU(r0, MemOperand(dest, kSystemPointerSize));
+ LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize));
+ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
bdnz(&copy);
}
@@ -1466,7 +1516,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Label loop;
bind(&loop);
- StorePU(scratch, MemOperand(r8, kSystemPointerSize));
+ StoreU64WithUpdate(scratch, MemOperand(r8, kSystemPointerSize));
bdnz(&loop);
}
b(&regular_invoke);
@@ -1490,7 +1540,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Move(r7, debug_hook_active);
- LoadByte(r7, MemOperand(r7), r0);
+ LoadU8(r7, MemOperand(r7), r0);
extsb(r7, r7);
CmpSmiLiteral(r7, Smi::zero(), r0);
beq(&skip_hook);
@@ -1550,8 +1600,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- LoadTaggedPointerField(code,
- FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedPointerField(
+ code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -1579,11 +1629,12 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register temp_reg = r7;
LoadTaggedPointerField(
- temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- LoadHalfWord(expected_reg,
- FieldMemOperand(
- temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+ temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
+ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
+ r0);
+ LoadU16(expected_reg,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
type);
@@ -1600,7 +1651,8 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);
// Get the function and setup the context.
- LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
+ r0);
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -1621,7 +1673,7 @@ void MacroAssembler::PushStackHandler() {
push(r0);
// Set this new handler as the current one.
- StoreP(sp, MemOperand(r3));
+ StoreU64(sp, MemOperand(r3));
}
void MacroAssembler::PopStackHandler() {
@@ -1631,7 +1683,7 @@ void MacroAssembler::PopStackHandler() {
pop(r4);
Move(ip,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- StoreP(r4, MemOperand(ip));
+ StoreU64(r4, MemOperand(ip));
Drop(1); // Drop padding.
}
@@ -1658,7 +1710,7 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
DCHECK_LT(lower_limit, higher_limit);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- LoadHalfWord(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
mov(scratch, Operand(lower_limit));
sub(scratch, type_reg, scratch);
cmpli(scratch, Operand(higher_limit - lower_limit));
@@ -1667,7 +1719,7 @@ void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
DCHECK(obj != r0);
LoadRoot(r0, index);
- cmp(obj, r0);
+ CmpS64(obj, r0);
}
void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
@@ -1718,7 +1770,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
original_left = overflow_dst;
mr(original_left, left);
}
- Add(dst, left, right, scratch);
+ AddS64(dst, left, Operand(right), scratch);
xor_(overflow_dst, dst, original_left);
if (right >= 0) {
and_(overflow_dst, overflow_dst, dst, SetRC);
@@ -1758,6 +1810,85 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
}
+void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, DoubleRegister scratch) {
+ Label check_zero, return_left, return_right, return_nan, done;
+ fcmpu(lhs, rhs);
+ bunordered(&return_nan);
+ beq(&check_zero);
+ ble(&return_left);
+ b(&return_right);
+
+ bind(&check_zero);
+ fcmpu(lhs, kDoubleRegZero);
+ /* left == right != 0. */
+ bne(&return_left);
+ /* At this point, both left and right are either 0 or -0. */
+ /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */
+ /* being different registers is most efficiently expressed */
+ /* as -((-L) - R). */
+ fneg(scratch, lhs);
+ if (scratch == rhs) {
+ fadd(dst, scratch, rhs);
+ } else {
+ fsub(dst, scratch, rhs);
+ }
+ fneg(dst, dst);
+ b(&done);
+
+ bind(&return_nan);
+ /* If left or right are NaN, fadd propagates the appropriate one.*/
+ fadd(dst, lhs, rhs);
+ b(&done);
+
+ bind(&return_right);
+ if (rhs != dst) {
+ fmr(dst, rhs);
+ }
+ b(&done);
+
+ bind(&return_left);
+ if (lhs != dst) {
+ fmr(dst, lhs);
+ }
+ bind(&done);
+}
+
+void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, DoubleRegister scratch) {
+ Label check_zero, return_left, return_right, return_nan, done;
+ fcmpu(lhs, rhs);
+ bunordered(&return_nan);
+ beq(&check_zero);
+ bge(&return_left);
+ b(&return_right);
+
+ bind(&check_zero);
+ fcmpu(lhs, kDoubleRegZero);
+ /* left == right != 0. */
+ bne(&return_left);
+ /* At this point, both left and right are either 0 or -0. */
+ fadd(dst, lhs, rhs);
+ b(&done);
+
+ bind(&return_nan);
+ /* If left or right are NaN, fadd propagates the appropriate one.*/
+ fadd(dst, lhs, rhs);
+ b(&done);
+
+ bind(&return_right);
+ if (rhs != dst) {
+ fmr(dst, rhs);
+ }
+ b(&done);
+
+ bind(&return_left);
+ if (lhs != dst) {
+ fmr(dst, lhs);
+ }
+ bind(&done);
+}
+
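
The ±0 handling in MinF64/MaxF64 leans on IEEE-754 addition; a scalar sketch of the same trick (illustrative, not V8 code):

#include <cmath>
// Illustrative only, valid when both inputs are zeros of possibly different
// signs: -((-l) + (-r)) yields -0.0 if either input is -0.0 (the min), while
// l + r yields +0.0 if either input is +0.0 (the max).
double MinOfZeros(double l, double r) { return -((-l) + (-r)); }
double MaxOfZeros(double l, double r) { return l + r; }
// std::signbit(MinOfZeros(+0.0, -0.0)) is true;
// std::signbit(MaxOfZeros(+0.0, -0.0)) is false.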
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
@@ -1768,7 +1899,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
cmpli(scratch, Operand(higher_limit - lower_limit));
} else {
mov(scratch, Operand(higher_limit));
- cmpl(value, scratch);
+ CmpU64(value, scratch);
}
ble(on_in_range);
}
@@ -1877,15 +2008,16 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- cmpwi(in, Operand(kClearedWeakHeapObjectLower32));
+ CmpS32(in, Operand(kClearedWeakHeapObjectLower32), r0);
beq(target_if_cleared);
mov(r0, Operand(~kWeakHeapObjectMask));
and_(out, in, r0);
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -1898,8 +2030,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -1964,15 +2097,16 @@ void TurboAssembler::Abort(AbortReason reason) {
void TurboAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
- FieldMemOperand(object, HeapObject::kMapOffset));
+ FieldMemOperand(object, HeapObject::kMapOffset), r0);
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
- dst, FieldMemOperand(
- dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
+ dst,
+ FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
+ r0);
+ LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}
void MacroAssembler::AssertNotSmi(Register object) {
@@ -2102,12 +2236,13 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Make stack end at alignment and make room for stack arguments
// -- preserving original value of sp.
mr(scratch, sp);
- addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize));
+ AddS64(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize),
+ scratch);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp,
Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
- StoreP(scratch,
- MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
+ StoreU64(scratch,
+ MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
// Make room for stack arguments
stack_space += stack_passed_arguments;
@@ -2115,7 +2250,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Allocate frame with required slots to make ABI work.
li(r0, Operand::Zero());
- StorePU(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
+ StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
@@ -2182,10 +2317,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// See x64 code for reasoning about how to address the isolate data fields.
if (root_array_available()) {
LoadPC(r0);
- StoreP(r0, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_pc_offset()));
- StoreP(fp, MemOperand(kRootRegister,
- IsolateData::fast_c_call_caller_fp_offset()));
+ StoreU64(r0, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ StoreU64(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
} else {
DCHECK_NOT_NULL(isolate());
Push(addr_scratch);
@@ -2193,10 +2328,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Move(addr_scratch,
ExternalReference::fast_c_call_caller_pc_address(isolate()));
LoadPC(r0);
- StoreP(r0, MemOperand(addr_scratch));
+ StoreU64(r0, MemOperand(addr_scratch));
Move(addr_scratch,
ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreP(fp, MemOperand(addr_scratch));
+ StoreU64(fp, MemOperand(addr_scratch));
Pop(addr_scratch);
}
mtlr(scratch);
@@ -2226,7 +2361,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
mov(zero_scratch, Operand::Zero());
if (root_array_available()) {
- StoreP(
+ StoreU64(
zero_scratch,
MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
} else {
@@ -2234,7 +2369,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Push(addr_scratch);
Move(addr_scratch,
ExternalReference::fast_c_call_caller_fp_address(isolate()));
- StoreP(zero_scratch, MemOperand(addr_scratch));
+ StoreU64(zero_scratch, MemOperand(addr_scratch));
Pop(addr_scratch);
}
@@ -2243,9 +2378,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
if (ActivationFrameAlignment() > kSystemPointerSize) {
- LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize));
+ LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize), r0);
} else {
- addi(sp, sp, Operand(stack_space * kSystemPointerSize));
+ AddS64(sp, sp, Operand(stack_space * kSystemPointerSize), r0);
}
}
@@ -2287,8 +2422,8 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
mov(dst, Operand(smi));
}
-void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
- Register scratch) {
+void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
+ base::Double value, Register scratch) {
if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
!(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
@@ -2540,50 +2675,82 @@ void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::Add(Register dst, Register src, intptr_t value,
- Register scratch) {
- if (is_int16(value)) {
- addi(dst, src, Operand(value));
+void TurboAssembler::AddS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ add(dst, src, value, s, r);
+}
+
+void TurboAssembler::AddS64(Register dst, Register src, const Operand& value,
+ Register scratch, OEBit s, RCBit r) {
+ if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
+ addi(dst, src, value);
} else {
- mov(scratch, Operand(value));
- add(dst, src, scratch);
+ mov(scratch, value);
+ add(dst, src, scratch, s, r);
}
}
-void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
- CRegister cr) {
+void TurboAssembler::SubS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ sub(dst, src, value, s, r);
+}
+
+void TurboAssembler::SubS64(Register dst, Register src, const Operand& value,
+ Register scratch, OEBit s, RCBit r) {
+ if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) {
+ subi(dst, src, value);
+ } else {
+ mov(scratch, value);
+ sub(dst, src, scratch, s, r);
+ }
+}
+
+void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) {
+ cmp(src1, src2, cr);
+}
+
+void TurboAssembler::CmpS64(Register src1, const Operand& src2,
+ Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
cmpi(src1, src2, cr);
} else {
mov(scratch, src2);
- cmp(src1, scratch, cr);
+ CmpS64(src1, scratch, cr);
}
}
-void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
- CRegister cr) {
+void TurboAssembler::CmpU64(Register src1, const Operand& src2,
+ Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
cmpli(src1, src2, cr);
} else {
mov(scratch, src2);
- cmpl(src1, scratch, cr);
+ CmpU64(src1, scratch, cr);
}
}
-void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
- CRegister cr) {
+void TurboAssembler::CmpU64(Register src1, Register src2, CRegister cr) {
+ cmpl(src1, src2, cr);
+}
+
+void TurboAssembler::CmpS32(Register src1, const Operand& src2,
+ Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
cmpwi(src1, src2, cr);
} else {
mov(scratch, src2);
- cmpw(src1, scratch, cr);
+ CmpS32(src1, scratch, cr);
}
}
-void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
+void TurboAssembler::CmpS32(Register src1, Register src2, CRegister cr) {
+ cmpw(src1, src2, cr);
+}
+
+void TurboAssembler::CmpU32(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
@@ -2594,6 +2761,10 @@ void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
}
}
+void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) {
+ cmplw(src1, src2, cr);
+}
+
void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
RCBit rc) {
if (rb.is_reg()) {
@@ -2647,27 +2818,27 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- Cmpwi(src1, Operand(smi), scratch, cr);
+ CmpS32(src1, Operand(smi), scratch, cr);
#else
LoadSmiLiteral(scratch, smi);
- cmp(src1, scratch, cr);
+ CmpS64(src1, scratch, cr);
#endif
}
void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- Cmpli(src1, Operand(smi), scratch, cr);
+ CmpU64(src1, Operand(smi), scratch, cr);
#else
LoadSmiLiteral(scratch, smi);
- cmpl(src1, scratch, cr);
+ CmpU64(src1, scratch, cr);
#endif
}
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
+ AddS64(dst, src, Operand(smi.ptr()), scratch);
#else
LoadSmiLiteral(scratch, smi);
add(dst, src, scratch);
@@ -2677,7 +2848,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
- Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
+ AddS64(dst, src, Operand(-(static_cast<intptr_t>(smi.ptr()))), scratch);
#else
LoadSmiLiteral(scratch, smi);
sub(dst, src, scratch);
@@ -2694,352 +2865,230 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#endif
}
-// Load a "pointer" sized value from the memory location
-void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
- Register scratch) {
- DCHECK_EQ(mem.rb(), no_reg);
- int offset = mem.offset();
- int misaligned = (offset & 3);
- int adj = (offset & 3) - 4;
- int alignedOffset = (offset & ~3) + 4;
-
- if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
- /* cannot use d-form */
- mov(scratch, Operand(offset));
- LoadPX(dst, MemOperand(mem.ra(), scratch));
- } else {
- if (misaligned) {
- // adjust base to conform to offset alignment requirements
- // Todo: enhance to use scratch if dst is unsuitable
- DCHECK_NE(dst, r0);
- addi(dst, mem.ra(), Operand(adj));
- ld(dst, MemOperand(dst, alignedOffset));
- } else {
- ld(dst, mem);
- }
- }
-}
-
-void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
+#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
+ { \
+ int offset = mem.offset(); \
+ \
+ if (mem.rb() == no_reg) { \
+ if (!is_int16(offset)) { \
+ /* cannot use d-form */ \
+ CHECK_NE(scratch, no_reg); \
+ mov(scratch, Operand(offset)); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } else { \
+ ri_op(reg, mem); \
+ } \
+ } else { \
+ if (offset == 0) { \
+ rr_op(reg, mem); \
+ } else if (is_int16(offset)) { \
+ CHECK_NE(scratch, no_reg); \
+ addi(scratch, mem.rb(), Operand(offset)); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } else { \
+ CHECK_NE(scratch, no_reg); \
+ mov(scratch, Operand(offset)); \
+ add(scratch, scratch, mem.rb()); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } \
+ } \
+ }
+
+#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
+ { \
+ int offset = mem.offset(); \
+ int misaligned = (offset & 3); \
+ \
+ if (mem.rb() == no_reg) { \
+ if (!is_int16(offset) || misaligned) { \
+ /* cannot use d-form */ \
+ CHECK_NE(scratch, no_reg); \
+ mov(scratch, Operand(offset)); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } else { \
+ ri_op(reg, mem); \
+ } \
+ } else { \
+ if (offset == 0) { \
+ rr_op(reg, mem); \
+ } else if (is_int16(offset)) { \
+ CHECK_NE(scratch, no_reg); \
+ addi(scratch, mem.rb(), Operand(offset)); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } else { \
+ CHECK_NE(scratch, no_reg); \
+ mov(scratch, Operand(offset)); \
+ add(scratch, scratch, mem.rb()); \
+ rr_op(reg, MemOperand(mem.ra(), scratch)); \
+ } \
+ } \
+ }
+
+#define MEM_OP_WITH_ALIGN_LIST(V) \
+ V(LoadU64, ld, ldx) \
+ V(LoadS32, lwa, lwax) \
+ V(StoreU64, std, stdx) \
+ V(StoreU64WithUpdate, stdu, stdux)
+
+#define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \
+ void TurboAssembler::name(Register reg, const MemOperand& mem, \
+ Register scratch) { \
+ GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \
+ }
+MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION)
+#undef MEM_OP_WITH_ALIGN_LIST
+#undef MEM_OP_WITH_ALIGN_FUNCTION
+
+#define MEM_OP_LIST(V) \
+ V(LoadU32, Register, lwz, lwzx) \
+ V(LoadS16, Register, lha, lhax) \
+ V(LoadU16, Register, lhz, lhzx) \
+ V(LoadU8, Register, lbz, lbzx) \
+ V(StoreU32, Register, stw, stwx) \
+ V(StoreU16, Register, sth, sthx) \
+ V(StoreU8, Register, stb, stbx) \
+ V(LoadF64, DoubleRegister, lfd, lfdx) \
+ V(LoadF32, DoubleRegister, lfs, lfsx) \
+ V(StoreF64, DoubleRegister, stfd, stfdx) \
+ V(StoreF32, DoubleRegister, stfs, stfsx) \
+ V(LoadU64WithUpdate, Register, ldu, ldux) \
+ V(LoadF64WithUpdate, DoubleRegister, lfdu, lfdux) \
+ V(LoadF32WithUpdate, DoubleRegister, lfsu, lfsux) \
+ V(StoreF64WithUpdate, DoubleRegister, stfdu, stfdux) \
+ V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux)
+
+#define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \
+ void TurboAssembler::name(result_t reg, const MemOperand& mem, \
+ Register scratch) { \
+ GenerateMemoryOperation(reg, mem, ri_op, rr_op); \
+ }
+MEM_OP_LIST(MEM_OP_FUNCTION)
+#undef MEM_OP_LIST
+#undef MEM_OP_FUNCTION
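
The generated accessors all follow the same D-form/X-form selection. A simplified sketch of what, say, the LoadU32 expansion does (illustrative; it only shows the base-plus-displacement case, while the real macro additionally handles MemOperands that carry an index register):

// void TurboAssembler::LoadU32(Register reg, const MemOperand& mem,
//                              Register scratch) {
//   if (is_int16(mem.offset())) {
//     lwz(reg, mem);                             // D-form: immediate offset
//   } else {
//     mov(scratch, Operand(mem.offset()));       // offset does not fit in 16 bits
//     lwzx(reg, MemOperand(mem.ra(), scratch));  // X-form: register offset
//   }
// }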
+
+void TurboAssembler::LoadS8(Register dst, const MemOperand& mem,
Register scratch) {
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- /* cannot use d-form */
- DCHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- LoadPUX(dst, MemOperand(mem.ra(), scratch));
- } else {
-#if V8_TARGET_ARCH_PPC64
- ldu(dst, mem);
-#else
- lwzu(dst, mem);
-#endif
- }
-}
-
-// Store a "pointer" sized value to the memory location
-void TurboAssembler::StoreP(Register src, const MemOperand& mem,
- Register scratch) {
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- /* cannot use d-form */
- DCHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- StorePX(src, MemOperand(mem.ra(), scratch));
- } else {
-#if V8_TARGET_ARCH_PPC64
- int misaligned = (offset & 3);
- if (misaligned) {
- // adjust base to conform to offset alignment requirements
- // a suitable scratch is required here
- DCHECK(scratch != no_reg);
- if (scratch == r0) {
- LoadIntLiteral(scratch, offset);
- stdx(src, MemOperand(mem.ra(), scratch));
- } else {
- addi(scratch, mem.ra(), Operand((offset & 3) - 4));
- std(src, MemOperand(scratch, (offset & ~3) + 4));
- }
- } else {
- std(src, mem);
- }
-#else
- stw(src, mem);
-#endif
- }
-}
-
-void TurboAssembler::StorePU(Register src, const MemOperand& mem,
- Register scratch) {
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- /* cannot use d-form */
- DCHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- StorePUX(src, MemOperand(mem.ra(), scratch));
- } else {
-#if V8_TARGET_ARCH_PPC64
- stdu(src, mem);
-#else
- stwu(src, mem);
-#endif
- }
+ LoadU8(dst, mem, scratch);
+ extsb(dst, dst);
}
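
Worth noting: the Power ISA has no sign-extending byte load, so the signed variant is simply the unsigned load followed by extsb. Assuming illustrative registers, LoadS8(r3, MemOperand(r4, 1), ip) would emit roughly:

    lbz   r3, 1(r4)   // zero-extending byte load
    extsb r3, r3      // then sign-extend the byte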
-void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
- Register scratch) {
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- CHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- lwax(dst, MemOperand(mem.ra(), scratch));
- } else {
- int misaligned = (offset & 3);
- if (misaligned) {
- // adjust base to conform to offset alignment requirements
- // Todo: enhance to use scratch if dst is unsuitable
- CHECK(dst != r0);
- addi(dst, mem.ra(), Operand((offset & 3) - 4));
- lwa(dst, MemOperand(dst, (offset & ~3) + 4));
- } else {
- lwa(dst, mem);
- }
- }
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
+  DCHECK(mem.rb().is_valid());
+  lxvx(dst, mem);
}
-// Variable length depending on whether offset fits into immediate field
-// MemOperand currently only supports d-form
-void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- CHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- lwzx(dst, MemOperand(base, scratch));
- } else {
- // lwz can handle offset misalign
- lwz(dst, mem);
- }
-}
-
-// Variable length depending on whether offset fits into immediate field
-// MemOperand current only supports d-form
-void TurboAssembler::StoreWord(Register src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
- stwx(src, MemOperand(base, scratch));
- } else {
- stw(src, mem);
- }
-}
-
-void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
- Register scratch) {
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- DCHECK(scratch != no_reg);
- mov(scratch, Operand(offset));
- lhax(dst, MemOperand(mem.ra(), scratch));
- } else {
- lha(dst, mem);
- }
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
+ DCHECK(mem.rb().is_valid());
+ stxvx(src, mem);
}
-// Variable length depending on whether offset fits into immediate field
-// MemOperand currently only supports d-form
-void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- DCHECK_NE(scratch, no_reg);
- LoadIntLiteral(scratch, offset);
- lhzx(dst, MemOperand(base, scratch));
- } else {
- lhz(dst, mem);
+#define GenerateMemoryLEOperation(reg, mem, op) \
+ { \
+ if (mem.offset() == 0) { \
+ op(reg, mem); \
+ } else if (is_int16(mem.offset())) { \
+ if (mem.rb() != no_reg) \
+ addi(scratch, mem.rb(), Operand(mem.offset())); \
+ else \
+ mov(scratch, Operand(mem.offset())); \
+ op(reg, MemOperand(mem.ra(), scratch)); \
+ } else { \
+ mov(scratch, Operand(mem.offset())); \
+ if (mem.rb() != no_reg) add(scratch, scratch, mem.rb()); \
+ op(reg, MemOperand(mem.ra(), scratch)); \
+ } \
+ }
+
+#define MEM_LE_OP_LIST(V) \
+ V(LoadU64, ldbrx) \
+ V(LoadU32, lwbrx) \
+ V(LoadU16, lhbrx) \
+ V(StoreU64, stdbrx) \
+ V(StoreU32, stwbrx) \
+ V(StoreU16, sthbrx)
+
+#ifdef V8_TARGET_BIG_ENDIAN
+#define MEM_LE_OP_FUNCTION(name, op) \
+ void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
+ Register scratch) { \
+ GenerateMemoryLEOperation(reg, mem, op); \
}
-}
-
-// Variable length depending on whether offset fits into immediate field
-// MemOperand current only supports d-form
-void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
- sthx(src, MemOperand(base, scratch));
- } else {
- sth(src, mem);
+#else
+#define MEM_LE_OP_FUNCTION(name, op) \
+ void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \
+ Register scratch) { \
+ name(reg, mem, scratch); \
}
-}
+#endif
-// Variable length depending on whether offset fits into immediate field
-// MemOperand currently only supports d-form
-void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
+MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION)
+#undef MEM_LE_OP_FUNCTION
+#undef MEM_LE_OP_LIST
- if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
- lbzx(dst, MemOperand(base, scratch));
- } else {
- lbz(dst, mem);
- }
-}
-
-// Variable length depending on whether offset fits into immediate field
-// MemOperand current only supports d-form
-void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem,
Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
- stbx(src, MemOperand(base, scratch));
- } else {
- stb(src, mem);
- }
-}
-
-void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- lfdx(dst, MemOperand(base, scratch));
- } else {
- lfd(dst, mem);
- }
-}
-
-void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- lfsx(dst, MemOperand(base, scratch));
- } else {
- lfs(dst, mem);
- }
-}
-
-void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- lfdux(dst, MemOperand(base, scratch));
- } else {
- lfdu(dst, mem);
- }
-}
-
-void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- lfsx(dst, MemOperand(base, scratch));
- } else {
- lfs(dst, mem);
- }
-}
-
-void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- lfsux(dst, MemOperand(base, scratch));
- } else {
- lfsu(dst, mem);
- }
-}
-
-void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
- lxvx(dst, mem);
+#ifdef V8_TARGET_BIG_ENDIAN
+ LoadU32LE(dst, mem, scratch);
+ extsw(dst, dst);
+#else
+ LoadS32(dst, mem, scratch);
+#endif
}
-void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- stfdx(src, MemOperand(base, scratch));
- } else {
- stfd(src, mem);
- }
+void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem,
+ Register scratch) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ LoadU16LE(dst, mem, scratch);
+ extsh(dst, dst);
+#else
+ LoadS16(dst, mem, scratch);
+#endif
}
-void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- stfdux(src, MemOperand(base, scratch));
- } else {
- stfdu(src, mem);
- }
+void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem,
+ Register scratch, Register scratch2) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ LoadU64LE(scratch, mem, scratch2);
+ push(scratch);
+ LoadF64(dst, MemOperand(sp), scratch2);
+ pop(scratch);
+#else
+ LoadF64(dst, mem, scratch);
+#endif
}
-void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- stfsx(src, MemOperand(base, scratch));
- } else {
- stfs(src, mem);
- }
+void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem,
+ Register scratch, Register scratch2) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ LoadU32LE(scratch, mem, scratch2);
+ push(scratch);
+ LoadF32(dst, MemOperand(sp, 4), scratch2);
+ pop(scratch);
+#else
+ LoadF32(dst, mem, scratch);
+#endif
}
-void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
- Register base = mem.ra();
- int offset = mem.offset();
-
- if (!is_int16(offset)) {
- mov(scratch, Operand(offset));
- stfsux(src, MemOperand(base, scratch));
- } else {
- stfsu(src, mem);
- }
+void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
+ Register scratch, Register scratch2) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ StoreF64(dst, mem, scratch2);
+ LoadU64(scratch, mem, scratch2);
+ StoreU64LE(scratch, mem, scratch2);
+#else
+  StoreF64(dst, mem, scratch);
+#endif
}
-void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
- stxvx(src, mem);
+void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
+ Register scratch, Register scratch2) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ StoreF32(dst, mem, scratch2);
+ LoadU32(scratch, mem, scratch2);
+ StoreU32LE(scratch, mem, scratch2);
+#else
+  StoreF32(dst, mem, scratch);
+#endif
}
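
A short usage sketch for the LE helpers above, under the assumption of a big-endian build (on little-endian builds they simply forward to the plain accessors): the integer variants use the byte-reversed indexed instructions from MEM_LE_OP_LIST (lhbrx/lwbrx/ldbrx and their store forms), while the floating-point variants round-trip through a GPR and the stack. With illustrative registers, LoadF64LE(d1, mem, r5, r6) would roughly do:

    LoadU64LE(r5, mem, r6);           // byte-reversed 64-bit load (ldbrx)
    push(r5);                         // spill the reversed bits
    LoadF64(d1, MemOperand(sp), r6);  // reinterpret them as a double
    pop(r5);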
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
@@ -3079,7 +3128,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
LoadU64(src, dst, r0);
- StoreP(scratch, dst, r0);
+ StoreU64(scratch, dst, r0);
}
void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
@@ -3102,15 +3151,15 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
}
LoadU64(scratch_1, dst, scratch_0);
LoadU64(scratch_0, src);
- StoreP(scratch_1, src);
- StoreP(scratch_0, dst, scratch_1);
+ StoreU64(scratch_1, src);
+ StoreU64(scratch_0, dst, scratch_1);
} else {
LoadU64(scratch_1, dst, scratch_0);
push(scratch_1);
LoadU64(scratch_0, src, scratch_1);
- StoreP(scratch_0, dst, scratch_1);
+ StoreU64(scratch_0, dst, scratch_1);
pop(scratch_1);
- StoreP(scratch_1, src, scratch_0);
+ StoreU64(scratch_1, src, scratch_0);
}
}
@@ -3127,18 +3176,18 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
- LoadSingle(src, dst, r0);
- StoreSingle(scratch, dst, r0);
+ LoadF32(src, dst, r0);
+ StoreF32(scratch, dst, r0);
}
void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadSingle(scratch_0, src, r0);
- LoadSingle(scratch_1, dst, r0);
- StoreSingle(scratch_0, dst, r0);
- StoreSingle(scratch_1, src, r0);
+ LoadF32(scratch_0, src, r0);
+ LoadF32(scratch_1, dst, r0);
+ StoreF32(scratch_0, dst, r0);
+ StoreF32(scratch_1, src, r0);
}
void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
@@ -3154,18 +3203,18 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
- LoadDouble(src, dst, r0);
- StoreDouble(scratch, dst, r0);
+ LoadF64(src, dst, r0);
+ StoreF64(scratch, dst, r0);
}
void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadDouble(scratch_0, src, r0);
- LoadDouble(scratch_1, dst, r0);
- StoreDouble(scratch_0, dst, r0);
- StoreDouble(scratch_1, src, r0);
+ LoadF64(scratch_0, src, r0);
+ LoadF64(scratch_1, dst, r0);
+ StoreF64(scratch_0, dst, r0);
+ StoreF64(scratch_1, src, r0);
}
void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
@@ -3220,12 +3269,12 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
- Cmpi(x, Operand(y), r0);
+ CmpS64(x, Operand(y), r0);
beq(dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
- Cmpi(x, Operand(y), r0);
+ CmpS64(x, Operand(y), r0);
blt(dest);
}
@@ -3243,9 +3292,9 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
ShiftLeftImm(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
}
- addi(builtin_index, builtin_index,
- Operand(IsolateData::builtin_entry_table_offset()));
- LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
+ AddS64(builtin_index, builtin_index,
+ Operand(IsolateData::builtin_entry_table_offset()));
+ LoadU64(builtin_index, MemOperand(kRootRegister, builtin_index));
}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
@@ -3276,7 +3325,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// Check whether the Code object is an off-heap trampoline. If so, call its
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
- LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset), r0);
mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
and_(r0, scratch, r0, SetRC);
bne(&if_code_is_off_heap, cr0);
@@ -3289,7 +3338,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// An off-heap trampoline, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_off_heap);
- LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
+ r0);
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
LoadU64(destination,
@@ -3340,16 +3390,16 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
LoadPC(r7);
bind(&start_call);
addi(r7, r7, Operand(after_call_offset));
- StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+ StoreU64(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
Call(dest);
DCHECK_EQ(after_call_offset - kInstrSize,
SizeOfCodeGeneratedSince(&start_call));
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
LoadU64(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
@@ -3380,6 +3430,10 @@ void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }
+void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); }
+
+void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index f657f90f76..bae3b4732c 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -9,10 +9,10 @@
#ifndef V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
+#include "src/base/numbers/double.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/ppc/assembler-ppc.h"
#include "src/common/globals.h"
-#include "src/numbers/double.h"
#include "src/objects/contexts.h"
namespace v8 {
@@ -38,10 +38,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
// These exist to provide portability between 32 and 64bit
#if V8_TARGET_ARCH_PPC64
-#define LoadPX ldx
-#define LoadPUX ldux
-#define StorePX stdx
-#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
@@ -51,10 +47,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
#define ShiftRight_ srd
#define ShiftRightArith srad
#else
-#define LoadPX lwzx
-#define LoadPUX lwzux
-#define StorePX stwx
-#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
@@ -69,6 +61,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void Popcnt32(Register dst, Register src);
+ void Popcnt64(Register dst, Register src);
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
void ConvertIntToDouble(Register src, DoubleRegister dst);
@@ -120,7 +114,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AllocateStackSpace(int bytes) {
DCHECK_GE(bytes, 0);
if (bytes == 0) return;
- Add(sp, sp, -bytes, r0);
+ AddS64(sp, sp, Operand(-bytes), r0);
}
// Push a fixed frame, consisting of lr, fp, constant pool.
@@ -146,18 +140,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
mov(kRootRegister, Operand(isolate_root));
}
- // These exist to provide portability between 32 and 64bit
- void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
- void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
-
- void LoadDouble(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
- void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
- void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
+ void LoadDoubleLiteral(DoubleRegister result, base::Double value,
+ Register scratch);
void LoadSimd128(Simd128Register dst, const MemOperand& mem);
// load a literal signed int value <value> to GPR <dst>
@@ -165,44 +149,48 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// load an SMI value <value> to GPR <dst>
void LoadSmiLiteral(Register dst, Smi smi);
- void LoadSingle(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
- void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
void LoadPC(Register dst);
void ComputeCodeStartAddress(Register dst);
- void StoreDouble(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
- void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
-
- void StoreSingle(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
- void StoreSingleU(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
- void StoreSimd128(Simd128Register src, const MemOperand& mem);
-
- void Cmpi(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
- void Cmpli(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
- void Cmpwi(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
+ void CmpS64(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void CmpS64(Register src1, Register src2, CRegister cr = cr7);
+ void CmpU64(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void CmpU64(Register src1, Register src2, CRegister cr = cr7);
+ void CmpS32(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void CmpS32(Register src1, Register src2, CRegister cr = cr7);
+ void CmpU32(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void CmpU32(Register src1, Register src2, CRegister cr = cr7);
void CompareTagged(Register src1, Register src2, CRegister cr = cr7) {
if (COMPRESS_POINTERS_BOOL) {
- cmpw(src1, src2, cr);
+ CmpS32(src1, src2, cr);
} else {
- cmp(src1, src2, cr);
+ CmpS64(src1, src2, cr);
}
}
+ void MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ DoubleRegister scratch = kScratchDoubleReg);
+ void MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ DoubleRegister scratch = kScratchDoubleReg);
+
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
// reset rounding mode to default (kRoundToNearest)
void ResetRoundingMode();
- void Add(Register dst, Register src, intptr_t value, Register scratch);
+
+ void AddS64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
+ void AddS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void SubS64(Register dst, Register src, const Operand& value,
+ Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
+ void SubS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
void Push(Register src) { push(src); }
// Push a handle.
@@ -211,33 +199,33 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- StorePU(src2, MemOperand(sp, -2 * kSystemPointerSize));
- StoreP(src1, MemOperand(sp, kSystemPointerSize));
+ StoreU64WithUpdate(src2, MemOperand(sp, -2 * kSystemPointerSize));
+ StoreU64(src1, MemOperand(sp, kSystemPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- StorePU(src3, MemOperand(sp, -3 * kSystemPointerSize));
- StoreP(src2, MemOperand(sp, kSystemPointerSize));
- StoreP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ StoreU64WithUpdate(src3, MemOperand(sp, -3 * kSystemPointerSize));
+ StoreU64(src2, MemOperand(sp, kSystemPointerSize));
+ StoreU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- StorePU(src4, MemOperand(sp, -4 * kSystemPointerSize));
- StoreP(src3, MemOperand(sp, kSystemPointerSize));
- StoreP(src2, MemOperand(sp, 2 * kSystemPointerSize));
- StoreP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ StoreU64WithUpdate(src4, MemOperand(sp, -4 * kSystemPointerSize));
+ StoreU64(src3, MemOperand(sp, kSystemPointerSize));
+ StoreU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ StoreU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- StorePU(src5, MemOperand(sp, -5 * kSystemPointerSize));
- StoreP(src4, MemOperand(sp, kSystemPointerSize));
- StoreP(src3, MemOperand(sp, 2 * kSystemPointerSize));
- StoreP(src2, MemOperand(sp, 3 * kSystemPointerSize));
- StoreP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ StoreU64WithUpdate(src5, MemOperand(sp, -5 * kSystemPointerSize));
+ StoreU64(src4, MemOperand(sp, kSystemPointerSize));
+ StoreU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ StoreU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ StoreU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
}
enum PushArrayOrder { kNormal, kReverse };
@@ -281,18 +269,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
addi(sp, sp, Operand(5 * kSystemPointerSize));
}
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
@@ -302,6 +293,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushV128(RegList dregs, Register location = sp);
void MultiPopV128(RegList dregs, Register location = sp);
+ void MultiPushF64AndV128(RegList dregs, RegList simd_regs,
+ Register location = sp);
+ void MultiPopF64AndV128(RegList dregs, RegList simd_regs,
+ Register location = sp);
+
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -321,7 +317,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register exclusion3 = no_reg);
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override {
+ void LoadRoot(Register destination, RootIndex index) final {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, RootIndex index, Condition cond);
@@ -391,8 +387,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
@@ -419,10 +415,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register src_high, uint32_t shift);
#endif
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
@@ -430,7 +425,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Call(Register target);
@@ -442,13 +437,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallBuiltinByIndex(Register builtin_index);
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -504,7 +499,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- void SmiUntag(Register dst, const MemOperand& src, RCBit rc);
+ void SmiUntag(Register dst, const MemOperand& src, RCBit rc = LeaveRC,
+ Register scratch = no_reg);
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
@@ -612,14 +608,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7) {
// High bits must be identical to fit into an 32-bit integer
extsw(scratch, value);
- cmp(scratch, value, cr);
+ CmpS64(scratch, value, cr);
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
CRegister cr = cr7) {
// High bits must be identical to fit into an 32-bit integer
srawi(scratch, lo_word, 31);
- cmp(scratch, hi_word, cr);
+ CmpS64(scratch, hi_word, cr);
}
#endif
@@ -693,16 +689,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const MemOperand& field_operand,
const Register& scratch = no_reg);
- // Loads a field containing smi value and untags it.
- void SmiUntagField(Register dst, const MemOperand& src, RCBit rc = LeaveRC);
-
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch = no_reg);
- void StoreTaggedFieldX(const Register& value,
- const MemOperand& dst_field_operand,
- const Register& scratch = no_reg);
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
@@ -711,8 +701,65 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
- void LoadU32(Register dst, const MemOperand& mem, Register scratch);
- void StoreWord(Register src, const MemOperand& mem, Register scratch);
+ void LoadF64(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadF32(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreF32(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreF64(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void LoadF32WithUpdate(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadF64WithUpdate(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreF32WithUpdate(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreF64WithUpdate(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreSimd128(Simd128Register src, const MemOperand& mem);
+
+ void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadU32(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadU16(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadU8(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadS8(Register dst, const MemOperand& mem, Register scratch = no_reg);
+
+ void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
+ void StoreU32(Register src, const MemOperand& mem, Register scratch);
+ void StoreU16(Register src, const MemOperand& mem, Register scratch);
+ void StoreU8(Register src, const MemOperand& mem, Register scratch);
+
+ void LoadU64WithUpdate(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreU64WithUpdate(Register src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void LoadU64LE(Register dst, const MemOperand& mem, Register scratch);
+ void LoadU32LE(Register dst, const MemOperand& mem, Register scratch);
+ void LoadU16LE(Register dst, const MemOperand& mem, Register scratch);
+ void StoreU64LE(Register src, const MemOperand& mem, Register scratch);
+ void StoreU32LE(Register src, const MemOperand& mem, Register scratch);
+ void StoreU16LE(Register src, const MemOperand& mem, Register scratch);
+
+ void LoadS32LE(Register dst, const MemOperand& mem, Register scratch);
+ void LoadS16LE(Register dst, const MemOperand& mem, Register scratch);
+
+ void LoadF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch,
+ Register scratch2);
+ void LoadF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch,
+ Register scratch2);
+
+ void StoreF32LE(DoubleRegister src, const MemOperand& mem, Register scratch,
+ Register scratch2);
+ void StoreF64LE(DoubleRegister src, const MemOperand& mem, Register scratch,
+ Register scratch2);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -722,10 +769,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments,
bool has_function_descriptor);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -742,7 +785,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
- StoreP(rec, MemOperand(sp, 0));
+ StoreU64(rec, MemOperand(sp, 0));
}
// ---------------------------------------------------------------------------
@@ -754,7 +797,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
+ Register object, int offset, Register value, Register slot_address,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
@@ -763,7 +806,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object, Register address, Register value,
+ Register object, Register slot_address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
@@ -793,20 +836,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// load a literal double value <value> to FPR <result>
- void LoadHalfWord(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
- void LoadHalfWordArith(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
- void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
-
- void LoadByte(Register dst, const MemOperand& mem, Register scratch);
- void StoreByte(Register src, const MemOperand& mem, Register scratch);
-
- void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
-
- void Cmplwi(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
@@ -955,9 +984,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// ---------------------------------------------------------------------------
// Stack limit utilities
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 4781e7609b..14011fb9e3 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -276,11 +276,11 @@ RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
int mode_mask)
- : RelocIterator(
- code, embedded_data->InstructionStartOfBuiltin(code.builtin_index()),
- code.constant_pool(),
- code.relocation_start() + code.relocation_size(),
- code.relocation_start(), mode_mask) {}
+ : RelocIterator(code,
+ embedded_data->InstructionStartOfBuiltin(code.builtin_id()),
+ code.constant_pool(),
+ code.relocation_start() + code.relocation_size(),
+ code.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
@@ -288,9 +288,9 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
desc.buffer + desc.buffer_size - desc.reloc_size,
mode_mask) {}
-RelocIterator::RelocIterator(Vector<byte> instructions,
- Vector<const byte> reloc_info, Address const_pool,
- int mode_mask)
+RelocIterator::RelocIterator(base::Vector<byte> instructions,
+ base::Vector<const byte> reloc_info,
+ Address const_pool, int mode_mask)
: RelocIterator(Code(), reinterpret_cast<Address>(instructions.begin()),
const_pool, reloc_info.begin() + reloc_info.size(),
reloc_info.begin(), mode_mask) {}
@@ -463,7 +463,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
DCHECK(code.IsCode());
os << " (" << CodeKindToString(code.kind());
if (Builtins::IsBuiltin(code)) {
- os << " " << Builtins::name(code.builtin_index());
+ os << " " << Builtins::name(code.builtin_id());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_)) {
@@ -485,9 +485,11 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
case COMPRESSED_EMBEDDED_OBJECT:
+ Object::VerifyPointer(isolate, target_object());
+ break;
case FULL_EMBEDDED_OBJECT:
case DATA_EMBEDDED_OBJECT:
- Object::VerifyPointer(isolate, target_object());
+ Object::VerifyAnyTagged(isolate, target_object());
break;
case CODE_TARGET:
case RELATIVE_CODE_TARGET: {
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index e8b3c0b98b..f2a2d04523 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -432,9 +432,9 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
int mode_mask = -1);
- explicit RelocIterator(Vector<byte> instructions,
- Vector<const byte> reloc_info, Address const_pool,
- int mode_mask = -1);
+ explicit RelocIterator(base::Vector<byte> instructions,
+ base::Vector<const byte> reloc_info,
+ Address const_pool, int mode_mask = -1);
RelocIterator(RelocIterator&&) V8_NOEXCEPT = default;
RelocIterator(const RelocIterator&) = delete;
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index d301a00bf4..e3ac9b83f4 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -96,7 +96,37 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+int RelocInfo::target_address_size() {
+ if (IsCodedSpecially()) {
+ return Assembler::kSpecialTargetSize;
+ } else {
+ return kSystemPointerSize;
+ }
+}
+
+void Assembler::set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode) {
+ Assembler::set_target_address_at(
+ pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
+}
+
+Tagged_t Assembler::target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ return static_cast<Tagged_t>(target_address_at(pc, constant_pool));
+}
+
+Handle<Object> Assembler::code_target_object_handle_at(Address pc,
+ Address constant_pool) {
+ int index =
+ static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
+ return GetCodeTarget(index);
+}
+
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
+ Address pc, Address const_pool) {
+ return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
+}
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
@@ -127,17 +157,38 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ host_.address(),
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
- return target_object();
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ isolate,
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return target_object();
+ }
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ if (IsDataEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
+ } else if (IsCodeTarget(rmode_)) {
+ return Handle<HeapObject>::cast(
+ origin->code_target_object_handle_at(pc_, constant_pool_));
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
+ } else if (IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
@@ -149,9 +200,18 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ WriteUnalignedValue(pc_, target.ptr());
+ // No need to flush icache since no instructions were changed.
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
+ } else {
+ DCHECK(IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index e99fe2ce11..3875a93158 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -207,7 +207,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(t3.bit() | t5.bit()),
+ scratch_register_list_(t3.bit() | t5.bit() | s10.bit()),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -302,11 +302,18 @@ bool Assembler::IsBranch(Instr instr) {
return (instr & kBaseOpcodeMask) == BRANCH;
}
+bool Assembler::IsCBranch(Instr instr) {
+ int Op = instr & kRvcOpcodeMask;
+ return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
+}
+
bool Assembler::IsJump(Instr instr) {
int Op = instr & kBaseOpcodeMask;
return Op == JAL || Op == JALR;
}
+bool Assembler::IsNop(Instr instr) { return instr == kNopByte; }
+
bool Assembler::IsJal(Instr instr) { return (instr & kBaseOpcodeMask) == JAL; }
bool Assembler::IsJalr(Instr instr) {
@@ -413,6 +420,12 @@ int Assembler::target_at(int pos, bool is_internal) {
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
} break;
+ case RO_C_BNEZ:
+ case RO_C_BEQZ: {
+ int32_t offset = instruction->RvcImm8BValue();
+ if (offset == kEndOfJumpChain) return kEndOfChain;
+ return pos + offset;
+ } break;
default: {
if (instr == kEndOfJumpChain) {
return kEndOfChain;
@@ -496,7 +509,24 @@ static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos,
return instr | (imm11 & kImm11Mask);
}
-void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
+static inline Instr SetCBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ DCHECK(Assembler::IsCBranch(instr));
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 1, 0);
+ DCHECK(is_intn(imm, Assembler::kCBranchOffsetBits));
+
+ instr &= ~kRvcBImm8Mask;
+ int32_t imm8 = ((imm & 0x20) >> 5) | ((imm & 0x6)) | ((imm & 0xc0) >> 3) |
+ ((imm & 0x18) << 2) | ((imm & 0x100) >> 1);
+ imm8 = ((imm8 & 0x1f) << 2) | ((imm8 & 0xe0) << 5);
+ DCHECK(Assembler::IsCBranch(instr | imm8 & kRvcBImm8Mask));
+
+ return instr | (imm8 & kRvcBImm8Mask);
+}
+
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal,
+ bool trampoline) {
if (is_internal) {
uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
*reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
@@ -515,6 +545,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_at_put(pos, instr);
} break;
case JAL: {
+ DCHECK(IsJal(instr));
instr = SetJalOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} break;
@@ -529,24 +560,39 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
DCHECK(IsJalr(instr_I) || IsAddi(instr_I));
int64_t offset = target_pos - pos;
- DCHECK(is_int32(offset));
+ if (is_int21(offset) && IsJalr(instr_I) && trampoline) {
+ DCHECK(is_int21(offset) && ((offset & 1) == 0));
+ Instr instr = JAL;
+ instr = SetJalOffset(pos, target_pos, instr);
+ DCHECK(IsJal(instr));
+ DCHECK(JumpOffset(instr) == offset);
+ instr_at_put(pos, instr);
+ instr_at_put(pos + 4, kNopByte);
+ } else {
+ DCHECK(is_int32(offset));
- int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
- int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
- instr_auipc =
- (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12);
- instr_at_put(pos, instr_auipc);
+ instr_auipc =
+ (instr_auipc & ~kImm31_12Mask) | ((Hi20 & kImm19_0Mask) << 12);
+ instr_at_put(pos, instr_auipc);
- const int kImm31_20Mask = ((1 << 12) - 1) << 20;
- const int kImm11_0Mask = ((1 << 12) - 1);
- instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
- instr_at_put(pos + 4, instr_I);
+ const int kImm31_20Mask = ((1 << 12) - 1) << 20;
+ const int kImm11_0Mask = ((1 << 12) - 1);
+ instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
+ instr_at_put(pos + 4, instr_I);
+ }
} break;
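
In other words, the new trampoline flag lets target_at_put collapse an auipc+jalr long-branch pair into a single jal (with a nop in the now-unused second slot) whenever the patched offset fits the 21-bit, roughly ±1 MiB jal range; otherwise it falls back to patching both instructions as before.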
case RO_C_J: {
ShortInstr short_instr = SetCJalOffset(pos, target_pos, instr);
instr_at_put(pos, short_instr);
} break;
+ case RO_C_BNEZ:
+ case RO_C_BEQZ: {
+ instr = SetCBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+ } break;
default: {
// Emitted label constant, not part of a branch.
// Make label relative to Code pointer of generated Code object.
@@ -611,7 +657,7 @@ void Assembler::bind_to(Label* L, int pos) {
}
CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
- target_at_put(fixup_pos, trampoline_pos, false);
+ target_at_put(fixup_pos, trampoline_pos, false, true);
fixup_pos = trampoline_pos;
}
target_at_put(fixup_pos, pos, false);
@@ -623,7 +669,7 @@ void Assembler::bind_to(Label* L, int pos) {
}
CHECK((trampoline_pos - fixup_pos) <= kMaxJumpOffset);
DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
- target_at_put(fixup_pos, trampoline_pos, false);
+ target_at_put(fixup_pos, trampoline_pos, false, true);
fixup_pos = trampoline_pos;
}
target_at_put(fixup_pos, pos, false);
@@ -701,6 +747,8 @@ int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
InstructionBase::kIType);
DCHECK(IsAuipc(auipc));
+ DCHECK_EQ((auipc & kRdFieldMask) >> kRdShift,
+ (instr_I & kRs1FieldMask) >> kRs1Shift);
int32_t imm_auipc = AuipcOffset(auipc);
int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
int32_t offset = imm12 + imm_auipc;
@@ -750,7 +798,7 @@ void Assembler::disassembleInstr(Instr instr) {
if (!FLAG_riscv_debug) return;
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
- EmbeddedVector<char, 128> disasm_buffer;
+ base::EmbeddedVector<char, 128> disasm_buffer;
disasm.InstructionDecode(disasm_buffer, reinterpret_cast<byte*>(&instr));
DEBUG_PRINTF("%s\n", disasm_buffer.begin());
@@ -1068,6 +1116,24 @@ void Assembler::GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2,
emit(instr);
}
+void Assembler::GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1,
+ uint8_t uimm8) {
+ DCHECK(is_uint3(funct3) && is_uint8(uimm8));
+ ShortInstr instr = opcode | ((uimm8 & 0x1f) << 2) | ((uimm8 & 0xe0) << 5) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift);
+ emit(instr);
+}
+
+void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
+ Register rs1, uint8_t uimm6) {
+ DCHECK(is_uint3(funct3) && is_uint2(funct2) && is_uint6(uimm6));
+ ShortInstr instr = opcode | ((uimm6 & 0x1f) << 2) | ((uimm6 & 0x20) << 7) |
+ ((rs1.code() & 0x7) << kRvcRs1sShift) |
+ (funct3 << kRvcFunct3Shift) | (funct2 << 10);
+ emit(instr);
+}
+
// ----- Instruction class templates match those in the compiler
void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
@@ -2200,6 +2266,37 @@ void Assembler::c_j(int16_t imm12) {
BlockTrampolinePoolFor(1);
}
+// CB Instructions
+
+void Assembler::c_bnez(Register rs1, int16_t imm9) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b111, C1, rs1, uimm8);
+}
+
+void Assembler::c_beqz(Register rs1, int16_t imm9) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
+ uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
+ ((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
+ GenInstrCB(0b110, C1, rs1, uimm8);
+}
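+
The bit shuffling above mirrors the RVC CB branch format, where the 9-bit (even) offset is stored as offset[8|4:3] in instruction bits 12-10 and offset[7:6|2:1|5] in bits 6-2. A small worked example with an assumed offset of +16 (imm9 = 0x010): only imm[4] is set, so (imm9 & 0x18) << 2 gives uimm8 = 0x40, and GenInstrCB then places (0x40 & 0xe0) << 5 at instruction bit 11, which is exactly where offset[4] lives in C.BEQZ/C.BNEZ.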
+
+void Assembler::c_srli(Register rs1, uint8_t uimm6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
+ GenInstrCBA(0b100, 0b00, C1, rs1, uimm6);
+}
+
+void Assembler::c_srai(Register rs1, uint8_t uimm6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
+ GenInstrCBA(0b100, 0b01, C1, rs1, uimm6);
+}
+
+void Assembler::c_andi(Register rs1, uint8_t uimm6) {
+ DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_uint6(uimm6));
+ GenInstrCBA(0b100, 0b10, C1, rs1, uimm6);
+}
+
// Privileged
void Assembler::uret() {
@@ -2512,7 +2609,7 @@ void Assembler::li_ptr(Register rd, int64_t imm) {
// Pointers are 48 bits
// 6 fixed instructions are generated
DCHECK_EQ((imm & 0xfff0000000000000ll), 0);
- int64_t a6 = imm & 0x3f; // bits 0:6. 6 bits
+ int64_t a6 = imm & 0x3f; // bits 0:5. 6 bits
  int64_t b11 = (imm >> 6) & 0x7ff;           // bits 6:16. 11 bits
int64_t high_31 = (imm >> 17) & 0x7fffffff; // 31 bits
int64_t high_20 = ((high_31 + 0x800) >> 12); // 19 bits
@@ -2692,8 +2789,9 @@ void Assembler::GrowBuffer() {
reloc_info_writer.last_pc() + pc_delta);
// Relocate runtime entries.
- Vector<byte> instructions{buffer_start_, static_cast<size_t>(pc_offset())};
- Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
+ base::Vector<byte> instructions{buffer_start_,
+ static_cast<size_t>(pc_offset())};
+ base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index ff66351d6a..720a654c58 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -198,7 +198,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
kOffset20 = 20, // RISCV imm20
kOffset13 = 13, // RISCV branch
kOffset32 = 32, // RISCV auipc + instr_I
- kOffset11 = 11 // RISCV C_J
+ kOffset11 = 11, // RISCV C_J
+ kOffset8 = 8 // RISCV compressed branch
};
// Determines if Label is bound and near enough so that branch instruction
@@ -214,6 +215,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int32_t offset);
int JumpOffset(Instr instr);
int CJumpOffset(Instr instr);
+ int CBranchOffset(Instr instr);
static int LdOffset(Instr instr);
static int AuipcOffset(Instr instr);
static int JalrOffset(Instr instr);
@@ -231,6 +233,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline int16_t cjump_offset(Label* L) {
return (int16_t)branch_offset_helper(L, OffsetSize::kOffset11);
}
+ inline int32_t cbranch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset8);
+ }
uint64_t jump_address(Label* L);
uint64_t branch_long_offset(Label* L);
@@ -254,6 +259,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool);
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ inline Handle<Object> code_target_object_handle_at(Address pc,
+ Address constant_pool);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(
+ Address pc, Address constant_pool);
+
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
@@ -322,6 +339,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Bits available for offset field in compresed jump
static constexpr int kCJalOffsetBits = 12;
+ // Bits available for offset field in compressed branch
+ static constexpr int kCBranchOffsetBits = 9;
+
// Max offset for b instructions with 12-bit offset field (multiple of 2)
static constexpr int kMaxBranchOffset = (1 << (13 - 1)) - 1;
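Aside (not part of the patch): kCBranchOffsetBits = 9 matches the architectural reach of c.beqz/c.bnez, which encode imm[8:1] with an implicit zero low bit, i.e. a 9-bit signed byte offset. A trivial sketch of the resulting range:

#include <cstdio>

int main() {
  constexpr int kCBranchOffsetBits = 9;                            // as above
  constexpr int min_offset = -(1 << (kCBranchOffsetBits - 1));     // -256
  constexpr int max_offset = (1 << (kCBranchOffsetBits - 1)) - 2;  // +254
  std::printf("c.beqz/c.bnez reach: %d..%d bytes, step 2\n", min_offset,
              max_offset);
}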
@@ -627,6 +647,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void c_sw(Register rs2, Register rs1, uint16_t uimm7);
void c_sd(Register rs2, Register rs1, uint16_t uimm8);
void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
+ void c_bnez(Register rs1, int16_t imm9);
+ inline void c_bnez(Register rs1, Label* L) { c_bnez(rs1, branch_offset(L)); }
+ void c_beqz(Register rs1, int16_t imm9);
+ inline void c_beqz(Register rs1, Label* L) { c_beqz(rs1, branch_offset(L)); }
+ void c_srli(Register rs1, uint8_t uimm6);
+ void c_srai(Register rs1, uint8_t uimm6);
+ void c_andi(Register rs1, uint8_t uimm6);
// Privileged
void uret();
@@ -855,6 +882,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsCBranch(Instr instr);
+ static bool IsNop(Instr instr);
static bool IsJump(Instr instr);
static bool IsJal(Instr instr);
static bool IsCJal(Instr instr);
@@ -930,7 +959,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int target_at(int pos, bool is_internal);
// Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int pos, int target_pos, bool is_internal);
+ void target_at_put(int pos, int target_pos, bool is_internal,
+ bool trampoline = false);
// Say if we need to relocate with this mode.
bool MustUseReg(RelocInfo::Mode rmode);
@@ -1103,6 +1133,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrCS(uint8_t funct3, Opcode opcode, FPURegister rs2, Register rs1,
uint8_t uimm5);
void GenInstrCJ(uint8_t funct3, Opcode opcode, uint16_t uint11);
+ void GenInstrCB(uint8_t funct3, Opcode opcode, Register rs1, uint8_t uimm8);
+ void GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode, Register rs1,
+ uint8_t uimm6);
// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 2187e61da8..bd1f63b673 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -210,12 +210,14 @@ const int kRvcRs1sBits = 3;
const int kRvcRs2sShift = 2;
const int kRvcRs2sBits = 3;
const int kRvcFunct2Shift = 5;
+const int kRvcFunct2BShift = 10;
const int kRvcFunct2Bits = 2;
const int kRvcFunct6Shift = 10;
const int kRvcFunct6Bits = 6;
// RISCV Instruction bit masks
-const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1) << kBaseOpcodeShift;
+const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
+ << kBaseOpcodeShift;
const uint32_t kFunct3Mask = ((1 << kFunct3Bits) - 1) << kFunct3Shift;
const uint32_t kFunct5Mask = ((1 << kFunct5Bits) - 1) << kFunct5Shift;
const uint32_t kFunct7Mask = ((1 << kFunct7Bits) - 1) << kFunct7Shift;
@@ -241,13 +243,20 @@ const uint32_t kImm31_12Mask = ((1 << 20) - 1) << 12;
const uint32_t kImm19_0Mask = ((1 << 20) - 1);
const uint32_t kRvcOpcodeMask =
0b11 | (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
-const uint32_t kRvcFunct3Mask = (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
-const uint32_t kRvcFunct4Mask = (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift);
-const uint32_t kRvcFunct6Mask = (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift);
-const uint32_t kRvcFunct2Mask = (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift);
+const uint32_t kRvcFunct3Mask =
+ (((1 << kRvcFunct3Bits) - 1) << kRvcFunct3Shift);
+const uint32_t kRvcFunct4Mask =
+ (((1 << kRvcFunct4Bits) - 1) << kRvcFunct4Shift);
+const uint32_t kRvcFunct6Mask =
+ (((1 << kRvcFunct6Bits) - 1) << kRvcFunct6Shift);
+const uint32_t kRvcFunct2Mask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2Shift);
+const uint32_t kRvcFunct2BMask =
+ (((1 << kRvcFunct2Bits) - 1) << kRvcFunct2BShift);
const uint32_t kCRTypeMask = kRvcOpcodeMask | kRvcFunct4Mask;
const uint32_t kCSTypeMask = kRvcOpcodeMask | kRvcFunct6Mask;
const uint32_t kCATypeMask = kRvcOpcodeMask | kRvcFunct6Mask | kRvcFunct2Mask;
+const uint32_t kRvcBImm8Mask = (((1 << 5) - 1) << 2) | (((1 << 3) - 1) << 10);
// RISCV CSR related bit mask and shift
const int kFcsrFlagsBits = 5;
@@ -258,6 +267,7 @@ const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
+const int kNopByte = 0x00000013;
// Original MIPS constants
// TODO(RISCV): to be cleaned up
const int kImm16Shift = 0;
@@ -728,7 +738,7 @@ class InstructionBase {
};
inline bool IsIllegalInstruction() const {
- uint8_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
return FirstHalfWord == 0;
}
@@ -917,6 +927,11 @@ class InstructionGetters : public T {
return this->Bits(kRvcFunct2Shift + kRvcFunct2Bits - 1, kRvcFunct2Shift);
}
+ inline int RvcFunct2BValue() const {
+ DCHECK(this->IsShortInstruction());
+ return this->Bits(kRvcFunct2BShift + kRvcFunct2Bits - 1, kRvcFunct2BShift);
+ }
+
inline int CsrValue() const {
DCHECK(this->InstructionType() == InstructionBase::kIType &&
this->BaseOpcode() == SYSTEM);
@@ -1125,6 +1140,17 @@ class InstructionGetters : public T {
return imm12 << 20 >> 20;
}
+ inline int RvcImm8BValue() const {
+ DCHECK(this->IsShortInstruction());
+ // | funct3 | imm[8|4:3] | rs1` | imm[7:6|2:1|5] | opcode |
+ // 15 12 10 7 2
+ uint32_t Bits = this->InstructionBits();
+ int32_t imm9 = ((Bits & 0x4) << 3) | ((Bits & 0x18) >> 2) |
+ ((Bits & 0x60) << 1) | ((Bits & 0xc00) >> 7) |
+ ((Bits & 0x1000) >> 4);
+ return imm9 << 23 >> 23;
+ }
+
inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
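Aside (editorial note, not patch content): RvcImm8BValue() above gathers the CB-format branch immediate out of the 16-bit instruction word, and kRvcBImm8Mask covers the same bit positions. The round-trip sketch below uses a hypothetical EncodeCBImm helper for the scatter step; the decode mirrors the patch.

#include <cassert>
#include <cstdint>

// Scatter a 9-bit signed byte offset (bit 0 must be zero) into the CB-format
// immediate positions of a 16-bit instruction.
uint32_t EncodeCBImm(int32_t imm9) {
  assert((imm9 & 1) == 0 && imm9 >= -256 && imm9 <= 254);
  uint32_t u = static_cast<uint32_t>(imm9);
  return ((u & 0x20) >> 3) |   // imm[5]   -> instr bit 2
         ((u & 0x6) << 2) |    // imm[2:1] -> instr bits 4:3
         ((u & 0xc0) >> 1) |   // imm[7:6] -> instr bits 6:5
         ((u & 0x18) << 7) |   // imm[4:3] -> instr bits 11:10
         ((u & 0x100) << 4);   // imm[8]   -> instr bit 12
}

// Gather it back the same way RvcImm8BValue() does.
int32_t DecodeCBImm(uint32_t Bits) {
  int32_t imm9 = ((Bits & 0x4) << 3) | ((Bits & 0x18) >> 2) |
                 ((Bits & 0x60) << 1) | ((Bits & 0xc00) >> 7) |
                 ((Bits & 0x1000) >> 4);
  return imm9 << 23 >> 23;  // sign-extend the 9-bit value
}

int main() {
  for (int32_t off = -256; off <= 254; off += 2) {
    assert(DecodeCBImm(EncodeCBImm(off)) == off);
  }
}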
@@ -1154,7 +1180,7 @@ class Instruction : public InstructionGetters<InstructionBase> {
// C/C++ argument slots size.
const int kCArgSlotCount = 0;
-// TODO(plind): below should be based on kPointerSize
+// TODO(plind): below should be based on kSystemPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2;
diff --git a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
index 4a8bb0d9ee..1200b138e8 100644
--- a/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -20,19 +20,38 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs & a0.bit());
+ if (argc >= 2) DCHECK(allocatable_regs & a1.bit());
+ if (argc >= 3) DCHECK(allocatable_regs & a2.bit());
+ if (argc >= 4) DCHECK(allocatable_regs & a3.bit());
+ if (argc >= 5) DCHECK(allocatable_regs & a4.bit());
+ if (argc >= 6) DCHECK(allocatable_regs & a5.bit());
+ if (argc >= 7) DCHECK(allocatable_regs & a6.bit());
+ if (argc >= 8) DCHECK(allocatable_regs & a7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, a5, a4, a2, a0, a3);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
}
// static
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 801a74f569..d94352951d 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -22,7 +22,6 @@
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
@@ -60,7 +59,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
RegList list = kJSCallerSaved & ~exclusions;
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
@@ -85,7 +84,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
@@ -116,28 +115,30 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
- bytes += NumRegs(list) * kPointerSize;
+ bytes += NumRegs(list) * kSystemPointerSize;
return bytes;
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
- Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+ Ld(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond, Register src1,
const Operand& src2) {
Label skip;
- Branch(&skip, NegateCondition(cond), src1, src2);
- Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+ BranchShort(&skip, NegateCondition(cond), src1, src2);
+ Ld(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
bind(&skip);
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
- Add64(fp, sp, Operand(kPointerSize));
+ Add64(fp, sp, Operand(kSystemPointerSize));
} else {
Push(ra, fp);
mv(fp, sp);
@@ -148,10 +149,10 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
- offset += 2 * kPointerSize;
+ offset += 2 * kSystemPointerSize;
} else {
Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
- offset += kPointerSize;
+ offset += kSystemPointerSize;
}
Add64(fp, sp, Operand(offset));
}
@@ -166,52 +167,44 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
- RAStatus ra_status,
+ Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ DCHECK(!AreAliased(object, value));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
- // Skip barrier if writing a smi.
+ // Skip the barrier if writing a smi.
if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
// Although the object register is tagged, the offset is relative to the start
- // of the object, so so offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
+ // of the object, so offset must be a multiple of kTaggedSize.
+ DCHECK(IsAligned(offset, kTaggedSize));
- Add64(dst, object, Operand(offset - kHeapObjectTag));
if (FLAG_debug_code) {
+ Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- BlockTrampolinePoolScope block_trampoline_pool(this);
- Label ok;
- DCHECK(!AreAliased(value, dst, scratch, object));
- And(scratch, dst, Operand(kPointerSize - 1));
- Branch(&ok, eq, scratch, Operand(zero_reg));
- ebreak();
+ DCHECK(!AreAliased(object, value, scratch));
+ Add64(scratch, object, offset - kHeapObjectTag);
+ And(scratch, scratch, Operand(kTaggedSize - 1));
+ BranchShort(&ok, eq, scratch, Operand(zero_reg));
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
bind(&ok);
}
- RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- SmiCheck::kOmit);
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
+ save_fp, remembered_set_action, SmiCheck::kOmit);
bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
- li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
- }
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -221,8 +214,8 @@ void TurboAssembler::SaveRegisters(RegList registers) {
MultiPush(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -232,116 +225,101 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
- Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- // Inline the trampoline. //qj
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
- UseScratchRegisterScope temps(this);
- BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(scratch);
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
- }
+ auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline. //qj
+ RecordCommentForOffHeapTrampoline(builtin);
- RestoreRegisters(registers);
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+ }
}
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Operand offset,
Register value, RAStatus ra_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ DCHECK(!AreAliased(object, value));
+
if (FLAG_debug_code) {
- DCHECK(!AreAliased(object, address, value, kScratchReg));
- Ld(kScratchReg, MemOperand(address));
- Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite,
- kScratchReg, Operand(value));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ DCHECK(!AreAliased(object, value, temp));
+ Add64(temp, object, offset);
+ LoadTaggedPointerField(temp, MemOperand(temp));
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp,
+ Operand(value));
}
if ((remembered_set_action == RememberedSetAction::kOmit &&
@@ -359,30 +337,39 @@ void MacroAssembler::RecordWrite(Register object, Register address,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
-
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.Acquire();
+ CheckPageFlag(value,
+ temp, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, // CheckPageFlag compares the masked flags against zero, so
+ // with eq it branches to done when none of the bits are set
+ &done);
+
+ CheckPageFlag(object,
+ temp, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, // CheckPageFlag compares the masked flags against zero, so
+ // with eq it branches to done when none of the bits are set
+ &done);
+ }
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ // TODO(cbruni): Turn offset into int.
+ DCHECK(offset.IsImmediate());
+ Add64(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
+ if (FLAG_debug_code) li(slot_address, Operand(kZapValue));
bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (FLAG_debug_code) {
- li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
- li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
- }
}
// ---------------------------------------------------------------------------
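Aside (editorial, not from the patch): the RecordWrite rewrite above keeps the same filtering order before calling the stub. A minimal model of that decision, with illustrative stand-in names rather than V8 APIs:

struct StoreSite {
  bool value_is_smi;
  bool value_page_pointers_to_here_interesting;
  bool object_page_pointers_from_here_interesting;
};

// True when the RecordWrite stub must run for a store into object + offset.
bool NeedsRecordWriteStub(const StoreSite& s, bool inline_smi_check) {
  if (inline_smi_check && s.value_is_smi) return false;             // JumpIfSmi
  if (!s.value_page_pointers_to_here_interesting) return false;     // CheckPageFlag
  if (!s.object_page_pointers_from_here_interesting) return false;  // CheckPageFlag
  return true;  // compute slot_address = object + offset and call the stub
}

int main() {
  StoreSite smi_store{true, true, true};
  // A Smi value never needs the barrier when the inline check is emitted.
  return NeedsRecordWriteStub(smi_store, /*inline_smi_check=*/true) ? 1 : 0;
}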
@@ -969,7 +956,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = t6;
+ Register x2 = temps.Acquire();
li(x1, 0x00FF00FF);
slliw(x0, rs, 16);
srliw(rd, rs, 16);
@@ -991,7 +978,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = t6;
+ Register x2 = temps.Acquire();
li(x1, 0x0000FFFF0000FFFFl);
slli(x0, rs, 32);
srli(rd, rs, 32);
@@ -1109,8 +1096,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
NBYTES - 1);
}
Register scratch_other = temps.Acquire();
- Register scratch = t2;
- push(t2);
+ Register scratch = temps.Acquire();
DCHECK(scratch != rs.rm() && scratch_other != scratch &&
scratch_other != rs.rm());
LoadNBytes<NBYTES, true>(scratch, source, scratch_other);
@@ -1118,7 +1104,6 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
fmv_w_x(frd, scratch);
else
fmv_d_x(frd, scratch);
- pop(t2);
}
template <int NBYTES>
@@ -1239,7 +1224,7 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Lwu(rd, rs);
- Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Lw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
slli(scratch, scratch, 32);
Add64(rd, rd, scratch);
}
@@ -1254,7 +1239,7 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
Register scratch = temps.Acquire();
Sw(rd, rs);
srai(scratch, rd, 32);
- Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
}
void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs) {
@@ -1426,15 +1411,22 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
}
}
-void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+void TurboAssembler::li(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ EmbeddedObjectIndex index = AddEmbeddedObject(value);
+ DCHECK(is_uint32(index));
+ li(dst, Operand(static_cast<int>(index), rmode));
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ li(dst, Operand(value.address(), rmode));
}
- li(dst, Operand(value), mode);
}
void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
@@ -1529,11 +1521,11 @@ static RegList s_regs =
void TurboAssembler::MultiPush(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
- int16_t stack_offset = num_to_push * kPointerSize;
+ int16_t stack_offset = num_to_push * kSystemPointerSize;
#define TEST_AND_PUSH_REG(reg) \
if ((regs & reg.bit()) != 0) { \
- stack_offset -= kPointerSize; \
+ stack_offset -= kSystemPointerSize; \
Sd(reg, MemOperand(sp, stack_offset)); \
regs &= ~reg.bit(); \
}
@@ -1577,7 +1569,7 @@ void TurboAssembler::MultiPop(RegList regs) {
#define TEST_AND_POP_REG(reg) \
if ((regs & reg.bit()) != 0) { \
Ld(reg, MemOperand(sp, stack_offset)); \
- stack_offset += kPointerSize; \
+ stack_offset += kSystemPointerSize; \
regs &= ~reg.bit(); \
}
@@ -1735,6 +1727,22 @@ void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs,
}
}
+void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) {
+ Label no_nan;
+ feq_d(kScratchReg, fs, fs);
+ bnez(kScratchReg, &no_nan);
+ Move(rd, zero_reg);
+ bind(&no_nan);
+}
+
+void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) {
+ Label no_nan;
+ feq_s(kScratchReg, fs, fs);
+ bnez(kScratchReg, &no_nan);
+ Move(rd, zero_reg);
+ bind(&no_nan);
+}
+
void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) {
RoundFloatingPointToInteger(
rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) {
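Aside (not part of the patch): the new Clear_if_nan_d/Clear_if_nan_s helpers above rely on feq.d/feq.s returning 0 when either operand is NaN, so comparing fs with itself detects NaN and rd is zeroed only in that case. A one-line model:

#include <cmath>
#include <cstdint>

// feq_d(scratch, fs, fs); bnez(scratch, &no_nan); Move(rd, zero_reg);
int64_t ClearIfNaN(int64_t rd, double fs) { return std::isnan(fs) ? 0 : rd; }

int main() { return ClearIfNaN(42, std::nan("")) == 0 ? 0 : 1; }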
@@ -1909,12 +1917,12 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
bind(&not_NaN);
}
- // If real exponent (i.e., t6 - kFloatExponentBias) is greater than
+ // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
// kFloat32MantissaBits, it means the floating-point value has no fractional
// part, thus the input is already rounded, jump to done. Note that, NaN and
// Infinity in floating-point representation sets maximal exponent value, so
- // they also satisfy (t6-kFloatExponentBias >= kFloatMantissaBits), and JS
- // round semantics specify that rounding of NaN (Infinity) returns NaN
+ // they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
+ // and JS round semantics specify that rounding of NaN (Infinity) returns NaN
// (Infinity), so NaN and Infinity are considered rounded value too.
Branch(&done, greater_equal, scratch2,
Operand(kFloatExponentBias + kFloatMantissaBits));
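Aside (editorial, not from the patch): the exponent test discussed in the comment above. A value whose unbiased exponent is at least the number of mantissa bits has no fractional bits left, and NaN/Infinity carry the maximal exponent, so both fall through to done. A standalone check for binary32 with locally defined constants:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

bool AlreadyIntegerOrNonFinite(float f) {
  constexpr int kFloatExponentBias = 127;
  constexpr int kFloatMantissaBits = 23;
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  int biased_exponent = (bits >> kFloatMantissaBits) & 0xff;
  return biased_exponent - kFloatExponentBias >= kFloatMantissaBits;
}

int main() {
  assert(AlreadyIntegerOrNonFinite(16777216.0f));  // 2^24: no fraction bits left
  assert(!AlreadyIntegerOrNonFinite(0.5f));
  assert(AlreadyIntegerOrNonFinite(INFINITY));
  assert(AlreadyIntegerOrNonFinite(std::nanf("")));
}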
@@ -2305,28 +2313,28 @@ void TurboAssembler::Clz32(Register rd, Register xx) {
Move(x, xx);
li(n, Operand(32));
srliw(y, x, 16);
- Branch(&L0, eq, y, Operand(zero_reg));
+ BranchShort(&L0, eq, y, Operand(zero_reg));
Move(x, y);
addiw(n, n, -16);
bind(&L0);
srliw(y, x, 8);
- Branch(&L1, eq, y, Operand(zero_reg));
+ BranchShort(&L1, eq, y, Operand(zero_reg));
addiw(n, n, -8);
Move(x, y);
bind(&L1);
srliw(y, x, 4);
- Branch(&L2, eq, y, Operand(zero_reg));
+ BranchShort(&L2, eq, y, Operand(zero_reg));
addiw(n, n, -4);
Move(x, y);
bind(&L2);
srliw(y, x, 2);
- Branch(&L3, eq, y, Operand(zero_reg));
+ BranchShort(&L3, eq, y, Operand(zero_reg));
addiw(n, n, -2);
Move(x, y);
bind(&L3);
srliw(y, x, 1);
subw(rd, n, x);
- Branch(&L4, eq, y, Operand(zero_reg));
+ BranchShort(&L4, eq, y, Operand(zero_reg));
addiw(rd, n, -2);
bind(&L4);
}
@@ -2354,33 +2362,33 @@ void TurboAssembler::Clz64(Register rd, Register xx) {
Move(x, xx);
li(n, Operand(64));
srli(y, x, 32);
- Branch(&L0, eq, y, Operand(zero_reg));
+ BranchShort(&L0, eq, y, Operand(zero_reg));
addiw(n, n, -32);
Move(x, y);
bind(&L0);
srli(y, x, 16);
- Branch(&L1, eq, y, Operand(zero_reg));
+ BranchShort(&L1, eq, y, Operand(zero_reg));
addiw(n, n, -16);
Move(x, y);
bind(&L1);
srli(y, x, 8);
- Branch(&L2, eq, y, Operand(zero_reg));
+ BranchShort(&L2, eq, y, Operand(zero_reg));
addiw(n, n, -8);
Move(x, y);
bind(&L2);
srli(y, x, 4);
- Branch(&L3, eq, y, Operand(zero_reg));
+ BranchShort(&L3, eq, y, Operand(zero_reg));
addiw(n, n, -4);
Move(x, y);
bind(&L3);
srli(y, x, 2);
- Branch(&L4, eq, y, Operand(zero_reg));
+ BranchShort(&L4, eq, y, Operand(zero_reg));
addiw(n, n, -2);
Move(x, y);
bind(&L4);
srli(y, x, 1);
subw(rd, n, x);
- Branch(&L5, eq, y, Operand(zero_reg));
+ BranchShort(&L5, eq, y, Operand(zero_reg));
addiw(rd, n, -2);
bind(&L5);
}
@@ -2459,8 +2467,8 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
- Register value = t6;
- DCHECK((rd != t6) && (rs != t6));
+ Register value = temps.Acquire();
+ DCHECK((rd != value) && (rs != value));
li(value, 0x01010101); // value = 0x01010101;
li(scratch2, 0x55555555); // B0 = 0x55555555;
Srl32(scratch, rs, 1);
@@ -2494,8 +2502,8 @@ void TurboAssembler::Popcnt64(Register rd, Register rs) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
- Register value = t6;
- DCHECK((rd != t6) && (rs != t6));
+ Register value = temps.Acquire();
+ DCHECK((rd != value) && (rs != value));
li(value, 0x1111111111111111l); // value = 0x1111111111111111l;
li(scratch2, 5);
Mul64(scratch2, value, scratch2); // B0 = 0x5555555555555555l;
@@ -2567,7 +2575,7 @@ void TurboAssembler::Branch(int32_t offset) {
}
void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
- const Operand& rt) {
+ const Operand& rt, Label::Distance near_jump) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
DCHECK(is_near);
USE(is_near);
@@ -2590,7 +2598,7 @@ void TurboAssembler::Branch(Label* L) {
}
void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt) {
+ const Operand& rt, Label::Distance near_jump) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt)) {
if (cond != cc_always) {
@@ -2942,9 +2950,9 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
- Ld(destination,
- FieldMemOperand(destination,
- FixedArray::kHeaderSize + constant_index * kPointerSize));
+ LoadTaggedPointerField(
+ destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt(
+ constant_index)));
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
@@ -2999,10 +3007,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
@@ -3017,7 +3025,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
return;
} else if (root_array_available_ && options().isolate_independent_code &&
target_is_isolate_independent_builtin) {
- int offset = code->builtin_index() * kSystemPointerSize +
+ int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
Ld(t6, MemOperand(kRootRegister, offset));
Jump(t6, cond, rs, rt);
@@ -3025,16 +3033,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t6, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Jump(t6, cond, rs, rt);
return;
}
- Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt);
+ int32_t target_index = AddCodeTarget(code);
+ Jump(static_cast<intptr_t>(target_index), rmode, cond, rs, rt);
}
void TurboAssembler::Jump(const ExternalReference& reference) {
@@ -3078,16 +3084,16 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt) {
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin = Builtin::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
Label skip;
BlockTrampolinePoolScope block_trampoline_pool(this);
- RecordCommentForOffHeapTrampoline(builtin_index);
+ RecordCommentForOffHeapTrampoline(builtin);
if (cond != al) {
Branch(&skip, NegateCondition(cond), rs, rt);
}
@@ -3098,7 +3104,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
return;
} else if (root_array_available_ && options().isolate_independent_code &&
target_is_isolate_independent_builtin) {
- int offset = code->builtin_index() * kSystemPointerSize +
+ int offset = static_cast<int>(code->builtin_id()) * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadRootRelative(t6, offset);
Call(t6, cond, rs, rt);
@@ -3106,76 +3112,63 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (options().inline_offheap_trampolines &&
target_is_isolate_independent_builtin) {
// Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- li(t6, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t6, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
Call(t6, cond, rs, rt);
return;
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
DCHECK(code->IsExecutable());
- Call(code.address(), rmode, cond, rs, rt);
+ int32_t target_index = AddCodeTarget(code);
+ Call(static_cast<Address>(target_index), rmode, cond, rs, rt);
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_index register contains the builtin index as a Smi.
- SmiUntag(builtin_index, builtin_index);
- CalcScaledAddress(builtin_index, kRootRegister, builtin_index,
- kSystemPointerSizeLog2);
- Ld(builtin_index,
- MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+ // The builtin register contains the builtin index as a Smi.
+ SmiUntag(builtin, builtin);
+ CalcScaledAddress(builtin, kRootRegister, builtin, kSystemPointerSizeLog2);
+ Ld(builtin, MemOperand(builtin, IsolateData::builtin_entry_table_offset()));
}
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
- LoadEntryFromBuiltinIndex(builtin_index);
- Call(builtin_index);
+void TurboAssembler::CallBuiltinByIndex(Register builtin) {
+ LoadEntryFromBuiltinIndex(builtin);
+ Call(builtin);
}
-void TurboAssembler::CallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
if (options().short_builtin_calls) {
- Call(entry, RelocInfo::RUNTIME_ENTRY);
+ Call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- Call(entry, RelocInfo::OFF_HEAP_TARGET);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
}
RecordComment("]");
}
-void TurboAssembler::TailCallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
if (options().short_builtin_calls) {
- Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ Jump(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
}
RecordComment("]");
}
-void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination) {
- Ld(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ld(destination, EntryFromBuiltinAsOperand(builtin));
}
-MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
- Builtins::Name builtin_index) {
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index));
+ IsolateData::builtin_entry_slot_offset(builtin));
}
void TurboAssembler::PatchAndJump(Address target) {
@@ -3268,7 +3261,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
}
void TurboAssembler::DropAndRet(int drop) {
- Add64(sp, sp, drop * kPointerSize);
+ Add64(sp, sp, drop * kSystemPointerSize);
Ret();
}
@@ -3300,7 +3293,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Branch(&skip, NegateCondition(cond), reg, op);
}
- Add64(sp, sp, Operand(count * kPointerSize));
+ Add64(sp, sp, Operand(count * kSystemPointerSize));
if (cond != al) {
bind(&skip);
@@ -3352,7 +3345,7 @@ void TurboAssembler::PushArray(Register array, Register size,
mv(scratch, zero_reg);
jmp(&entry);
bind(&loop);
- CalcScaledAddress(scratch2, array, scratch, kPointerSizeLog2);
+ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
Ld(scratch2, MemOperand(scratch2));
push(scratch2);
Add64(scratch, scratch, Operand(1));
@@ -3362,7 +3355,7 @@ void TurboAssembler::PushArray(Register array, Register size,
mv(scratch, size);
jmp(&entry);
bind(&loop);
- CalcScaledAddress(scratch2, array, scratch, kPointerSizeLog2);
+ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
Ld(scratch2, MemOperand(scratch2));
push(scratch2);
bind(&entry);
@@ -3383,27 +3376,30 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
- li(t2,
+ UseScratchRegisterScope temps(this);
+ Register handler_address = temps.Acquire();
+ Register handler = temps.Acquire();
+ li(handler_address,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- Ld(t1, MemOperand(t2));
- push(t1);
+ Ld(handler, MemOperand(handler_address));
+ push(handler);
// Set this new handler as the current one.
- Sd(sp, MemOperand(t2));
+ Sd(sp, MemOperand(handler_address));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
Add64(sp, sp,
- Operand(
- static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
+ kSystemPointerSize)));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch,
@@ -3451,17 +3447,17 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
// Calculate the end of destination area where we will put the arguments
- // after we drop current frame. We add kPointerSize to count the receiver
- // argument which is not included into formal parameters count.
+ // after we drop current frame. We add kSystemPointerSize to count the
+ // receiver argument which is not included into formal parameters count.
Register dst_reg = scratch0;
- CalcScaledAddress(dst_reg, fp, caller_args_count, kPointerSizeLog2);
+ CalcScaledAddress(dst_reg, fp, caller_args_count, kSystemPointerSizeLog2);
Add64(dst_reg, dst_reg,
- Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
Register src_reg = caller_args_count;
- // Calculate the end of source area. +kPointerSize is for the receiver.
- CalcScaledAddress(src_reg, sp, callee_args_count, kPointerSizeLog2);
- Add64(src_reg, src_reg, Operand(kPointerSize));
+ // Calculate the end of source area. +kSystemPointerSize is for the receiver.
+ CalcScaledAddress(src_reg, sp, callee_args_count, kSystemPointerSizeLog2);
+ Add64(src_reg, src_reg, Operand(kSystemPointerSize));
if (FLAG_debug_code) {
Check(Uless, AbortReason::kStackAccessBelowStackPointer, src_reg,
@@ -3482,8 +3478,8 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Label loop, entry;
Branch(&entry);
bind(&loop);
- Sub64(src_reg, src_reg, Operand(kPointerSize));
- Sub64(dst_reg, dst_reg, Operand(kPointerSize));
+ Sub64(src_reg, src_reg, Operand(kSystemPointerSize));
+ Sub64(dst_reg, dst_reg, Operand(kSystemPointerSize));
Ld(tmp_reg, MemOperand(src_reg));
Sd(tmp_reg, MemOperand(dst_reg));
bind(&entry);
@@ -3510,19 +3506,25 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
Register scratch2,
- Label* stack_overflow) {
+ Label* stack_overflow, Label* done) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
-
+ DCHECK(stack_overflow != nullptr || done != nullptr);
LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
Sub64(scratch1, sp, scratch1);
// Check if the arguments will overflow the stack.
- Sll64(scratch2, num_args, kPointerSizeLog2);
+ Sll64(scratch2, num_args, kSystemPointerSizeLog2);
// Signed comparison.
- Branch(stack_overflow, le, scratch1, Operand(scratch2));
+ if (stack_overflow != nullptr) {
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+ } else if (done != nullptr) {
+ Branch(done, gt, scratch1, Operand(scratch2));
+ } else {
+ UNREACHABLE();
+ }
}
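Aside (editorial, not from the patch): the comparison StackOverflowCheck performs, now with the option of jumping to done instead of stack_overflow. A minimal model with illustrative names:

#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 3;  // 64-bit target

// True if pushing num_args pointer-sized arguments would cross the limit.
bool WouldOverflow(intptr_t sp, intptr_t real_stack_limit, intptr_t num_args) {
  intptr_t space_left = sp - real_stack_limit;  // may already be negative
  intptr_t needed = num_args << kSystemPointerSizeLog2;
  return space_left <= needed;  // signed comparison, as in the Branch above
}

int main() {
  // 64 KiB of head room comfortably fits 16 pointer-sized arguments.
  return WouldOverflow(0x7fff0000, 0x7ffe0000, 16) ? 1 : 0;
}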
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
@@ -3549,7 +3551,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
Label stack_overflow;
- StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ {
+ UseScratchRegisterScope temps(this);
+ StackOverflowCheck(expected_parameter_count, temps.Acquire(),
+ temps.Acquire(), &stack_overflow);
+ }
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
{
@@ -3596,14 +3602,19 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
Label skip_hook;
-
- li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
- Lb(t0, MemOperand(t0));
- Branch(&skip_hook, eq, t0, Operand(zero_reg));
-
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Lb(scratch, MemOperand(scratch));
+ Branch(&skip_hook, eq, scratch, Operand(zero_reg));
+ }
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
- LoadReceiver(t0, actual_parameter_count);
+ UseScratchRegisterScope temps(this);
+ Register receiver = temps.Acquire();
+ LoadReceiver(receiver, actual_parameter_count);
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -3618,7 +3629,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- Push(t0);
+ Push(receiver);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
@@ -3658,7 +3669,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
- Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ LoadTaggedPointerField(code,
+ FieldMemOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
@@ -3682,15 +3694,19 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
Register expected_parameter_count = a2;
- Register temp_reg = t0;
- Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ UseScratchRegisterScope temps(this);
+ Register temp_reg = temps.Acquire();
+ LoadTaggedPointerField(
+ temp_reg,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(cp,
+ FieldMemOperand(function, JSFunction::kContextOffset));
// The argument count is stored as uint16_t
Lhu(expected_parameter_count,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- InvokeFunctionCode(a1, new_target, expected_parameter_count,
+ InvokeFunctionCode(function, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@@ -3705,7 +3721,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, a1);
// Get the function and setup the context.
- Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
actual_parameter_count, type);
@@ -3874,8 +3890,9 @@ void MacroAssembler::LoadWeakValue(Register out, Register in,
And(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -3888,8 +3905,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
// This operation has to be exactly 32-bit wide in case the external
@@ -3974,14 +3992,16 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void TurboAssembler::LoadMap(Register destination, Register object) {
- Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadTaggedPointerField(destination,
+ FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
- Ld(dst,
- FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- Ld(dst, MemOperand(dst, Context::SlotOffset(index)));
+ LoadTaggedPointerField(
+ dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
@@ -4006,9 +4026,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- addi(sp, fp, 2 * kPointerSize);
- Ld(ra, MemOperand(fp, 1 * kPointerSize));
- Ld(fp, MemOperand(fp, 0 * kPointerSize));
+ addi(sp, fp, 2 * kSystemPointerSize);
+ Ld(ra, MemOperand(fp, 1 * kSystemPointerSize));
+ Ld(fp, MemOperand(fp, 0 * kSystemPointerSize));
}
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
@@ -4017,9 +4037,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
- STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
- STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
- STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT(2 * kSystemPointerSize ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kSystemPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kSystemPointerSize == ExitFrameConstants::kCallerFPOffset);
// This is how the stack will look:
// fp + 2 (==kCallerSPDisplacement) - old stack's end
@@ -4031,14 +4052,15 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// new stack (will contain saved ra)
// Save registers and reserve room for saved entry sp.
- addi(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
- Sd(ra, MemOperand(sp, 3 * kPointerSize));
- Sd(fp, MemOperand(sp, 2 * kPointerSize));
+ addi(sp, sp,
+ -2 * kSystemPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ Sd(ra, MemOperand(sp, 3 * kSystemPointerSize));
+ Sd(fp, MemOperand(sp, 2 * kSystemPointerSize));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
- Sd(scratch, MemOperand(sp, 1 * kPointerSize));
+ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize));
}
// Set up new frame pointer.
addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -4076,7 +4098,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// (used by DirectCEntry to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
DCHECK_GE(stack_space, 0);
- Sub64(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ Sub64(sp, sp, Operand((stack_space + 2) * kSystemPointerSize));
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
@@ -4086,7 +4108,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// location.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- addi(scratch, sp, kPointerSize);
+ addi(scratch, sp, kSystemPointerSize);
Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4135,11 +4157,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add(sp, sp, argument_count);
} else {
- CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2);
+ CalcScaledAddress(sp, sp, argument_count, kSystemPointerSizeLog2);
}
}
- addi(sp, sp, 2 * kPointerSize);
+ addi(sp, sp, 2 * kSystemPointerSize);
if (do_return) {
Ret();
@@ -4167,7 +4189,7 @@ void MacroAssembler::AssertStackIsAligned() {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
{
@@ -4188,7 +4210,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
DCHECK(SmiValuesAre31Bits());
- Lw(dst, src);
+ if (COMPRESS_POINTERS_BOOL) {
+ Lw(dst, src);
+ } else {
+ Ld(dst, src);
+ }
SmiUntag(dst);
}
}
@@ -4254,8 +4280,10 @@ void MacroAssembler::AssertFunction(Register object) {
Operand(zero_reg));
push(object);
LoadMap(object, object);
- GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t5);
- Check(Uless_equal, AbortReason::kOperandIsNotAFunction, t5,
+ UseScratchRegisterScope temps(this);
+ Register range = temps.Acquire();
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, range);
+ Check(Uless_equal, AbortReason::kOperandIsNotAFunction, range,
Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
pop(object);
}
@@ -4420,16 +4448,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// by CalculateStackPassedDWords()).
int stack_passed_arguments =
CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
// Make stack end at alignment and make room for stack arguments and the
// original value of sp.
mv(scratch, sp);
- Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ Sub64(sp, sp, Operand((stack_passed_arguments + 1) * kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
- Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ Sd(scratch, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
- Sub64(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ Sub64(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
}
}
@@ -4475,7 +4503,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
+ if (frame_alignment > kSystemPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
{
@@ -4531,7 +4559,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (isolate() != nullptr) {
// We don't unset the PC; the FP is the source of truth.
- Register scratch = t1;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
Sd(zero_reg, MemOperand(scratch));
}
@@ -4540,10 +4569,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
int stack_passed_arguments =
CalculateStackPassedDWords(num_reg_arguments, num_double_arguments);
- if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ Ld(sp, MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
} else {
- Add64(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ Add64(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
}
}
@@ -4596,9 +4625,9 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Ld(t6,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
@@ -4624,9 +4653,11 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// * Codegen at runtime does not have this restriction and we can use the
// shorter, branchless instruction sequence. The assumption here is that
// targets are usually generated code and not builtin Code objects.
+
if (options().isolate_independent_code) {
DCHECK(root_array_available());
- Label if_code_is_off_heap, no_builtin_index, out;
+ Label if_code_is_off_heap, out;
+
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4636,12 +4667,12 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// Check whether the Code object is an off-heap trampoline. If so, call its
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
+
Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- Branch(&if_code_is_off_heap, ne, scratch,
- Operand(Code::IsOffHeapTrampoline::kMask));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
// Not an off-heap trampoline object, the entry point is at
// Code::raw_instruction_start().
- bind(&no_builtin_index);
Add64(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
Branch(&out);
@@ -4649,8 +4680,6 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_off_heap);
Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- // TODO(RISCV): https://github.com/v8-riscv/v8/issues/373
- Branch(&no_builtin_index, eq, scratch, Operand(Builtins::kNoBuiltinId));
slli(destination, scratch, kSystemPointerSizeLog2);
Add64(destination, destination, kRootRegister);
Ld(destination,
@@ -4673,6 +4702,82 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
Jump(code_object);
}
+void TurboAssembler::LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(destination, field_operand);
+ } else {
+ Ld(destination, field_operand);
+ }
+}
+
+void TurboAssembler::LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressAnyTagged(destination, field_operand);
+ } else {
+ Ld(destination, field_operand);
+ }
+}
+
+void TurboAssembler::LoadTaggedSignedField(const Register& destination,
+ const MemOperand& field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ Ld(destination, field_operand);
+ }
+}
+
+void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) {
+ SmiUntag(dst, src);
+}
+
+void TurboAssembler::StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Sw(value, dst_field_operand);
+ } else {
+ Sd(value, dst_field_operand);
+ }
+}
+
+void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressTaggedSigned");
+ Lwu(destination, field_operand);
+ if (FLAG_debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add64(destination, destination,
+ Operand(((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32));
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressTaggedPointer");
+ Lwu(destination, field_operand);
+ Add64(destination, kPtrComprCageBaseRegister, destination);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+ const Register& source) {
+ RecordComment("[ DecompressTaggedPointer");
+ And(destination, source, Operand(0xFFFFFFFF));
+ Add64(destination, kPtrComprCageBaseRegister, Operand(destination));
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(const Register& destination,
+ const MemOperand& field_operand) {
+ RecordComment("[ DecompressAnyTagged");
+ Lwu(destination, field_operand);
+ Add64(destination, kPtrComprCageBaseRegister, destination);
+ RecordComment("]");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 81e5565606..d06b4ce176 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
#include "src/objects/tagged-index.h"
namespace v8 {
@@ -66,7 +67,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
// Static helper functions.
#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
@@ -84,7 +85,7 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
inline MemOperand CFunctionArgumentOperand(int index) {
DCHECK_GT(index, kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ int offset = (index - 5) * kSystemPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
}
@@ -107,6 +108,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
li(kRootRegister, Operand(isolate_root));
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
+#endif
}
// Jump unconditionally to given label.
@@ -115,8 +120,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// -------------------------------------------------------------------------
// Debugging.
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
@@ -141,10 +146,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
- DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
DECLARE_BRANCH_PROTOTYPES(BranchShort)
+ void Branch(Label* target);
+ void Branch(int32_t target);
+ void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
+ Label::Distance near_jump = Label::kFar);
+ void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
+ Label::Distance near_jump = Label::kFar);
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
@@ -189,16 +199,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
-
- void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, Handle<HeapObject> value,
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
LiFlags mode = OPTIMIZE_SIZE);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
inline void GenPCRelativeJump(Register rd, int64_t imm32);
inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
@@ -215,7 +224,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// patching.
void PatchAndJump(Address target);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
@@ -225,35 +234,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst, Label* target,
RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
- // Load the builtin given by the Smi in |builtin_index| into the same
+ // Load the builtin given by the Smi in |builtin| into the same
// register.
- void LoadEntryFromBuiltinIndex(Register builtin_index);
- void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
- Register destination);
- MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return CallBuiltin(static_cast<int>(builtin));
- }
- void CallBuiltin(int builtin_index);
- void TailCallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return TailCallBuiltin(static_cast<int>(builtin));
- }
- void TailCallBuiltin(int builtin_index);
-
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+ void TailCallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -273,7 +273,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Sd(Register rd, const MemOperand& rs);
void push(Register src) {
- Add64(sp, sp, Operand(-kPointerSize));
+ Add64(sp, sp, Operand(-kSystemPointerSize));
Sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
@@ -282,61 +282,64 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- Sub64(sp, sp, Operand(2 * kPointerSize));
- Sd(src1, MemOperand(sp, 1 * kPointerSize));
- Sd(src2, MemOperand(sp, 0 * kPointerSize));
+ Sub64(sp, sp, Operand(2 * kSystemPointerSize));
+ Sd(src1, MemOperand(sp, 1 * kSystemPointerSize));
+ Sd(src2, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- Sub64(sp, sp, Operand(3 * kPointerSize));
- Sd(src1, MemOperand(sp, 2 * kPointerSize));
- Sd(src2, MemOperand(sp, 1 * kPointerSize));
- Sd(src3, MemOperand(sp, 0 * kPointerSize));
+ Sub64(sp, sp, Operand(3 * kSystemPointerSize));
+ Sd(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ Sd(src2, MemOperand(sp, 1 * kSystemPointerSize));
+ Sd(src3, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- Sub64(sp, sp, Operand(4 * kPointerSize));
- Sd(src1, MemOperand(sp, 3 * kPointerSize));
- Sd(src2, MemOperand(sp, 2 * kPointerSize));
- Sd(src3, MemOperand(sp, 1 * kPointerSize));
- Sd(src4, MemOperand(sp, 0 * kPointerSize));
+ Sub64(sp, sp, Operand(4 * kSystemPointerSize));
+ Sd(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ Sd(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ Sd(src3, MemOperand(sp, 1 * kSystemPointerSize));
+ Sd(src4, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- Sub64(sp, sp, Operand(5 * kPointerSize));
- Sd(src1, MemOperand(sp, 4 * kPointerSize));
- Sd(src2, MemOperand(sp, 3 * kPointerSize));
- Sd(src3, MemOperand(sp, 2 * kPointerSize));
- Sd(src4, MemOperand(sp, 1 * kPointerSize));
- Sd(src5, MemOperand(sp, 0 * kPointerSize));
+ Sub64(sp, sp, Operand(5 * kSystemPointerSize));
+ Sd(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ Sd(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ Sd(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ Sd(src4, MemOperand(sp, 1 * kSystemPointerSize));
+ Sd(src5, MemOperand(sp, 0 * kSystemPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
- Sub64(sp, sp, Operand(kPointerSize));
+ Sub64(sp, sp, Operand(kSystemPointerSize));
Sd(src, MemOperand(sp, 0));
}
enum PushArrayOrder { kNormal, kReverse };
void PushArray(Register array, Register size, PushArrayOrder order = kNormal);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
@@ -363,27 +366,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void pop(Register dst) {
Ld(dst, MemOperand(sp, 0));
- Add64(sp, sp, Operand(kPointerSize));
+ Add64(sp, sp, Operand(kSystemPointerSize));
}
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(src1 != src2);
- Ld(src2, MemOperand(sp, 0 * kPointerSize));
- Ld(src1, MemOperand(sp, 1 * kPointerSize));
- Add64(sp, sp, 2 * kPointerSize);
+ Ld(src2, MemOperand(sp, 0 * kSystemPointerSize));
+ Ld(src1, MemOperand(sp, 1 * kSystemPointerSize));
+ Add64(sp, sp, 2 * kSystemPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- Ld(src3, MemOperand(sp, 0 * kPointerSize));
- Ld(src2, MemOperand(sp, 1 * kPointerSize));
- Ld(src1, MemOperand(sp, 2 * kPointerSize));
- Add64(sp, sp, 3 * kPointerSize);
+ Ld(src3, MemOperand(sp, 0 * kSystemPointerSize));
+ Ld(src2, MemOperand(sp, 1 * kSystemPointerSize));
+ Ld(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ Add64(sp, sp, 3 * kSystemPointerSize);
}
- void Pop(uint32_t count = 1) { Add64(sp, sp, Operand(count * kPointerSize)); }
+ void Pop(uint32_t count = 1) {
+ Add64(sp, sp, Operand(count * kSystemPointerSize));
+ }
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
@@ -458,11 +463,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
- if (SmiValuesAre32Bits()) {
- srai(dst, src, kSmiShift);
- } else {
- DCHECK(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
sraiw(dst, src, kSmiShift);
+ } else {
+ srai(dst, src, kSmiShift);
}
}
@@ -577,6 +582,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Change endianness
void ByteSwap(Register dest, Register src, int operand_size);
+ void Clear_if_nan_d(Register rd, FPURegister fs);
+ void Clear_if_nan_s(Register rd, FPURegister fs);
// Convert single to unsigned word.
void Trunc_uw_s(Register rd, FPURegister fs, Register result = no_reg);
@@ -735,7 +742,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Func GetLabelFunction);
// Load an object from the root table.
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Register destination, RootIndex index, Condition cond,
Register src1, const Operand& src2);
@@ -852,6 +859,46 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
+ // ---------------------------------------------------------------------------
+ // Pointer compression Support
+
+ // Loads a field containing a HeapObject and decompresses it if pointer
+ // compression is enabled.
+ void LoadTaggedPointerField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing any tagged value and decompresses it if necessary.
+ void LoadAnyTaggedField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing a tagged signed value and decompresses it if
+ // necessary.
+ void LoadTaggedSignedField(const Register& destination,
+ const MemOperand& field_operand);
+
+ // Loads a field containing smi value and untags it.
+ void SmiUntagField(Register dst, const MemOperand& src);
+
+ // Compresses and stores tagged value to given on-heap location.
+ void StoreTaggedField(const Register& value,
+ const MemOperand& dst_field_operand);
+
+ void DecompressTaggedSigned(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTaggedPointer(const Register& destination,
+ const MemOperand& field_operand);
+ void DecompressTaggedPointer(const Register& destination,
+ const Register& source);
+ void DecompressAnyTagged(const Register& destination,
+ const MemOperand& field_operand);
+ void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
+ if (COMPRESS_POINTERS_BOOL) {
+ Sub32(rd, rs1, rs2);
+ } else {
+ Sub64(rd, rs1, rs2);
+ }
+ }
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -901,11 +948,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
-
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -968,8 +1010,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- RAStatus ra_status, SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
@@ -977,7 +1019,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object, Register address, Register value, RAStatus ra_status,
+ Register object, Operand offset, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
@@ -1098,9 +1140,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support.
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// -------------------------------------------------------------------------
// Stack limit utilities
@@ -1108,7 +1160,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
void LoadStackLimit(Register destination, StackLimitKind kind);
void StackOverflowCheck(Register num_args, Register scratch1,
- Register scratch2, Label* stack_overflow);
+ Register scratch2, Label* stack_overflow,
+ Label* done = nullptr);
// -------------------------------------------------------------------------
// Smi utilities.
@@ -1210,7 +1263,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
// Load the address from the jump table at index and jump to it
auipc(scratch, 0); // Load the current PC into scratch
slli(scratch2, index,
- kPointerSizeLog2); // scratch2 = offset of indexth entry
+ kSystemPointerSizeLog2); // scratch2 = offset of indexth entry
add(scratch2, scratch2,
scratch); // scratch2 = (saved PC) + (offset of indexth entry)
ld(scratch2, scratch2,
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 4aacad611d..69654a4f54 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -13,15 +13,34 @@ namespace v8 {
namespace internal {
// clang-format off
+
#define GENERAL_REGISTERS(V) \
V(zero_reg) V(ra) V(sp) V(gp) V(tp) V(t0) V(t1) V(t2) \
V(fp) V(s1) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \
V(a6) V(a7) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) \
V(s10) V(s11) V(t3) V(t4) V(t5) V(t6)
+// s3: scratch register; s4: scratch register 2 (used in code-generator-riscv64)
+// s6: roots in Javascript code; s7: context register
+// s11: PtrComprCageBaseRegister
+// t3 t5 s10: scratch registers used in scratch_register_list
+
+// t0 t1 t2 t4: caller-saved scratch registers; can be used in macroassembler
+// and builtin-riscv64
+#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) \
+ V(t1) V(t2) V(t4) V(s7) V(s8) V(s9)
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
+#else
+#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(s11)
+#endif
+
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(a0) V(a1) V(a2) V(a3) \
- V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) V(t4)
+ ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
+ MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#define DOUBLE_REGISTERS(V) \
V(ft0) V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) \
@@ -72,8 +91,8 @@ const int kNumJSCallerSaved = 12;
const RegList kCalleeSaved = 1 << 8 | // fp/s0
1 << 9 | // s1
1 << 18 | // s2
- 1 << 19 | // s3
- 1 << 20 | // s4
+ 1 << 19 | // s3 scratch register
+ 1 << 20 | // s4 scratch register 2
1 << 21 | // s5
1 << 22 | // s6 (roots in Javascript code)
1 << 23 | // s7 (cp in Javascript code)
@@ -346,6 +365,12 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+constexpr Register kPtrComprCageBaseRegister = s11; // callee save
+#else
+constexpr Register kPtrComprCageBaseRegister = kRootRegister;
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index da51395dfd..99d95c7ede 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -247,6 +247,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
USE(supportsCPUFeature);
supported_ |= (1u << VECTOR_FACILITY);
supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
+ supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
#endif
supported_ |= (1u << FPU);
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index d672c4354d..a51909b936 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -19,19 +19,38 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs | r2.bit());
+ if (argc >= 2) DCHECK(allocatable_regs | r3.bit());
+ if (argc >= 3) DCHECK(allocatable_regs | r4.bit());
+ if (argc >= 4) DCHECK(allocatable_regs | r5.bit());
+ if (argc >= 5) DCHECK(allocatable_regs | r6.bit());
+ if (argc >= 6) DCHECK(allocatable_regs | r7.bit());
+ if (argc >= 7) DCHECK(allocatable_regs | r8.bit());
+ if (argc >= 8) DCHECK(allocatable_regs | r9.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(r3, r7, r6, r4, r2);
}
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r2);
return RegisterArray(r2, r3, r4, r5, cp);
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == r2);
+ return RegisterArray(r2, r3, r4, r5, cp);
}
// static
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index de25a93d8b..cb67d8d9f9 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -22,7 +22,6 @@
#include "src/logging/counters.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -38,6 +37,21 @@
namespace v8 {
namespace internal {
+namespace {
+
+// For WebAssembly we care about the full floating point (Simd) registers. If we
+// are not running Wasm, we can get away with saving half of those (F64)
+// registers.
+#if V8_ENABLE_WEBASSEMBLY
+constexpr int kStackSavedSavedFPSizeInBytes =
+ kNumCallerSavedDoubles * kSimd128Size;
+#else
+constexpr int kStackSavedSavedFPSizeInBytes =
+ kNumCallerSavedDoubles * kDoubleSize;
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
void TurboAssembler::DoubleMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
@@ -284,7 +298,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
+ bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
@@ -309,8 +323,8 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += NumRegs(list) * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPushDoubles(kCallerSavedDoubles);
- bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
+ MultiPushF64OrV128(kCallerSavedDoubles);
+ bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
@@ -320,8 +334,8 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPopDoubles(kCallerSavedDoubles);
- bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
+ MultiPopF64OrV128(kCallerSavedDoubles);
+ bytes += kStackSavedSavedFPSizeInBytes;
}
RegList exclusions = 0;
@@ -401,17 +415,14 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin_index = Builtin::kNoBuiltinId;
bool target_is_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
b(cond, ip);
return;
}
@@ -462,17 +473,14 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
- int builtin_index = Builtins::kNoBuiltinId;
+ Builtin builtin_index = Builtin::kNoBuiltinId;
bool target_is_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
Call(ip);
return;
}
@@ -708,6 +716,70 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
+void TurboAssembler::MultiPushF64OrV128(RegList dregs, Register location) {
+#if V8_ENABLE_WEBASSEMBLY
+  bool generating_builtins =
+      isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
+  if (generating_builtins) {
+ Label push_doubles, simd_pushed;
+ Move(r1, ExternalReference::supports_wasm_simd_128_address());
+ LoadU8(r1, MemOperand(r1));
+ LoadAndTestP(r1, r1); // If > 0 then simd is available.
+ ble(&push_doubles, Label::kNear);
+ // Save vector registers, don't save double registers anymore.
+ MultiPushV128(dregs);
+ b(&simd_pushed);
+ bind(&push_doubles);
+ // Simd not supported, only save double registers.
+ MultiPushDoubles(dregs);
+ // We still need to allocate empty space on the stack as if
+      // Simd registers were saved (see kFixedFrameSizeFromFp).
+ lay(sp, MemOperand(sp, -(NumRegs(dregs) * kDoubleSize)));
+ bind(&simd_pushed);
+ } else {
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ MultiPushV128(dregs);
+ } else {
+ MultiPushDoubles(dregs);
+ lay(sp, MemOperand(sp, -(NumRegs(dregs) * kDoubleSize)));
+ }
+ }
+#else
+ MultiPushDoubles(dregs);
+#endif
+}
+
+void TurboAssembler::MultiPopF64OrV128(RegList dregs, Register location) {
+#if V8_ENABLE_WEBASSEMBLY
+  bool generating_builtins =
+      isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
+  if (generating_builtins) {
+ Label pop_doubles, simd_popped;
+ Move(r1, ExternalReference::supports_wasm_simd_128_address());
+ LoadU8(r1, MemOperand(r1));
+ LoadAndTestP(r1, r1); // If > 0 then simd is available.
+ ble(&pop_doubles, Label::kNear);
+ // Pop vector registers, don't pop double registers anymore.
+ MultiPopV128(dregs);
+ b(&simd_popped);
+ bind(&pop_doubles);
+ // Simd not supported, only pop double registers.
+ lay(sp, MemOperand(sp, NumRegs(dregs) * kDoubleSize));
+ MultiPopDoubles(dregs);
+ bind(&simd_popped);
+ } else {
+ if (CpuFeatures::SupportsWasmSimd128()) {
+ MultiPopV128(dregs);
+ } else {
+ lay(sp, MemOperand(sp, NumRegs(dregs) * kDoubleSize));
+ MultiPopDoubles(dregs);
+ }
+ }
+#else
+ MultiPopDoubles(dregs);
+#endif
+}
+
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition) {
LoadU64(destination,
@@ -805,7 +877,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
RecordComment("]");
}
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
+ Register value, Register slot_address,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
@@ -823,17 +895,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kSystemPointerSize.
DCHECK(IsAligned(offset, kTaggedSize));
- lay(dst, MemOperand(object, offset - kHeapObjectTag));
+ lay(slot_address, MemOperand(object, offset - kHeapObjectTag));
if (FLAG_debug_code) {
Label ok;
- AndP(r0, dst, Operand(kTaggedSize - 1));
+ AndP(r0, slot_address, Operand(kTaggedSize - 1));
beq(&ok, Label::kNear);
stop();
bind(&ok);
}
- RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- SmiCheck::kOmit);
+ RecordWrite(object, slot_address, value, lr_status, save_fp,
+ remembered_set_action, SmiCheck::kOmit);
bind(&done);
@@ -841,12 +913,12 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
- mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+ mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
}
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -856,8 +928,8 @@ void TurboAssembler::SaveRegisters(RegList registers) {
MultiPush(regs);
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@@ -867,108 +939,95 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
-}
+ StubCallMode mode) {
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
- // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
- // i.e. always emit remember set and save FP registers in RecordWriteStub. If
- // large performance regression is observed, we should use these values to
- // avoid unnecessary work.
-
- RecordWriteDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
Push(object);
- Push(address);
-
- Pop(slot_parameter);
+ Push(slot_address);
+ Pop(slot_address_parameter);
Pop(object_parameter);
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- if (builtin_index == Builtins::kNoBuiltinId) {
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
Call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Call(ip);
+#else
+ if (false) {
+#endif
} else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
- Call(code_target, RelocInfo::CODE_TARGET);
+ auto builtin_index =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ mov(ip, Operand(BuiltinEntry(builtin_index), RelocInfo::OFF_HEAP_TARGET));
+ Call(ip);
+ } else {
+ Handle<Code> code_target =
+ isolate()->builtins()->code_handle(builtin_index);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
}
-
- RestoreRegisters(registers);
}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK(object != value);
+ DCHECK(!AreAliased(object, slot_address, value));
if (FLAG_debug_code) {
- LoadTaggedPointerField(r0, MemOperand(address));
+ LoadTaggedPointerField(r0, MemOperand(slot_address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -997,17 +1056,20 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
push(r14);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStubSaveRegisters(object, slot_address, remembered_set_action,
+ fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
pop(r14);
}
+ if (FLAG_debug_code) mov(slot_address, Operand(kZapValue));
+
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(slot_address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
}
@@ -1969,8 +2031,9 @@ void MacroAssembler::LoadWeakValue(Register out, Register in,
AndP(out, in, Operand(~kWeakHeapObjectMask));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK(value > 0 && is_int8(value));
if (FLAG_native_code_counters && counter->Enabled()) {
Move(scratch2, ExternalReference::Create(counter));
@@ -1981,8 +2044,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
DCHECK(value > 0 && is_int8(value));
if (FLAG_native_code_counters && counter->Enabled()) {
Move(scratch2, ExternalReference::Create(counter));
@@ -3851,7 +3915,9 @@ void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd,
void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
Register scratch1, Register scratch2) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
+ is_uint12(mem.offset());
+ if (use_vstbr) {
vstbr(src, mem, Condition(4));
} else {
vlgv(scratch1, src, MemOperand(r0, 1), Condition(3));
@@ -4732,9 +4798,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
LoadU64(ip, MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 13d7ac696b..fbf2ad0510 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -77,16 +77,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void NearestIntF32(DoubleRegister dst, DoubleRegister src);
void NearestIntF64(DoubleRegister dst, DoubleRegister src);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value);
@@ -104,7 +103,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -123,12 +122,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
- void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltinByIndex(Register builtin_index);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
@@ -153,18 +152,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
@@ -174,6 +176,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushV128(RegList dregs, Register location = sp);
void MultiPopV128(RegList dregs, Register location = sp);
+ void MultiPushF64OrV128(RegList dregs, Register location = sp);
+ void MultiPopF64OrV128(RegList dregs, Register location = sp);
+
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -851,8 +856,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
@@ -1070,11 +1075,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
-
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1196,9 +1196,19 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// ---------------------------------------------------------------------------
// Stack limit utilities
@@ -1352,7 +1362,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
+ Register object, int offset, Register value, Register slot_address,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
@@ -1361,7 +1371,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object, Register address, Register value,
+ Register object, Register slot_address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 6be9444c65..add62a5a49 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -21,6 +21,14 @@ namespace v8 {
namespace internal {
class Assembler;
+// For WebAssembly we care about the full floating point registers. If we are
+// not running Wasm, we can get away with saving half of those registers.
+#if V8_ENABLE_WEBASSEMBLY
+constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
+#else
+constexpr int kStackSavedSavedFPSize = kDoubleSize;
+#endif // V8_ENABLE_WEBASSEMBLY
+
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 27466a2690..6579a419fa 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -87,7 +87,7 @@ void EncodeEntry(ZoneVector<byte>* bytes, const PositionTableEntry& entry) {
// Helper: Decode an integer.
template <typename T>
-T DecodeInt(Vector<const byte> bytes, int* index) {
+T DecodeInt(base::Vector<const byte> bytes, int* index) {
byte current;
int shift = 0;
T decoded = 0;
@@ -105,7 +105,7 @@ T DecodeInt(Vector<const byte> bytes, int* index) {
return decoded;
}
-void DecodeEntry(Vector<const byte> bytes, int* index,
+void DecodeEntry(base::Vector<const byte> bytes, int* index,
PositionTableEntry* entry) {
int tmp = DecodeInt<int>(bytes, index);
if (tmp >= 0) {
@@ -118,9 +118,9 @@ void DecodeEntry(Vector<const byte> bytes, int* index,
entry->source_position = DecodeInt<int64_t>(bytes, index);
}
-Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
- return Vector<const byte>(byte_array.GetDataStartAddress(),
- byte_array.length());
+base::Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
+ return base::Vector<const byte>(byte_array.GetDataStartAddress(),
+ byte_array.length());
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -200,11 +200,12 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
LocalIsolate* isolate);
-OwnedVector<byte> SourcePositionTableBuilder::ToSourcePositionTableVector() {
- if (bytes_.empty()) return OwnedVector<byte>();
+base::OwnedVector<byte>
+SourcePositionTableBuilder::ToSourcePositionTableVector() {
+ if (bytes_.empty()) return base::OwnedVector<byte>();
DCHECK(!Omit());
- OwnedVector<byte> table = OwnedVector<byte>::Of(bytes_);
+ base::OwnedVector<byte> table = base::OwnedVector<byte>::Of(bytes_);
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
@@ -250,7 +251,7 @@ SourcePositionTableIterator::SourcePositionTableIterator(
}
SourcePositionTableIterator::SourcePositionTableIterator(
- Vector<const byte> bytes, IterationFilter iteration_filter,
+ base::Vector<const byte> bytes, IterationFilter iteration_filter,
FunctionEntryFilter function_entry_filter)
: raw_table_(bytes),
iteration_filter_(iteration_filter),
@@ -263,7 +264,7 @@ SourcePositionTableIterator::SourcePositionTableIterator(
}
void SourcePositionTableIterator::Advance() {
- Vector<const byte> bytes =
+ base::Vector<const byte> bytes =
table_.is_null() ? raw_table_ : VectorFromByteArray(*table_);
DCHECK(!done());
DCHECK(index_ >= 0 && index_ <= bytes.length());
diff --git a/deps/v8/src/codegen/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index afd7cc434c..3e7340da3c 100644
--- a/deps/v8/src/codegen/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -6,11 +6,11 @@
#define V8_CODEGEN_SOURCE_POSITION_TABLE_H_
#include "src/base/export-template.h"
+#include "src/base/vector.h"
#include "src/codegen/source-position.h"
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
#include "src/common/globals.h"
-#include "src/utils/vector.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -57,7 +57,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Handle<ByteArray> ToSourcePositionTable(IsolateT* isolate);
- OwnedVector<byte> ToSourcePositionTableVector();
+ base::OwnedVector<byte> ToSourcePositionTableVector();
inline bool Omit() const { return mode_ != RECORD_SOURCE_POSITIONS; }
inline bool Lazy() const { return mode_ == LAZY_SOURCE_POSITIONS; }
@@ -114,7 +114,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
// Handle-safe iterator based on a vector located outside the garbage-collected
// heap; allows allocation during its lifetime.
explicit SourcePositionTableIterator(
- Vector<const byte> bytes,
+ base::Vector<const byte> bytes,
IterationFilter iteration_filter = kJavaScriptOnly,
FunctionEntryFilter function_entry_filter = kSkipFunctionEntry);
@@ -152,7 +152,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
static const int kDone = -1;
- Vector<const byte> raw_table_;
+ base::Vector<const byte> raw_table_;
Handle<ByteArray> table_;
int index_ = 0;
PositionTableEntry current_;
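Note: the DecodeInt/DecodeEntry helpers changed above walk a byte vector with a shift-accumulate loop. For orientation only, here is a generic LEB128-style varint decoder with the same shape; this is a hedged sketch and not necessarily V8's exact source-position encoding.

#include <cstdint>
#include <vector>

// Generic LEB128-style decoder: 7 payload bits per byte, top bit = "more".
// Illustrative only; V8's actual table format may encode sign and layout differently.
uint32_t DecodeVarint(const std::vector<uint8_t>& bytes, int* index) {
  uint32_t decoded = 0;
  int shift = 0;
  uint8_t current;
  do {
    current = bytes[(*index)++];
    decoded |= static_cast<uint32_t>(current & 0x7F) << shift;
    shift += 7;
  } while (current & 0x80);
  return decoded;
}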
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index 3a56fda9d1..0687bb638d 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -349,6 +349,7 @@ class TNode {
TNode(const TNode<U>& other) : node_(other) {
LazyTemplateChecks();
}
+ TNode(const TNode& other) : node_(other) { LazyTemplateChecks(); }
TNode() : TNode(nullptr) {}
TNode operator=(TNode other) {
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index a9f9e08ead..59de4733ff 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -9,6 +9,7 @@
#include "src/codegen/external-reference-encoder.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
namespace v8 {
namespace internal {
@@ -24,6 +25,19 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
}
}
+Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
+ DCHECK(Builtins::IsBuiltinId(builtin));
+ if (isolate_ != nullptr) {
+ Address entry =
+ isolate_->builtin_entry_table()[static_cast<int32_t>(builtin)];
+ DCHECK_EQ(entry, EmbeddedData::FromBlob(isolate_).InstructionStartOfBuiltin(
+ builtin));
+ return entry;
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.InstructionStartOfBuiltin(builtin);
+}
+
void TurboAssemblerBase::IndirectLoadConstant(Register destination,
Handle<HeapObject> object) {
CHECK(root_array_available_);
@@ -31,21 +45,19 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
// Before falling back to the (fairly slow) lookup from the constants table,
// check if any of the fast paths can be applied.
- int builtin_index;
+ Builtin builtin;
RootIndex root_index;
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
- } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
+ } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
// Similar to roots, builtins may be loaded from the builtins table.
- LoadRootRelative(destination,
- RootRegisterOffsetForBuiltinIndex(builtin_index));
+ LoadRootRelative(destination, RootRegisterOffsetForBuiltin(builtin));
} else if (object.is_identical_to(code_object_) &&
- Builtins::IsBuiltinId(maybe_builtin_index_)) {
+ Builtins::IsBuiltinId(maybe_builtin_)) {
// The self-reference loaded through CodeObject() may also be a builtin
// and thus viable for a fast load.
- LoadRootRelative(destination,
- RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
+ LoadRootRelative(destination, RootRegisterOffsetForBuiltin(maybe_builtin_));
} else {
CHECK(isolate()->IsGeneratingEmbeddedBuiltins());
// Ensure the given object is in the builtins constants table and fetch its
@@ -84,9 +96,8 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
}
// static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinIndex(
- int builtin_index) {
- return IsolateData::builtin_slot_offset(builtin_index);
+int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
+ return IsolateData::builtin_slot_offset(builtin);
}
// static
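Note: this file's changes are part of a wider migration from raw `int builtin_index` parameters to the scoped `Builtin` enum. A minimal illustration of the benefit (hypothetical definitions, not the V8 headers): the scoped enum stops plain integers from being passed where a builtin id is expected.

#include <cstdint>

// Hypothetical sketch -- not the V8 definitions.
enum class Builtin : int32_t { kNoBuiltinId = -1, kAbort = 0, kRecordWrite = 1 };

constexpr int32_t SlotOffset(Builtin builtin) {
  // Hypothetical slot size of one system pointer (8 bytes on x64).
  return static_cast<int32_t>(builtin) * 8;
}

static_assert(SlotOffset(Builtin::kRecordWrite) == 8, "conversion is explicit");
// SlotOffset(1);  // would no longer compile: an int is not implicitly a Builtin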
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index e25ee2a629..2f2deadaac 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -15,12 +15,6 @@
namespace v8 {
namespace internal {
-enum class JumpMode {
- kJump, // Does a direct jump to the given address
- kPushAndReturn // Pushes the given address as the current return address and
- // does a return
-};
-
// Common base class for platform-specific TurboAssemblers containing
// platform-independent bits.
// You will encounter two subclasses, TurboAssembler (derives from
@@ -59,33 +53,19 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
bool should_abort_hard() const { return hard_abort_; }
void set_abort_hard(bool v) { hard_abort_ = v; }
- void set_builtin_index(int i) { maybe_builtin_index_ = i; }
+ void set_builtin(Builtin builtin) { maybe_builtin_ = builtin; }
void set_has_frame(bool v) { has_frame_ = v; }
bool has_frame() const { return has_frame_; }
- virtual void Jump(const ExternalReference& reference) = 0;
-
- // Calls the builtin given by the Smi in |builtin|. If builtins are embedded,
- // the trampoline Code object on the heap is not used.
- virtual void CallBuiltinByIndex(Register builtin_index) = 0;
-
- // Calls/jumps to the given Code object. If builtins are embedded, the
- // trampoline Code object on the heap is not used.
- virtual void CallCodeObject(Register code_object) = 0;
- virtual void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) = 0;
-
- // Loads the given Code object's entry point into the destination register.
- virtual void LoadCodeObjectEntry(Register destination,
- Register code_object) = 0;
-
// Loads the given constant or external reference without embedding its direct
// pointer. The produced code is isolate-independent.
void IndirectLoadConstant(Register destination, Handle<HeapObject> object);
void IndirectLoadExternalReference(Register destination,
ExternalReference reference);
+ Address BuiltinEntry(Builtin builtin);
+
virtual void LoadFromConstantsTable(Register destination,
int constant_index) = 0;
@@ -98,11 +78,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
virtual void LoadRoot(Register destination, RootIndex index) = 0;
- virtual void Trap() = 0;
- virtual void DebugBreak() = 0;
-
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
- static int32_t RootRegisterOffsetForBuiltinIndex(int builtin_index);
+ static int32_t RootRegisterOffsetForBuiltin(Builtin builtin);
// Returns the root-relative offset to reference.address().
static intptr_t RootRegisterOffsetForExternalReference(
@@ -124,13 +101,24 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static constexpr int kStackPageSize = 4 * KB;
#endif
- V8_INLINE void RecordCommentForOffHeapTrampoline(int builtin_index) {
+ V8_INLINE std::string CommentForOffHeapTrampoline(const char* prefix,
+ Builtin builtin) {
+ if (!FLAG_code_comments) return "";
+ std::ostringstream str;
+ str << "Inlined Trampoline for " << prefix << " to "
+ << Builtins::name(builtin);
+ return str.str();
+ }
+
+ V8_INLINE void RecordCommentForOffHeapTrampoline(Builtin builtin) {
if (!FLAG_code_comments) return;
std::ostringstream str;
- str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
+ str << "[ Inlined Trampoline to " << Builtins::name(builtin);
RecordComment(str.str().c_str());
}
+ enum class RecordWriteCallMode { kDefault, kWasm };
+
protected:
Isolate* const isolate_ = nullptr;
@@ -147,15 +135,17 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
bool hard_abort_ = false;
// May be set while generating builtins.
- int maybe_builtin_index_ = Builtins::kNoBuiltinId;
+ Builtin maybe_builtin_ = Builtin::kNoBuiltinId;
bool has_frame_ = false;
+ int comment_depth_ = 0;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
};
-// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
-// code during the lifetime of this scope object.
+// Avoids emitting calls to the {Builtin::kAbort} builtin when emitting
+// debug code during the lifetime of this scope object.
class V8_NODISCARD HardAbortScope {
public:
explicit HardAbortScope(TurboAssemblerBase* assembler)
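Note: throughout this patch, paired RecordComment("[ ...") / RecordComment("]") calls are replaced by scoped ASM_CODE_COMMENT helpers. A self-contained sketch of the RAII idea follows; MiniAssembler and ScopedCodeComment are hypothetical stand-ins, the real macros live in src/codegen/assembler.h.

#include <iostream>
#include <string>

// Minimal stand-in for an assembler that can record code comments.
struct MiniAssembler {
  void RecordComment(const std::string& msg) { std::cout << msg << "\n"; }
};

// RAII helper: emits "[ <comment>" on entry and "]" when the scope closes.
class ScopedCodeComment {
 public:
  ScopedCodeComment(MiniAssembler* assm, const std::string& comment) : assm_(assm) {
    assm_->RecordComment("[ " + comment);
  }
  ~ScopedCodeComment() { assm_->RecordComment("]"); }

 private:
  MiniAssembler* assm_;
};

int main() {
  MiniAssembler assm;
  {
    ScopedCodeComment scope(&assm, "DecompressTaggedPointer");
    // ... instructions would be emitted here ...
  }  // closing "]" emitted automatically
}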
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 02879ba354..a617391372 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -40,7 +40,17 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
DCHECK_NE(options().code_range_start, 0);
RecordRelocInfo(rmode);
- emitl(static_cast<uint32_t>(entry - options().code_range_start));
+ uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
+ if (IsOnHeap()) {
+ saved_offsets_for_runtime_entries_.push_back(
+ std::make_pair(pc_offset(), offset));
+ emitl(relative_target_offset(entry, reinterpret_cast<Address>(pc_)));
+ // We must ensure that `emitl` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ } else {
+ emitl(offset);
+ }
}
void Assembler::emit(Immediate x) {
@@ -53,6 +63,18 @@ void Assembler::emit(Immediate x) {
void Assembler::emit(Immediate64 x) {
if (!RelocInfo::IsNone(x.rmode_)) {
RecordRelocInfo(x.rmode_);
+ if (x.rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
+ Address handle_address = reinterpret_cast<Address>(&x.value_);
+ Handle<HeapObject> object = Handle<HeapObject>::cast(
+ ReadUnalignedValue<Handle<Object>>(handle_address));
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(pc_offset(), x.value_));
+ emitq(static_cast<uint64_t>(object->ptr()));
+ // We must ensure that `emitq` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ return;
+ }
}
emitq(static_cast<uint64_t>(x.value_));
}
@@ -234,13 +256,18 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK(is_int32(target - pc - 4));
- WriteUnalignedValue(pc, static_cast<int32_t>(target - pc - 4));
+ WriteUnalignedValue(pc, relative_target_offset(target, pc));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc, sizeof(int32_t));
}
}
+int32_t Assembler::relative_target_offset(Address target, Address pc) {
+ Address offset = target - pc - 4;
+ DCHECK(is_int32(offset));
+ return static_cast<int32_t>(offset);
+}
+
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
WriteUnalignedValue(pc, target);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index eb07f3ba3b..1a99afa8dd 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -536,6 +536,8 @@ bool Assembler::is_optimizable_farjmp(int idx) {
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
+ bool previously_on_heap = buffer_->IsOnHeap();
+
// Compute new buffer size.
DCHECK_EQ(buffer_start_, buffer_->start());
int old_size = buffer_->size();
@@ -573,6 +575,17 @@ void Assembler::GrowBuffer() {
WriteUnalignedValue(p, ReadUnalignedValue<intptr_t>(p) + pc_delta);
}
+ // Patch on-heap references to handles.
+ if (previously_on_heap && !buffer_->IsOnHeap()) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ for (auto p : saved_offsets_for_runtime_entries_) {
+ WriteUnalignedValue<uint32_t>(base + p.first, p.second);
+ }
+ }
+
DCHECK(!buffer_overflow());
}
@@ -2034,7 +2047,7 @@ void Assembler::Nop(int n) {
EnsureSpace ensure_space(this);
int nop_bytes = std::min(n, 9);
const char* sequence = kNopSequences + kNopOffsets[nop_bytes];
- base::Memcpy(pc_, sequence, nop_bytes);
+ memcpy(pc_, sequence, nop_bytes);
pc_ += nop_bytes;
n -= nop_bytes;
} while (n);
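Note: the emit_runtime_entry / emit(Immediate64) changes above, together with this GrowBuffer hook, form a record-and-repair scheme for code assembled directly into an on-heap buffer: pc-relative encodings are emitted while the code stays on-heap, and the remembered slots are rewritten with position-independent values if the buffer has to move. A simplified stand-alone sketch of that pattern (hypothetical types, not the V8 Assembler API):

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

struct PatchableBuffer {
  std::vector<uint8_t> bytes;
  // Each entry records where a relative value was emitted and the
  // position-independent fallback to write if the buffer moves.
  std::vector<std::pair<size_t, uint32_t>> saved_offsets;

  void EmitRelative(uint32_t relative_value, uint32_t fallback_value) {
    saved_offsets.emplace_back(bytes.size(), fallback_value);
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&relative_value);
    bytes.insert(bytes.end(), p, p + sizeof(relative_value));
  }

  // Called once the buffer is relocated and relative encodings become invalid.
  void RepatchAfterMove() {
    for (const auto& [offset, fallback] : saved_offsets) {
      std::memcpy(&bytes[offset], &fallback, sizeof(fallback));
    }
  }
};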
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index e6205311c2..6c64f8ded9 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -438,6 +438,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static inline void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ static inline int32_t relative_target_offset(Address target, Address pc);
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index a24330a4c7..50ba12b836 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -18,22 +18,63 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
return registers;
}
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data,
+ int nof_expected_args) {
+ RegList allocatable_regs = data->allocatable_registers();
+  if (nof_expected_args >= 1) DCHECK(allocatable_regs & arg_reg_1.bit());
+  if (nof_expected_args >= 2) DCHECK(allocatable_regs & arg_reg_2.bit());
+  if (nof_expected_args >= 3) DCHECK(allocatable_regs & arg_reg_3.bit());
+  if (nof_expected_args >= 4) DCHECK(allocatable_regs & arg_reg_4.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
+// static
+constexpr auto WriteBarrierDescriptor::registers() {
+#if V8_TARGET_OS_WIN
+ return RegisterArray(rdi, r8, rcx, rax, r9, rdx, rsi);
+#else
+ return RegisterArray(rdi, rbx, rdx, rcx, rax, rsi);
+#endif // V8_TARGET_OS_WIN
+}
+
+#ifdef V8_IS_TSAN
+// static
+constexpr auto TSANRelaxedStoreDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
+}
+
// static
-constexpr auto RecordWriteDescriptor::registers() {
- return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
- kReturnRegister0);
+constexpr auto TSANRelaxedLoadDescriptor::registers() {
+ return RegisterArray(arg_reg_1, kReturnRegister0);
}
+#endif // V8_IS_TSAN
// static
constexpr auto DynamicCheckMapsDescriptor::registers() {
+#if V8_TARGET_OS_WIN
return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
kRuntimeCallFunctionRegister, kContextRegister);
+#else
+ STATIC_ASSERT(kContextRegister == arg_reg_2);
+ return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
+ kRuntimeCallFunctionRegister);
+#endif // V8_TARGET_OS_WIN
}
// static
-constexpr auto EphemeronKeyBarrierDescriptor::registers() {
- return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
- kReturnRegister0);
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+#if V8_TARGET_OS_WIN
+ return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
+ kRuntimeCallFunctionRegister, kContextRegister);
+#else
+ STATIC_ASSERT(kContextRegister == arg_reg_2);
+ return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
+ kRuntimeCallFunctionRegister);
+#endif // V8_TARGET_OS_WIN
}
// static
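Note: the V8_TARGET_OS_WIN split above follows from the two x64 calling conventions. A short reference summary (standard ABI facts; the kContextRegister mapping is as defined in V8's register-x64.h):

// System V AMD64 (Linux/macOS) integer argument registers: rdi, rsi, rdx, rcx, r8, r9
// Windows x64 integer argument registers:                  rcx, rdx, r8, r9
// V8's x64 context register is rsi, so on System V targets arg_reg_2 already is
// kContextRegister (hence the STATIC_ASSERT and the shorter register array),
// while on Windows the context has to be passed as an additional register.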
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 53f3f97f9a..e670da113e 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -27,7 +27,6 @@
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
@@ -293,40 +292,38 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
- RecordComment("[ DecompressTaggedSigned");
+ ASM_CODE_COMMENT(this);
movl(destination, field_operand);
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
- RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
- RecordComment("[ DecompressTaggedPointer");
+ ASM_CODE_COMMENT(this);
movl(destination, source);
addq(destination, kPtrComprCageBaseRegister);
- RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
- RecordComment("[ DecompressAnyTagged");
+ ASM_CODE_COMMENT(this);
movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister);
- RecordComment("]");
}
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
+ Register value, Register slot_address,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, value, slot_address));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -340,16 +337,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- leaq(dst, FieldOperand(object, offset));
+ leaq(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok;
- testb(dst, Immediate(kTaggedSize - 1));
+ testb(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
}
- RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
SmiCheck::kOmit);
bind(&done);
@@ -357,23 +355,16 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
Move(value, kZapValue, RelocInfo::NONE);
- Move(dst, kZapValue, RelocInfo::NONE);
- }
-}
-
-void TurboAssembler::SaveRegisters(RegList registers) {
- DCHECK_GT(NumRegs(registers), 0);
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if ((registers >> i) & 1u) {
- pushq(Register::from_code(i));
- }
+ Move(slot_address, kZapValue, RelocInfo::NONE);
}
}
void TurboAssembler::LoadExternalPointerField(
Register destination, Operand field_operand, ExternalPointerTag tag,
Register scratch, IsolateRootLocation isolateRootLocation) {
+ DCHECK(!AreAliased(destination, scratch));
#ifdef V8_HEAP_SANDBOX
DCHECK(!field_operand.AddressUsesRegister(scratch));
if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
@@ -398,7 +389,18 @@ void TurboAssembler::LoadExternalPointerField(
#endif // V8_HEAP_SANDBOX
}
-void TurboAssembler::RestoreRegisters(RegList registers) {
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ DCHECK_GT(NumRegs(registers), 0);
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ pushq(Register::from_code(i));
+ }
+ }
+}
+
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
DCHECK_GT(NumRegs(registers), 0);
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
if ((registers >> i) & 1u) {
@@ -407,99 +409,169 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
}
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+ Register slot_address,
SaveFPRegsMode fp_mode) {
- EphemeronKeyBarrierDescriptor descriptor;
- RegList registers = descriptor.allocatable_registers();
-
- SaveRegisters(registers);
-
- Register object_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
- Register slot_parameter(descriptor.GetRegisterParameter(
- EphemeronKeyBarrierDescriptor::kSlotAddress));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
-
- MovePair(slot_parameter, address, object_parameter, object);
- Smi smi_fm = Smi::FromEnum(fp_mode);
- Move(fp_mode_parameter, smi_fm);
- Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MovePair(slot_address_parameter, slot_address, object_parameter, object);
+
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
RelocInfo::CODE_TARGET);
-
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kRecordWrite, kNullAddress);
-}
-
-void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
- Builtins::kNoBuiltinId, wasm_target);
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, slot_address));
+ RegList registers =
+ WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
+ MaybeSaveRegisters(registers);
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+ MovePair(object_parameter, object, slot_address_parameter, slot_address);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+ MaybeRestoreRegisters(registers);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
+ Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- int builtin_index, Address wasm_target) {
- DCHECK_NE(builtin_index == Builtins::kNoBuiltinId,
- wasm_target == kNullAddress);
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ // Use {near_call} for direct Wasm call within a module.
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
+ near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
+#else
+ if (false) {
+#endif
+ } else {
+ Builtin builtin =
+ Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ CallBuiltin(builtin);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+ }
+}
- RecordWriteDescriptor descriptor;
+#ifdef V8_IS_TSAN
+void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(address, value));
+ TSANRelaxedStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
- SaveRegisters(registers);
+ MaybeSaveRegisters(registers);
- Register object_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
- Register slot_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(
- descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
+ Register address_parameter(
+ descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+ Register value_parameter(
+ descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
- // Prepare argument registers for calling RecordWrite
- // slot_parameter <= address
- // object_parameter <= object
- MovePair(slot_parameter, address, object_parameter, object);
+ // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+ MovePair(address_parameter, address, value_parameter, value);
- Smi smi_rsa = Smi::FromEnum(remembered_set_action);
- Smi smi_fm = Smi::FromEnum(fp_mode);
- Move(remembered_set_parameter, smi_rsa);
- if (smi_rsa != smi_fm) {
- Move(fp_mode_parameter, smi_fm);
- } else {
- movq(fp_mode_parameter, remembered_set_parameter);
+ if (isolate()) {
+ Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
}
- if (builtin_index == Builtins::kNoBuiltinId) {
+#if V8_ENABLE_WEBASSEMBLY
+ // There are two different kinds of wasm-to-js functions: one lives in the
+ // wasm code space, and another one lives on the heap. Both of them have the
+ // same CodeKind (WASM_TO_JS_FUNCTION), but depending on where they are they
+ // have to either use the wasm stub calls, or call the builtin using the
+ // isolate like JS does. In order to know which wasm-to-js function we are
+ // compiling right now, we check if the isolate is null.
+ // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
+ // different CodeKinds and pass the CodeKind as a parameter so that we can use
+ // that instead of a nullptr check.
+ // NOLINTNEXTLINE(readability/braces)
+ else {
+ DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
+ auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
- } else if (options().inline_offheap_trampolines) {
- CallBuiltin(builtin_index);
- } else {
- Handle<Code> code_target =
- isolate()->builtins()->builtin_handle(Builtins::kRecordWrite);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode) {
+ TSANRelaxedLoadDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+
+ MaybeSaveRegisters(registers);
+
+ Register address_parameter(
+ descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+
+ // Prepare argument registers for calling TSANRelaxedLoad.
+ Move(address_parameter, address);
+
+ if (isolate()) {
+ Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
+ // There are two different kinds of wasm-to-js functions: one lives in the
+ // wasm code space, and another one lives on the heap. Both of them have the
+ // same CodeKind (WASM_TO_JS_FUNCTION), but depending on where they are they
+ // have to either use the wasm stub calls, or call the builtin using the
+ // isolate like JS does. In order to know which wasm-to-js function we are
+ // compiling right now, we check if the isolate is null.
+ // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
+ // different CodeKinds and pass the CodeKind as a parameter so that we can use
+ // that instead of a nullptr check.
+ // NOLINTNEXTLINE(readability/braces)
+ else {
+ DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
+ // Use {near_call} for direct Wasm call within a module.
+ auto wasm_target = wasm::WasmCode::GetTSANRelaxedLoadStub(fp_mode, size);
+ near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
- RestoreRegisters(registers);
+ MaybeRestoreRegisters(registers);
}
+#endif // V8_IS_TSAN
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK(object != value);
- DCHECK(object != address);
- DCHECK(value != address);
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(object, slot_address, value));
AssertNotSmi(object);
if ((remembered_set_action == RememberedSetAction::kOmit &&
@@ -509,8 +581,9 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
if (FLAG_debug_code) {
+ ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok;
- cmp_tagged(value, Operand(address, 0));
+ cmp_tagged(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -535,14 +608,15 @@ void MacroAssembler::RecordWrite(Register object, Register address,
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- Move(address, kZapValue, RelocInfo::NONE);
+ ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
+ Move(slot_address, kZapValue, RelocInfo::NONE);
Move(value, kZapValue, RelocInfo::NONE);
}
}
@@ -567,6 +641,7 @@ void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kSystemPointerSize) {
+ ASM_CODE_COMMENT(this);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
@@ -578,6 +653,7 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
+ ASM_CODE_COMMENT(this);
if (FLAG_code_comments) {
const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
@@ -593,7 +669,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
FrameScope assume_frame(this, StackFrame::NONE);
- movl(arg_reg_1, Immediate(static_cast<int>(reason)));
+ Move(arg_reg_1, static_cast<int>(reason));
PrepareCallCFunction(1);
LoadAddress(rax, ExternalReference::abort_with_reason());
call(rax);
@@ -616,6 +692,7 @@ void TurboAssembler::Abort(AbortReason reason) {
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
+ ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -642,7 +719,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// For runtime functions with variable arguments:
// -- rax : number of arguments
// -----------------------------------
-
+ ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
@@ -653,6 +730,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
+ ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
@@ -679,7 +757,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
// R12 to r15 are callee save on all platforms.
if (fp_mode == SaveFPRegsMode::kSave) {
- bytes += kDoubleSize * XMMRegister::kNumRegisters;
+ bytes += kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
}
return bytes;
@@ -687,6 +765,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
@@ -701,11 +780,15 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
// R12 to r15 are callee save on all platforms.
if (fp_mode == SaveFPRegsMode::kSave) {
- int delta = kDoubleSize * XMMRegister::kNumRegisters;
+ const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
AllocateStackSpace(delta);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- Movsd(Operand(rsp, i * kDoubleSize), reg);
+#if V8_ENABLE_WEBASSEMBLY
+ Movdqu(Operand(rsp, i * kStackSavedSavedFPSize), reg);
+#else
+ Movsd(Operand(rsp, i * kStackSavedSavedFPSize), reg);
+#endif // V8_ENABLE_WEBASSEMBLY
}
bytes += delta;
}
@@ -715,14 +798,19 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
+ ASM_CODE_COMMENT(this);
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- Movsd(reg, Operand(rsp, i * kDoubleSize));
+#if V8_ENABLE_WEBASSEMBLY
+ Movdqu(reg, Operand(rsp, i * kStackSavedSavedFPSize));
+#else
+ Movsd(reg, Operand(rsp, i * kStackSavedSavedFPSize));
+#endif // V8_ENABLE_WEBASSEMBLY
}
- int delta = kDoubleSize * XMMRegister::kNumRegisters;
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
+ addq(rsp, Immediate(delta));
bytes += delta;
}
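Note: a quick size check for the caller-saved FP area handled in the two hunks above (illustrative arithmetic, assuming the 16 XMM registers of x64):

// With V8_ENABLE_WEBASSEMBLY: kStackSavedSavedFPSize = 2 * kDoubleSize = 16 bytes,
//   so 16 XMM registers * 16 bytes = 256 bytes, saved with Movdqu so the full
//   128-bit value (e.g. a live Wasm SIMD value) survives the call.
// Without it:                 kStackSavedSavedFPSize = kDoubleSize = 8 bytes,
//   so 16 * 8 = 128 bytes, and Movsd preserves only the low double of each register.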
@@ -1613,14 +1701,14 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
Label skip;
if (cc != always) {
if (cc == never) return;
j(NegateCondition(cc), &skip, Label::kNear);
}
- TailCallBuiltin(builtin_index);
+ TailCallBuiltin(builtin);
bind(&skip);
return;
}
@@ -1656,10 +1744,10 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
+ Builtin builtin = Builtin::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
// Inline the trampoline.
- CallBuiltin(builtin_index);
+ CallBuiltin(builtin);
return;
}
}
@@ -1667,11 +1755,10 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
- Builtins::Name builtin_index) {
+Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
DCHECK(root_array_available());
return Operand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(builtin_index));
+ IsolateData::builtin_entry_slot_offset(builtin));
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
@@ -1695,43 +1782,29 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(EntryFromBuiltinIndexAsOperand(builtin_index));
}
-void TurboAssembler::CallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
if (options().short_builtin_calls) {
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- call(entry, RelocInfo::RUNTIME_ENTRY);
-
+ call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
+ Move(kScratchRegister, BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
- RecordComment("]");
}
-void TurboAssembler::TailCallBuiltin(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- RecordCommentForOffHeapTrampoline(builtin_index);
- CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this,
+ CommentForOffHeapTrampoline("tail call", builtin));
if (options().short_builtin_calls) {
- EmbeddedData d = EmbeddedData::FromBlob(isolate());
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
+ jmp(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
} else {
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
}
- RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
+ ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime.
@@ -1791,7 +1864,74 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
}
}
+void TurboAssembler::LoadCodeDataContainerEntry(
+ Register destination, Register code_data_container_object) {
+ ASM_CODE_COMMENT(this);
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ LoadExternalPointerField(
+ destination,
+ FieldOperand(code_data_container_object,
+ CodeDataContainer::kCodeEntryPointOffset),
+ kCodeEntryPointTag, kScratchRegister);
+}
+
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+ Register destination, Register code_data_container_object) {
+ ASM_CODE_COMMENT(this);
+ LoadTaggedPointerField(
+ destination,
+ FieldOperand(code_data_container_object, CodeDataContainer::kCodeOffset));
+}
+
+void TurboAssembler::CallCodeDataContainerObject(
+ Register code_data_container_object) {
+ LoadCodeDataContainerEntry(code_data_container_object,
+ code_data_container_object);
+ call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+ Register code_data_container_object, JumpMode jump_mode) {
+ LoadCodeDataContainerEntry(code_data_container_object,
+ code_data_container_object);
+ switch (jump_mode) {
+ case JumpMode::kJump:
+ jmp(code_data_container_object);
+ return;
+ case JumpMode::kPushAndReturn:
+ pushq(code_data_container_object);
+ Ret();
+ return;
+ }
+}
+
+void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
+ ASM_CODE_COMMENT(this);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ LoadCodeDataContainerEntry(destination, code);
+ } else {
+ leaq(destination, Operand(code, Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeTObject(Register code) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ CallCodeDataContainerObject(code);
+ } else {
+ CallCodeObject(code);
+ }
+}
+
+void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ JumpCodeDataContainerObject(code, jump_mode);
+ } else {
+ JumpCodeObject(code, jump_mode);
+ }
+}
+
void TurboAssembler::RetpolineCall(Register reg) {
+ ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below.
@@ -1817,6 +1957,7 @@ void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
}
void TurboAssembler::RetpolineJump(Register reg) {
+ ASM_CODE_COMMENT(this);
Label setup_target, capture_spec;
call(&setup_target);
@@ -2360,7 +2501,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) {
Label not_zero_src;
bsrl(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(63)); // 63^31 == 32
+ Move(dst, 63); // 63^31 == 32
bind(&not_zero_src);
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
@@ -2374,7 +2515,7 @@ void TurboAssembler::Lzcntl(Register dst, Operand src) {
Label not_zero_src;
bsrl(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(63)); // 63^31 == 32
+ Move(dst, 63); // 63^31 == 32
bind(&not_zero_src);
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
@@ -2388,7 +2529,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) {
Label not_zero_src;
bsrq(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(127)); // 127^63 == 64
+ Move(dst, 127); // 127^63 == 64
bind(&not_zero_src);
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
@@ -2402,7 +2543,7 @@ void TurboAssembler::Lzcntq(Register dst, Operand src) {
Label not_zero_src;
bsrq(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(127)); // 127^63 == 64
+ Move(dst, 127); // 127^63 == 64
bind(&not_zero_src);
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
@@ -2417,7 +2558,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) {
bsfq(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
// Define the result of tzcnt(0) separately, because bsf(0) is undefined.
- movl(dst, Immediate(64));
+ Move(dst, 64);
bind(&not_zero_src);
}
@@ -2431,7 +2572,7 @@ void TurboAssembler::Tzcntq(Register dst, Operand src) {
bsfq(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
// Define the result of tzcnt(0) separately, because bsf(0) is undefined.
- movl(dst, Immediate(64));
+ Move(dst, 64);
bind(&not_zero_src);
}
@@ -2444,7 +2585,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) {
Label not_zero_src;
bsfl(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ Move(dst, 32); // The result of tzcnt is 32 if src = 0.
bind(&not_zero_src);
}
@@ -2457,7 +2598,7 @@ void TurboAssembler::Tzcntl(Register dst, Operand src) {
Label not_zero_src;
bsfl(dst, src);
j(not_zero, &not_zero_src, Label::kNear);
- movl(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ Move(dst, 32); // The result of tzcnt is 32 if src = 0.
bind(&not_zero_src);
}
@@ -2554,73 +2695,87 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (FLAG_debug_code) {
- Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
void MacroAssembler::AssertSmi(Register object) {
- if (FLAG_debug_code) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, AbortReason::kOperandIsNotASmi);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
void MacroAssembler::AssertSmi(Operand object) {
- if (FLAG_debug_code) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, AbortReason::kOperandIsNotASmi);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, AbortReason::kOperandIsNotASmi);
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
- if (FLAG_debug_code) {
- DCHECK_NE(int32_register, kScratchRegister);
- movq(kScratchRegister, int64_t{0x0000000100000000});
- cmpq(kScratchRegister, int32_register);
- Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(int32_register, kScratchRegister);
+ movq(kScratchRegister, int64_t{0x0000000100000000});
+ cmpq(kScratchRegister, int32_register);
+ Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
+}
+
+void MacroAssembler::AssertCodeT(Register object) {
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsNotACodeT);
+ Push(object);
+ LoadMap(object, object);
+ CmpInstanceType(object, V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE
+ : CODE_TYPE);
+ Pop(object);
+ Check(equal, AbortReason::kOperandIsNotACodeT);
}
void MacroAssembler::AssertConstructor(Register object) {
- if (FLAG_debug_code) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
- Push(object);
- LoadMap(object, object);
- testb(FieldOperand(object, Map::kBitFieldOffset),
- Immediate(Map::Bits1::IsConstructorBit::kMask));
- Pop(object);
- Check(not_zero, AbortReason::kOperandIsNotAConstructor);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
+ Push(object);
+ LoadMap(object, object);
+ testb(FieldOperand(object, Map::kBitFieldOffset),
+ Immediate(Map::Bits1::IsConstructorBit::kMask));
+ Pop(object);
+ Check(not_zero, AbortReason::kOperandIsNotAConstructor);
}
void MacroAssembler::AssertFunction(Register object) {
- if (FLAG_debug_code) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
- Push(object);
- LoadMap(object, object);
- CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
- Pop(object);
- Check(below_equal, AbortReason::kOperandIsNotAFunction);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
+ Push(object);
+ LoadMap(object, object);
+ CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ Pop(object);
+ Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (FLAG_debug_code) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
- Push(object);
- CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
- Pop(object);
- Check(equal, AbortReason::kOperandIsNotABoundFunction);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, AbortReason::kOperandIsNotABoundFunction);
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2648,19 +2803,19 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (FLAG_debug_code) {
- Label done_checking;
- AssertNotSmi(object);
- Cmp(object, isolate()->factory()->undefined_value());
- j(equal, &done_checking);
- Register map = object;
- Push(object);
- LoadMap(map, object);
- Cmp(map, isolate()->factory()->allocation_site_map());
- Pop(object);
- Assert(equal, AbortReason::kExpectedUndefinedOrCell);
- bind(&done_checking);
- }
+ if (!FLAG_debug_code) return;
+ ASM_CODE_COMMENT(this);
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Register map = object;
+ Push(object);
+ LoadMap(map, object);
+ Cmp(map, isolate()->factory()->allocation_site_map());
+ Pop(object);
+ Assert(equal, AbortReason::kExpectedUndefinedOrCell);
+ bind(&done_checking);
}
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
@@ -2670,9 +2825,10 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
}
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external
@@ -2686,9 +2842,10 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
}
}
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
+ ASM_CODE_COMMENT(this);
Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external
@@ -2705,6 +2862,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the destination address where we will put the return address
@@ -2753,6 +2911,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
@@ -2776,6 +2935,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
InvokeType type) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, rdi);
@@ -2807,10 +2967,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
switch (type) {
case InvokeType::kCall:
- CallCodeObject(rcx);
+ CallCodeTObject(rcx);
break;
case InvokeType::kJump:
- JumpCodeObject(rcx);
+ JumpCodeTObject(rcx);
break;
}
jmp(&done, Label::kNear);
@@ -2842,6 +3002,7 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck(
Register num_args, Register scratch, Label* stack_overflow,
Label::Distance stack_overflow_distance) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(num_args, scratch);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -2865,7 +3026,11 @@ void MacroAssembler::StackOverflowCheck(
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeType type) {
- if (expected_parameter_count != actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
+ if (expected_parameter_count == actual_parameter_count) {
+ Move(rax, actual_parameter_count);
+ return;
+ }
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
@@ -2925,14 +3090,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
int3(); // This should be unreachable.
}
bind(&regular_invoke);
- } else {
- Move(rax, actual_parameter_count);
- }
}
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
@@ -2962,12 +3125,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
void TurboAssembler::Prologue() {
+ ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp);
Push(kContextRegister); // Callee's context.
@@ -2976,6 +3141,7 @@ void TurboAssembler::Prologue() {
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
pushq(rbp);
movq(rbp, rsp);
if (!StackFrame::IsJavaScript(type)) {
@@ -2984,6 +3150,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ ASM_CODE_COMMENT(this);
// TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the
// interpreter too.
@@ -2998,6 +3165,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+ ASM_CODE_COMMENT(this);
// On Windows and on macOS, we cannot increment the stack size by more than
// one page (minimum page size is 4KB) without accessing at least one byte on
// the page. Check this:
@@ -3019,6 +3187,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
}
void TurboAssembler::AllocateStackSpace(int bytes) {
+ ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) {
subq(rsp, Immediate(kStackPageSize));
@@ -3032,6 +3201,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -3064,11 +3234,17 @@ void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
rbx);
}
+#ifdef V8_TARGET_OS_WIN
+static const int kRegisterPassedArguments = 4;
+#else
+static const int kRegisterPassedArguments = 6;
+#endif
+
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) {
+ ASM_CODE_COMMENT(this);
#ifdef V8_TARGET_OS_WIN
- const int kShadowSpace = 4;
- arg_stack_space += kShadowSpace;
+ arg_stack_space += kRegisterPassedArguments;
#endif
// Optionally save all XMM registers.
if (save_doubles) {
@@ -3100,6 +3276,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
StackFrame::Type frame_type) {
+ ASM_CODE_COMMENT(this);
Register saved_rax_reg = r12;
EnterExitFramePrologue(saved_rax_reg, frame_type);
@@ -3112,11 +3289,13 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
+ ASM_CODE_COMMENT(this);
EnterExitFramePrologue(no_reg, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false);
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
+ ASM_CODE_COMMENT(this);
// Registers:
// r15 : argv
if (save_doubles) {
@@ -3148,6 +3327,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
}
void MacroAssembler::LeaveApiExitFrame() {
+ ASM_CODE_COMMENT(this);
movq(rsp, rbp);
popq(rbp);
@@ -3155,29 +3335,25 @@ void MacroAssembler::LeaveApiExitFrame() {
}
void MacroAssembler::LeaveExitFrameEpilogue() {
+ ASM_CODE_COMMENT(this);
// Restore current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
Operand context_operand = ExternalReferenceAsOperand(context_address);
movq(rsi, context_operand);
#ifdef DEBUG
- movq(context_operand, Immediate(Context::kInvalidContext));
+ Move(context_operand, Context::kInvalidContext);
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ Move(c_entry_fp_operand, 0);
}
-#ifdef V8_TARGET_OS_WIN
-static const int kRegisterPassedArguments = 4;
-#else
-static const int kRegisterPassedArguments = 6;
-#endif
-
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
+ ASM_CODE_COMMENT(this);
// Load native context.
LoadMap(dst, rsi);
LoadTaggedPointerField(
@@ -3206,6 +3382,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
}
void TurboAssembler::PrepareCallCFunction(int num_arguments) {
+ ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment();
DCHECK_NE(frame_alignment, 0);
DCHECK_GE(num_arguments, 0);
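
PrepareCallCFunction above aligns rsp and reserves the slots that ArgumentStackSlotsForCFunctionCall computes. A standalone sketch (not the V8 implementation) of that slot count under the usual x64 conventions, where arguments beyond the register-passed ones spill to the stack and Win64 always reserves its four shadow slots:

    #include <algorithm>
    #include <cstdio>

    #ifdef _WIN64
    constexpr int kRegisterPassedArguments = 4;
    constexpr int kShadowSlots = 4;  // always reserved by the caller on Win64
    #else
    constexpr int kRegisterPassedArguments = 6;
    constexpr int kShadowSlots = 0;
    #endif

    int StackSlotsForCCall(int num_arguments) {
      return kShadowSlots + std::max(0, num_arguments - kRegisterPassedArguments);
    }

    int main() {
      int bytes = StackSlotsForCCall(7) * 8;
      int aligned = (bytes + 15) & ~15;  // keep rsp 16-byte aligned at the call
      std::printf("%d bytes of outgoing argument space\n", aligned);
    }
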
@@ -3223,11 +3400,13 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
+ ASM_CODE_COMMENT(this);
LoadAddress(rax, function);
CallCFunction(rax, num_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
@@ -3303,6 +3482,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
+ ASM_CODE_COMMENT(this);
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
andq(scratch, Immediate(~kPageAlignmentMask));
@@ -3332,13 +3512,14 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Move(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
- Label* exit, DeoptimizeKind kind,
- Label* ret, Label*) {
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ ASM_CODE_COMMENT(this);
// Note: Assembler::call is used here on purpose to guarantee fixed-size
// exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
// performance tuning which emits a different instruction sequence.
- call(EntryFromBuiltinIndexAsOperand(target));
+ call(EntryFromBuiltinAsOperand(target));
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
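
The fixed-size guarantee enforced by the comment and the SizeOfCodeGeneratedSince check is what allows each deopt exit's address to be derived from its index. A minimal sketch of that arithmetic, with the exit size as an assumed constant:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kDeoptExitSize = 7;  // assumed size of one fixed exit

    // With identical exit sizes, the exit area behaves like an array.
    uintptr_t ExitAddress(uintptr_t first_exit_pc, int index) {
      return first_exit_pc + index * kDeoptExitSize;
    }

    int main() {
      std::printf("exit 3 at %#lx\n", (unsigned long)ExitAddress(0x1000, 3));
    }
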
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index da5cf7dae3..c303eed9e2 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -123,7 +123,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Ret(int bytes_dropped, Register scratch);
// Operations on roots in the root-array.
- void LoadRoot(Register destination, RootIndex index) override;
+ void LoadRoot(Register destination, RootIndex index) final;
void LoadRoot(Operand destination, RootIndex index) {
LoadRoot(kScratchRegister, index);
movq(destination, kScratchRegister);
@@ -251,9 +251,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Move(Register dst, intptr_t x) {
if (x == 0) {
xorl(dst, dst);
- } else if (is_uint8(x)) {
- xorl(dst, dst);
- movb(dst, Immediate(static_cast<uint32_t>(x)));
+ // The following shorter sequence for uint8 causes performance
+ // regressions:
+ // xorl(dst, dst); movb(dst,
+ // Immediate(static_cast<uint32_t>(x)));
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
} else if (is_int32(x)) {
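
The Move(Register, intptr_t) cases visible above pick the cheapest encoding for an immediate, and the uint8 special case was dropped because, per the new comment, it regressed performance. A standalone sketch of the remaining selection order, relying on the fact that writing a 32-bit register zero-extends on x64:

    #include <cstdint>
    #include <cstdio>

    const char* MoveEncoding(int64_t x) {
      if (x == 0) return "xorl dst, dst    (no immediate at all)";
      if (x >= 0 && x <= UINT32_MAX) return "movl dst, imm32  (zero-extended)";
      if (x >= INT32_MIN && x <= INT32_MAX) return "movq dst, imm32  (sign-extended)";
      return "movq dst, imm64";
    }

    int main() {
      std::printf("%s\n", MoveEncoding(0));
      std::printf("%s\n", MoveEncoding(0x80000000LL));  // uint32 but not int32
      std::printf("%s\n", MoveEncoding(-1));            // negative int32
      std::printf("%s\n", MoveEncoding(1LL << 40));     // needs the full form
    }
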
@@ -320,10 +321,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// register.
void LoadAddress(Register destination, ExternalReference source);
- void LoadFromConstantsTable(Register destination,
- int constant_index) override;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
- void LoadRootRelative(Register destination, int32_t offset) override;
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
@@ -341,42 +341,53 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
- Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
+ Operand EntryFromBuiltinAsOperand(Builtin builtin_index);
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
- void CallBuiltinByIndex(Register builtin_index) override;
- void CallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return CallBuiltin(static_cast<int>(builtin));
- }
- void CallBuiltin(int builtin_index);
- void TailCallBuiltin(Builtins::Name builtin) {
- // TODO(11527): drop the int overload in favour of the Builtins::Name one.
- return TailCallBuiltin(static_cast<int>(builtin));
- }
- void TailCallBuiltin(int builtin_index);
+ void CallBuiltinByIndex(Register builtin_index);
+ void CallBuiltin(Builtin builtin);
+ void TailCallBuiltin(Builtin builtin);
- void LoadCodeObjectEntry(Register destination, Register code_object) override;
- void CallCodeObject(Register code_object) override;
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump) override;
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Load code entry point from the CodeDataContainer object.
+ void LoadCodeDataContainerEntry(Register destination,
+ Register code_data_container_object);
+ // Load code entry point from the CodeDataContainer object and compute
+ // Code object pointer out of it. Must not be used for CodeDataContainers
+  // corresponding to builtins, because their entry point values point to
+  // the embedded instruction stream in the .text section.
+ void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+ Register code_data_container_object);
+ void CallCodeDataContainerObject(Register code_data_container_object);
+ void JumpCodeDataContainerObject(Register code_data_container_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Helper functions that dispatch either to Call/JumpCodeObject or to
+ // Call/JumpCodeDataContainerObject.
+ void LoadCodeTEntry(Register destination, Register code);
+ void CallCodeTObject(Register code);
+ void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
void Jump(Address destination, RelocInfo::Mode rmode);
- void Jump(const ExternalReference& reference) override;
+ void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc = always);
void RetpolineJump(Register reg);
- void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
- void Trap() override;
- void DebugBreak() override;
+ void Trap();
+ void DebugBreak();
// Will move src1 to dst if dst != src1.
void Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2);
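
The new CodeT helpers above are thin dispatchers: depending on which representation CodeT has in the current build, they forward either to the Code path or to the CodeDataContainer path, and the non-builtin variant exists because builtin containers hold entry points into the embedded instruction stream rather than into a Code object. A shape-only sketch, with kCodeTIsCodeDataContainer as a stand-in for the real build switch:

    #include <cstdio>

    constexpr bool kCodeTIsCodeDataContainer = false;  // assumed build switch

    void CallCodeObject() { std::printf("call through the Code entry point\n"); }
    void CallCodeDataContainerObject() {
      std::printf("call through the CodeDataContainer entry point\n");
    }

    // Mirrors the shape of CallCodeTObject: one helper, two possible lowerings.
    void CallCodeTObject() {
      if (kCodeTIsCodeDataContainer) {
        CallCodeDataContainerObject();
      } else {
        CallCodeObject();
      }
    }

    int main() { CallCodeTObject(); }
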
@@ -501,18 +512,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
#endif
}
- void SaveRegisters(RegList registers);
- void RestoreRegisters(RegList registers);
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Register slot_address,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
+#ifdef V8_IS_TSAN
+ void CallTSANRelaxedStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode);
+ void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
+ int size, StubCallMode mode);
+#endif // V8_IS_TSAN
+
void MoveNumber(Register dst, double value);
void MoveNonSmi(Register dst, double value);
@@ -611,11 +633,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi value);
-
- void CallRecordWriteStub(Register object, Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode, int builtin_index,
- Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
@@ -674,7 +691,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
+ Register object, int offset, Register value, Register slot_address,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
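
RecordWriteField's offset convention above matches FieldOperand: offsets count from the object start, so forming the operand subtracts the heap-object tag from the tagged pointer. A tiny sketch of that address arithmetic, assuming the usual tag value of 1:

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kHeapObjectTag = 1;  // assumed tag on heap object pointers

    intptr_t FieldAddress(intptr_t tagged_object, int offset_from_object_start) {
      return tagged_object + offset_from_object_start - kHeapObjectTag;
    }

    int main() {
      // A tagged pointer 0x1001 points at 0x1000; its field at offset 8 is 0x1008.
      std::printf("%#lx\n", (unsigned long)FieldAddress(0x1001, 8));
    }
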
@@ -685,7 +702,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// operation. RecordWrite filters out smis so it does not update
// the write barrier if the value is a smi.
void RecordWrite(
- Register object, Register address, Register value, SaveFPRegsMode save_fp,
+ Register object, Register slot_address, Register value,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
SmiCheck smi_check = SmiCheck::kInline);
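
The smi filtering mentioned above works because smis are encoded directly in the tagged word rather than as heap pointers, so storing one can never create a pointer the GC needs to track. A minimal sketch of the check, assuming V8-style tagging where the low bit is 0 for smis and 1 for heap objects:

    #include <cstdint>
    #include <cstdio>

    bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

    bool NeedsWriteBarrier(uintptr_t stored_value) {
      // Only stores of heap object pointers can require remembered-set or
      // incremental-marking work; smi stores are always safe to skip.
      return !IsSmi(stored_value);
    }

    int main() {
      std::printf("smi store needs barrier?  %d\n", NeedsWriteBarrier(42u << 1));
      std::printf("heap store needs barrier? %d\n", NeedsWriteBarrier(0x1001));
    }
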
@@ -854,6 +872,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void AssertSmi(Register object);
void AssertSmi(Operand object);
+ // Abort execution if argument is not a CodeT, enabled via --debug-code.
+ void AssertCodeT(Register object);
+
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
@@ -921,8 +942,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(StatsCounter* counter, int value) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value);
+ void DecrementCounter(StatsCounter* counter, int value) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// Stack limit utilities