Diffstat (limited to 'deps/v8/src/codegen/arm/assembler-arm.cc')
 deps/v8/src/codegen/arm/assembler-arm.cc | 57 +++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 09c57928ff..ec2588364c 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -2677,7 +2677,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
0xA * B8 | count);
}
-static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
+static void DoubleAsTwoUInt32(base::Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();
*lo = i & 0xFFFFFFFF;
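For reference, here is a minimal standalone equivalent of DoubleAsTwoUInt32 (an illustration, not V8 code). Like the original it assumes a little-endian double layout, and it uses memcpy to bit-cast without type-punning undefined behavior:

#include <cstdint>
#include <cstring>

// Split a double's bit pattern into its low and high 32-bit words.
static void DoubleBitsToWords(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // bit-cast, no aliasing UB
  *lo = static_cast<uint32_t>(bits & 0xFFFFFFFF);
  *hi = static_cast<uint32_t>(bits >> 32);
}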
@@ -2750,7 +2750,7 @@ void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
+static bool FitsVmovFPImmediate(base::Double d, uint32_t* encoding) {
// VMOV can accept an immediate of the form:
//
// +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
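As a quick sanity check of the form above (a standalone sketch, not V8 code): with 16 <= m <= 31 and 0 <= n <= 7, the encodable magnitudes run from 16 * 2^-7 = 0.125 up to 31 * 2^0 = 31.0, and common constants such as 1.0 = 16 * 2^-4 fall inside the set. Enumerating all 128 of them:

#include <cmath>
#include <cstdio>

int main() {
  // Print every magnitude the VMOV immediate form can represent.
  for (int m = 16; m <= 31; ++m) {
    for (int n = 0; n <= 7; ++n) {
      std::printf("%2d * 2^-%d = %g\n", m, n, std::ldexp(m, -n));
    }
  }
  return 0;
}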
@@ -2799,7 +2799,7 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
uint32_t enc;
if (CpuFeatures::IsSupported(VFPv3) &&
- FitsVmovFPImmediate(Double(imm.get_scalar()), &enc)) {
+ FitsVmovFPImmediate(base::Double(imm.get_scalar()), &enc)) {
CpuFeatureScope scope(this, VFPv3);
// The float can be encoded in the instruction.
//
@@ -2818,7 +2818,7 @@ void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
}
}
-void Assembler::vmov(const DwVfpRegister dst, Double imm,
+void Assembler::vmov(const DwVfpRegister dst, base::Double imm,
const Register extra_scratch) {
DCHECK(VfpRegisterIsAvailable(dst));
uint32_t enc;
@@ -4046,6 +4046,8 @@ enum UnaryOp {
VTRN,
VRECPE,
VRSQRTE,
+ VPADAL_S,
+ VPADAL_U,
VPADDL_S,
VPADDL_U,
VCEQ0,
@@ -4119,6 +4121,12 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
// Only support floating point.
op_encoding = 0x3 * B16 | 0xB * B7;
break;
+ case VPADAL_S:
+ op_encoding = 0xC * B7;
+ break;
+ case VPADAL_U:
+ op_encoding = 0xD * B7;
+ break;
case VPADDL_S:
op_encoding = 0x4 * B7;
break;
@@ -5016,6 +5024,14 @@ void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
emit(EncodeNeonUnaryOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
}
+void Assembler::vpadal(NeonDataType dt, QwNeonRegister dst,
+ QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // vpadal.<dt>(Qd, Qm) SIMD Vector Pairwise Add and Accumulate Long
+ emit(EncodeNeonUnaryOp(NeonU(dt) ? VPADAL_U : VPADAL_S, NEON_Q,
+ NeonDataTypeToSize(dt), dst.code(), src.code()));
+}
+
void Assembler::vpaddl(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
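Two notes on the vpadal added above. Encoding-wise it follows a visible pattern: the 0xC/0xD values at B7 are the VPADDL encodings 0x4/0x5 with one extra bit set to request accumulation, and the low bit selects unsigned. Semantically, a scalar model of vpadal.s8 on one 128-bit register could look like this (behavioral illustration only, not V8 code):

#include <cstddef>
#include <cstdint>

// vpadal.s8 Qd, Qm: sign-extend each adjacent pair of 8-bit source lanes,
// add the pair, and accumulate the sum into the corresponding 16-bit
// destination lane (wrapping modulo 2^16, matching the hardware).
void VpadalS8Model(int16_t dst[8], const int8_t src[16]) {
  for (size_t i = 0; i < 8; ++i) {
    dst[i] = static_cast<int16_t>(dst[i] + src[2 * i] + src[2 * i + 1]);
  }
}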
@@ -5159,6 +5175,8 @@ void Assembler::RecordConstPool(int size) {
void Assembler::GrowBuffer() {
DCHECK_EQ(buffer_start_, buffer_->start());
+ bool previously_on_heap = buffer_->IsOnHeap();
+
// Compute new buffer size.
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -5191,6 +5209,14 @@ void Assembler::GrowBuffer() {
reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
+ // Patch on-heap references to handles.
+ if (previously_on_heap && !buffer_->IsOnHeap()) {
+ Address base = reinterpret_cast<Address>(buffer_->start());
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ WriteUnalignedValue(base + p.first, p.second);
+ }
+ }
+
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
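A minimal sketch of the patching step above, assuming (as the diff does) a side table of (pc offset, value) pairs and an unaligned-tolerant store; the types are stand-ins, not V8's:

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using Address = uintptr_t;  // stand-in for V8's Address

// After the buffer has been copied to its new off-heap location, write
// each saved value back at its recorded offset. memcpy is used because
// the offsets need not be pointer-aligned.
void PatchSavedValues(uint8_t* new_start,
                      const std::vector<std::pair<int, Address>>& saved) {
  for (const auto& p : saved) {
    std::memcpy(new_start + p.first, &p.second, sizeof(Address));
  }
}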
@@ -5357,9 +5383,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
while (buffer_space() <= needed_space) GrowBuffer();
{
+ ASM_CODE_COMMENT_STRING(this, "Constant Pool");
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
- RecordComment("[ Constant Pool");
RecordConstPool(size);
Label size_check;
@@ -5384,6 +5410,13 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
CHECK_LE(pc_offset(),
first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = pending_32_bit_constants_.size() * kPointerSize + kGap;
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5416,7 +5449,17 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
instr_at_put(entry.position(),
SetLdrRegisterImmediateOffset(instr, delta));
if (!entry.is_merged()) {
- emit(entry.value());
+ if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(pc_offset(), entry.value()));
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(entry.value()));
+ emit(handle->ptr());
+ // We must ensure that `emit` is not growing the assembler buffer
+ // and falling back to off-heap compilation.
+ DCHECK(IsOnHeap());
+ } else {
+ emit(entry.value());
+ }
}
}
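This is the other half of the protocol sketched after GrowBuffer: while the code lives on the V8 heap, the constant pool holds the raw (movable) object pointer, and the side table remembers which handle slot it came from so the slot address can be written back if the code later migrates off-heap. A toy model of the record-and-resolve step (assumed shapes, not V8's Handle API):

#include <cstdint>
#include <utility>
#include <vector>

using Address = uintptr_t;  // stand-in for V8's Address

struct RawObjectPtrLog {
  std::vector<std::pair<int, Address>> saved;  // (pc offset, handle slot)

  // Record where the raw pointer will be emitted, then read the handle
  // slot to obtain the current raw object pointer to emit.
  Address RecordAndResolve(int pc_offset, Address handle_slot) {
    saved.push_back({pc_offset, handle_slot});
    return *reinterpret_cast<const Address*>(handle_slot);
  }
};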
@@ -5424,8 +5467,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
first_const_pool_32_use_ = -1;
- RecordComment("]");
-
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
if (after_pool.is_linked()) {