author     Michaël Zasso <targos@protonmail.com>  2019-03-12 09:01:49 +0100
committer  Michaël Zasso <targos@protonmail.com>  2019-03-14 18:49:21 +0100
commit     7b48713334469818661fe276cf571de9c7899f2d (patch)
tree       4dbda49ac88db76ce09dc330a0cb587e68e139ba /deps/v8/src/ppc
parent     8549ac09b256666cf5275224ec58fab9939ff32e (diff)
deps: update V8 to 7.3.492.25
PR-URL: https://github.com/nodejs/node/pull/25852
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/OWNERS                        |    5
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h           |   27
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              |  134
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               |  355
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             |  621
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h              |   31
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                |   55
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h               |   30
-rw-r--r--  deps/v8/src/ppc/cpu-ppc.cc                    |    6
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc            |   53
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                 |   10
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.cc        |    8
-rw-r--r--  deps/v8/src/ppc/frame-constants-ppc.h         |    2
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  |   21
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        |  437
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         |  132
-rw-r--r--  deps/v8/src/ppc/register-ppc.h                |  321
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc              |  563
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h               |  167
19 files changed, 1004 insertions(+), 1974 deletions(-)
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index cf60da5cc7..6d1a8fc472 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -1,7 +1,4 @@
jyan@ca.ibm.com
-dstence@us.ibm.com
joransiu@ca.ibm.com
-mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
-jbarboza@ca.ibm.com
-mmallick@ca.ibm.com
+miladfar@ca.ibm.com
\ No newline at end of file
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index e3dbaa96c9..99e75c377c 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -90,9 +90,7 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsOffHeapTarget(rmode_));
+ DCHECK(HasTargetAddressAddress());
if (FLAG_enable_embedded_constant_pool &&
Assembler::IsConstantPoolLoadStart(pc_)) {
@@ -161,31 +159,29 @@ Address Assembler::return_address_from_call_start(Address pc) {
return pc + (len + 2) * kInstrSize;
}
-HeapObject* RelocInfo::target_object() {
+HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(reinterpret_cast<Object*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_,
- reinterpret_cast<Address>(target),
+ Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
}
-
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
@@ -380,7 +376,7 @@ int Assembler::GetConstantPoolOffset(Address pc,
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
- Address pc = reinterpret_cast<Address>(buffer_) + pc_offset;
+ Address pc = reinterpret_cast<Address>(buffer_start_) + pc_offset;
bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
CHECK(overflowed != is_int16(offset));
#ifdef DEBUG
@@ -426,9 +422,10 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- code ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code->constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
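For background on the hunks above: this commit follows V8's migration from raw HeapObject*/Code* pointers to small value types that wrap a tagged Address, which is why Object(...), ->ptr(), and is_null() replace the reinterpret_casts and nullptr checks. A minimal standalone sketch of the pattern, with simplified names that are not V8's actual classes:

#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

// Sketch of a tagged-pointer value type: it copies like an int, so it can
// replace a raw pointer in signatures such as set_target_object above.
class ObjectSketch {
 public:
  ObjectSketch() : ptr_(kNullAddress) {}
  explicit ObjectSketch(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool is_null() const { return ptr_ == kNullAddress; }

 private:
  Address ptr_;  // the only state; no heap indirection of its own
};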
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 5daa55604e..db84384595 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -40,7 +40,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
@@ -163,23 +162,6 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
-int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
- DCHECK(IsRuntimeEntry(rmode_));
- return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
-}
-
-void RelocInfo::set_js_to_wasm_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::js_to_wasm_address() const {
- DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
@@ -205,13 +187,6 @@ Operand Operand::EmbeddedNumber(double value) {
return result;
}
-Operand Operand::EmbeddedCode(CodeStub* stub) {
- Operand result(0, RelocInfo::CODE_TARGET);
- result.is_heap_object_request_ = true;
- result.value_.heap_object_request = HeapObjectRequest(stub);
- return result;
-}
-
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
@@ -235,11 +210,6 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
}
- case HeapObjectRequest::kCodeStub: {
- request.code_stub()->set_isolate(isolate);
- object = request.code_stub()->GetCode();
- break;
- }
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
@@ -247,7 +217,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
}
- Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
Address constant_pool = kNullAddress;
set_target_address_at(pc, constant_pool, object.address(),
SKIP_ICACHE_FLUSH);
@@ -257,11 +227,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-Assembler::Assembler(const AssemblerOptions& options, void* buffer,
- int buffer_size)
- : AssemblerBase(options, buffer, buffer_size),
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
@@ -277,21 +247,25 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
- int constant_pool_offset = EmitConstantPool();
+ int constant_pool_size = EmitConstantPool();
EmitRelocations();
+
+ int code_comments_size = WriteCodeComments();
+
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
+ desc->buffer = buffer_start_;
+ desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->constant_pool_size =
- (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
+ desc->reloc_size =
+ (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
+ desc->constant_pool_size = constant_pool_size;
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
+ desc->code_comments_size = code_comments_size;
}
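The reloc_size computation above works because relocation info is written backwards from the end of the buffer (see the Reposition call in the constructor earlier in this diff). A standalone sketch of just that arithmetic, with hypothetical names:

#include <cstdint>

// Reloc info grows downward from the buffer end, so its size is the gap
// between the writer's current position and the end of the buffer.
int RelocSizeSketch(const uint8_t* buffer_start, int buffer_size,
                    const uint8_t* reloc_writer_pos) {
  return static_cast<int>((buffer_start + buffer_size) - reloc_writer_pos);
}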
@@ -516,7 +490,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos), 2);
+ reinterpret_cast<byte*>(buffer_start_ + pos),
+ 2);
patcher.bitwise_mov32(dst, offset);
break;
}
@@ -531,7 +506,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
: (SIGN_EXT_IMM22(operands & kImm22Mask));
int32_t offset = target_pos + delta;
PatchingAssembler patcher(
- options(), reinterpret_cast<byte*>(buffer_ + pos),
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos),
2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
patcher.bitwise_add32(dst, base, offset);
if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
@@ -541,7 +516,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos),
+ reinterpret_cast<byte*>(buffer_start_ + pos),
kMovInstructionsNoConstantPool);
// Keep internal references relative until EmitRelocations.
patcher.bitwise_mov(dst, target_pos);
@@ -549,7 +524,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
}
case kUnboundJumpTableEntryOpcode: {
PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_ + pos),
+ reinterpret_cast<byte*>(buffer_start_ + pos),
kPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
patcher.dp(target_pos);
@@ -2006,54 +1981,43 @@ bool Assembler::IsNop(Instr instr, int type) {
void Assembler::GrowBuffer(int needed) {
- if (!own_buffer_) FATAL("external code buffer is too small");
+ DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4 * KB) {
- desc.buffer_size = 4 * KB;
- } else if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2 * buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1 * MB;
- }
- int space = buffer_space() + (desc.buffer_size - buffer_size_);
- if (space < needed) {
- desc.buffer_size += needed - space;
- }
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+ int space = buffer_space() + (new_size - old_size);
+ new_size += (space < needed) ? needed - space : 0;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize) {
+ if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
// Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta =
- (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
// Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // Nothing else to do here since we keep all internal references and
- // deferred relocation entries relative to the buffer (until
- // EmitRelocations).
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
}
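The new sizing logic doubles small buffers, grows large ones by a fixed 1 MB step, and then tops up if the request still does not fit. A standalone sketch mirroring the lines above:

#include <algorithm>

constexpr int MB = 1 << 20;

// Mirrors GrowBuffer above: double up to 1 MB, then +1 MB per step, and
// always extend far enough to leave `needed` bytes of free space.
int NewBufferSizeSketch(int old_size, int free_space, int needed) {
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = free_space + (new_size - old_size);
  if (space < needed) new_size += needed - space;
  return new_size;
}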
@@ -2098,18 +2062,19 @@ void Assembler::EmitRelocations() {
for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
- Address pc = reinterpret_cast<Address>(buffer_) + it->position();
- RelocInfo rinfo(pc, rmode, it->data(), nullptr);
+ Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
+ RelocInfo rinfo(pc, rmode, it->data(), Code());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
- Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
+ Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
- set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
+ set_target_address_at(pc, 0,
+ reinterpret_cast<Address>(buffer_start_) + pos,
SKIP_ICACHE_FLUSH);
}
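Both branches above perform the same rebase: an internal reference is stored as a buffer-relative offset during assembly and only becomes an absolute address at emission time, which is what makes the buffer move in GrowBuffer safe. A standalone sketch:

#include <cstdint>

// Internal references stay buffer-relative while assembling (surviving
// buffer moves) and are rebased onto the final buffer address here.
uintptr_t RebaseSketch(const uint8_t* buffer_start, intptr_t stored_pos) {
  return reinterpret_cast<uintptr_t>(buffer_start) + stored_pos;
}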
@@ -2156,14 +2121,15 @@ void Assembler::CheckTrampolinePool() {
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
byte* address, int instructions)
- : Assembler(options, address, instructions * kInstrSize + kGap) {
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ : Assembler(options, ExternalAssemblerBuffer(
+ address, instructions * kInstrSize + kGap)) {
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
PatchingAssembler::~PatchingAssembler() {
// Check that the code was patched as expected.
- DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
- DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+ DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
+ DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
} // namespace internal
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 9f3ff0dc7e..46c810334f 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,319 +44,17 @@
#include <vector>
#include "src/assembler.h"
+#include "src/constant-pool.h"
#include "src/double.h"
+#include "src/external-reference.h"
+#include "src/label.h"
+#include "src/objects/smi.h"
#include "src/ppc/constants-ppc.h"
-
-#if V8_HOST_ARCH_PPC && \
- (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
-#define ABI_USES_FUNCTION_DESCRIPTORS 1
-#else
-#define ABI_USES_FUNCTION_DESCRIPTORS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_PASSES_HANDLES_IN_REGS 1
-#else
-#define ABI_PASSES_HANDLES_IN_REGS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
-#else
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
-#define ABI_CALL_VIA_IP 1
-#else
-#define ABI_CALL_VIA_IP 0
-#endif
-
-#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER 2
-#else
-#define ABI_TOC_REGISTER 13
-#endif
-
-#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+#include "src/ppc/register-ppc.h"
namespace v8 {
namespace internal {
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
-
-#if V8_EMBEDDED_CONSTANT_POOL
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r30)
-#else
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r3) V(r4) V(r5) V(r6) V(r7) \
- V(r8) V(r9) V(r10) V(r14) V(r15) \
- V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
- V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
-#endif
-
-#define LOW_DOUBLE_REGISTERS(V) \
- V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
-
-#define NON_LOW_DOUBLE_REGISTERS(V) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define DOUBLE_REGISTERS(V) \
- LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
- V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
- V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
-
-#define C_REGISTERS(V) \
- V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
- V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
-// clang-format on
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 32;
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved = 1 << 3 | // r3 a1
- 1 << 4 | // r4 a2
- 1 << 5 | // r5 a3
- 1 << 6 | // r6 a4
- 1 << 7 | // r7 a5
- 1 << 8 | // r8 a6
- 1 << 9 | // r9 a7
- 1 << 10 | // r10 a8
- 1 << 11;
-
-const int kNumJSCallerSaved = 9;
-
-// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0
-int JSCallerSavedCode(int n);
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved = 1 << 14 | // r14
- 1 << 15 | // r15
- 1 << 16 | // r16
- 1 << 17 | // r17
- 1 << 18 | // r18
- 1 << 19 | // r19
- 1 << 20 | // r20
- 1 << 21 | // r21
- 1 << 22 | // r22
- 1 << 23 | // r23
- 1 << 24 | // r24
- 1 << 25 | // r25
- 1 << 26 | // r26
- 1 << 27 | // r27
- 1 << 28 | // r28
- 1 << 29 | // r29
- 1 << 30 | // r30
- 1 << 31; // r31
-
-const int kNumCalleeSaved = 18;
-
-const RegList kCallerSavedDoubles = 1 << 0 | // d0
- 1 << 1 | // d1
- 1 << 2 | // d2
- 1 << 3 | // d3
- 1 << 4 | // d4
- 1 << 5 | // d5
- 1 << 6 | // d6
- 1 << 7 | // d7
- 1 << 8 | // d8
- 1 << 9 | // d9
- 1 << 10 | // d10
- 1 << 11 | // d11
- 1 << 12 | // d12
- 1 << 13; // d13
-
-const int kNumCallerSavedDoubles = 14;
-
-const RegList kCalleeSavedDoubles = 1 << 14 | // d14
- 1 << 15 | // d15
- 1 << 16 | // d16
- 1 << 17 | // d17
- 1 << 18 | // d18
- 1 << 19 | // d19
- 1 << 20 | // d20
- 1 << 21 | // d21
- 1 << 22 | // d22
- 1 << 23 | // d23
- 1 << 24 | // d24
- 1 << 25 | // d25
- 1 << 26 | // d26
- 1 << 27 | // d27
- 1 << 28 | // d28
- 1 << 29 | // d29
- 1 << 30 | // d30
- 1 << 31; // d31
-
-const int kNumCalleeSavedDoubles = 18;
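These lists (moved by this commit into register-ppc.h, per the diffstat) encode one register per bit of a RegList mask. A standalone sketch of the encoding, assuming a 32-bit mask as the 1 << n pattern above implies:

#include <cstdint>

using RegListSketch = uint32_t;  // assumption: bit n <=> register code n

constexpr RegListSketch kJSCallerSavedSketch =
    (1u << 3) | (1u << 4) | (1u << 5) | (1u << 6) | (1u << 7) |
    (1u << 8) | (1u << 9) | (1u << 10) | (1u << 11);  // r3..r11, as above

// Membership test: shift the mask down and inspect the low bit.
inline bool Contains(RegListSketch list, int reg_code) {
  return (list >> reg_code) & 1u;
}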
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-const int kNumSafepointRegisters = 32;
-
-// The following constants describe the stack frame linkage area as
-// defined by the ABI. Note that kNumRequiredStackFrameSlots must
-// satisfy alignment requirements (rounding up if required).
-#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] TOC save area
-// [4] Parameter1 save area
-// ...
-// [11] Parameter8 save area
-// [12] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 12;
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 12;
-#else // AIX
-// [0] back chain
-// [1] condition register save area
-// [2] link register save area
-// [3] reserved for compiler
-// [4] reserved by binder
-// [5] TOC save area
-// [6] Parameter1 save area
-// ...
-// [13] Parameter8 save area
-// [14] Parameter9 slot (if necessary)
-// ...
-const int kNumRequiredStackFrameSlots = 14;
-const int kStackFrameLRSlot = 2;
-const int kStackFrameExtraParamSlot = 14;
-#endif
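The slot constants above turn into byte offsets from the stack pointer via kPointerSize multiplies, as in the StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)) call later in this diff. A small worked sketch for the ppc64le layout, assuming 8-byte slots:

constexpr int kPointerSizeSketch = 8;            // assumption: 64-bit target
constexpr int kStackFrameExtraParamSlotLE = 12;  // ppc64le value from above

// Byte offset from sp of the n-th extra parameter slot (n = 0 is the
// 9th parameter, the first one with no register assigned).
constexpr int ExtraParamOffset(int n) {
  return (kStackFrameExtraParamSlotLE + n) * kPointerSizeSketch;
}

static_assert(ExtraParamOffset(0) == 96, "slot 12 at 8 bytes per slot");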
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if V8_TARGET_LITTLE_ENDIAN
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#else
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#endif
-
- private:
- friend class RegisterBase;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(Register);
-static_assert(sizeof(Register) == sizeof(int),
- "Register can efficiently be passed by value");
-
-#define DEFINE_REGISTER(R) \
- constexpr Register R = Register::from_code<kRegCode_##R>();
-GENERAL_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-// Aliases
-constexpr Register kConstantPoolRegister = r28; // Constant pool.
-constexpr Register kRootRegister = r29; // Roots array pointer.
-constexpr Register cp = r30; // JavaScript context pointer.
-
-constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Double word FP register.
-class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
- public:
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
- static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
-
- private:
- friend class RegisterBase;
- explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
-};
-
-ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
-static_assert(sizeof(DoubleRegister) == sizeof(int),
- "DoubleRegister can efficiently be passed by value");
-
-typedef DoubleRegister FloatRegister;
-
-// TODO(ppc) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
-#define DEFINE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
-DOUBLE_REGISTERS(DEFINE_REGISTER)
-#undef DEFINE_REGISTER
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
-constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
-constexpr DoubleRegister kDoubleRegZero = d14;
-constexpr DoubleRegister kScratchDoubleReg = d13;
-
-Register ToRegister(int num);
-
-enum CRegisterCode {
-#define REGISTER_CODE(R) kCCode_##R,
- C_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kCAfterLast
-};
-
-// Coprocessor register
-class CRegister : public RegisterBase<CRegister, kCAfterLast> {
- friend class RegisterBase;
- explicit constexpr CRegister(int code) : RegisterBase(code) {}
-};
-
-constexpr CRegister no_creg = CRegister::no_reg();
-#define DECLARE_C_REGISTER(R) \
- constexpr CRegister R = CRegister::from_code<kCCode_##R>();
-C_REGISTERS(DECLARE_C_REGISTER)
-#undef DECLARE_C_REGISTER
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -375,15 +73,14 @@ class Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
}
// rm
V8_INLINE explicit Operand(Register rm);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedStringConstant(const StringConstantBase* str);
- static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
V8_INLINE bool is_reg() const { return rm_.is_valid(); }
@@ -484,15 +181,10 @@ class Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
- // own buffer, and buffer_size determines the initial buffer size. The buffer
- // is owned by the assembler and deallocated upon destruction of the
- // assembler.
- //
- // If the provided buffer is not nullptr, the assembler uses the provided
- // buffer for code generation and assumes its size to be buffer_size. If the
- // buffer is too small, a fatal error occurs. No deallocation of the buffer is
- // done upon destruction of the assembler.
- Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
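The constructor change above leaves exactly two buffer regimes: pass nothing and the assembler allocates an owned, growable buffer, or pass a buffer (e.g. via ExternalAssemblerBuffer, as PatchingAssembler does later in this diff) whose fixed storage the caller controls. A standalone sketch of that ownership split, with hypothetical names:

#include <cstdint>
#include <vector>

// Sketch of the AssemblerBuffer split implied above: an owned buffer can
// grow; an external one wraps fixed caller-provided storage.
struct BufferSketch {
  virtual ~BufferSketch() = default;
  virtual uint8_t* start() = 0;
  virtual int size() const = 0;
};

struct OwnedBuffer : BufferSketch {
  explicit OwnedBuffer(int n) : bytes(n) {}
  uint8_t* start() override { return bytes.data(); }
  int size() const override { return static_cast<int>(bytes.size()); }
  std::vector<uint8_t> bytes;  // Grow() would allocate a bigger successor
};

struct ExternalBuffer : BufferSketch {
  ExternalBuffer(uint8_t* p, int n) : p(p), n(n) {}
  uint8_t* start() override { return p; }
  int size() const override { return n; }
  uint8_t* p;  // caller-owned; never freed here
  int n;
};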
@@ -574,7 +266,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Address instruction_payload, Code code, Address target);
// Get the size of the special target encoded at 'instruction_payload'.
inline static int deserialization_special_target_size(
@@ -1331,10 +1023,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
};
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -1348,9 +1036,11 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data);
// Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(Address pc, Instr instr) {
@@ -1437,10 +1127,13 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
- bool sharing_ok = RelocInfo::IsNone(rmode) ||
- (!options().record_reloc_info_for_serialization &&
- RelocInfo::IsShareableRelocMode(rmode) &&
- !is_constant_pool_entry_sharing_blocked());
+ bool sharing_ok =
+ RelocInfo::IsNone(rmode) ||
+ (!options().record_reloc_info_for_serialization &&
+ RelocInfo::IsShareableRelocMode(rmode) &&
+ !is_constant_pool_entry_sharing_blocked() &&
+ // TODO(johnyan): make the following rmode shareable
+ !RelocInfo::IsWasmCall(rmode) && !RelocInfo::IsWasmStubCall(rmode));
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
@@ -1615,6 +1308,8 @@ class Assembler : public AssemblerBase {
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ int WriteCodeComments();
+
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 7e287b08b8..c0d7b58b0f 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -15,634 +15,15 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
-#include "src/ppc/code-stubs-ppc.h" // Cannot be the first include.
-
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // [sp+0]: argv
-
- Label invoke, handler_entry, exit;
-
-// Called from C
- __ function_descriptor();
-
- {
- NoRootArrayScope no_root_array(masm);
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // PPC LINUX ABI:
- // preserve LR in pre-reserved slot in caller's frame
- __ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
-
- // Save callee saved registers on the stack.
- __ MultiPush(kCalleeSaved);
-
- // Save callee-saved double registers.
- __ MultiPushDoubles(kCalleeSavedDoubles);
- // Set up the reserved register for 0.0.
- __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
-
- __ InitializeRootRegister();
- }
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // r7: argv
- __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ push(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ li(kConstantPoolRegister, Operand::Zero());
- __ push(kConstantPoolRegister);
- }
- StackFrame::Type marker = type();
- __ mov(r0, Operand(StackFrame::TypeToMarker(marker)));
- __ push(r0);
- __ push(r0);
- // Save copies of the top frame descriptor on the stack.
- __ mov(r8, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ LoadP(r0, MemOperand(r8));
- __ push(r0);
-
- // Set up frame pointer for the frame to be pushed.
- __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp =
- ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ mov(r8, Operand(js_entry_sp));
- __ LoadP(r9, MemOperand(r8));
- __ cmpi(r9, Operand::Zero());
- __ bne(&non_outermost_js);
- __ StoreP(fp, MemOperand(r8));
- __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- Label cont;
- __ b(&cont);
- __ bind(&non_outermost_js);
- __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
- __ push(ip); // frame-type
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ b(&invoke);
-
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushStackHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, isolate())));
-
- __ StoreP(r3, MemOperand(ip));
- __ LoadRoot(r3, RootIndex::kException);
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r3-r7.
- __ PushStackHandler();
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the b(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r3: code entry
- // r4: function
- // r5: receiver
- // r6: argc
- // r7: argv
- __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit); // r3 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r8);
- __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ bne(&non_outermost_js_2);
- __ mov(r9, Operand::Zero());
- __ mov(r8, Operand(js_entry_sp));
- __ StoreP(r9, MemOperand(r8));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r6);
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
- __ StoreP(r6, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved double registers.
- __ MultiPopDoubles(kCalleeSavedDoubles);
-
- // Restore callee-saved registers.
- __ MultiPop(kCalleeSaved);
-
- // Return
- __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
- __ mtlr(r0);
- __ blr();
-}
-
-// This stub is paired with DirectCEntryStub::GenerateCall
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // Place the return address on the stack, making the call
- // GC safe. The RegExp backend also relies on this.
- __ mflr(r0);
- __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ Call(ip); // Call the C++ function.
- __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ mtlr(r0);
- __ blr();
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
- if (FLAG_embedded_builtins) {
- if (masm->root_array_available() &&
- isolate()->ShouldLoadConstantsFromRootList()) {
- // This is basically an inlined version of Call(Handle<Code>) that loads
- // the code object into lr instead of ip.
- DCHECK_NE(ip, target);
- __ IndirectLoadConstant(ip, GetCode());
- __ addi(r0, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Move(ip, target);
- __ Call(r0);
- return;
- }
- }
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- // AIX/PPC64BE Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
- __ LoadP(ip, MemOperand(target, 0)); // Instruction address
- } else {
- // ip needs to be set for DirectCEntryStub::Generate, and also
- // for ABI_CALL_VIA_IP.
- __ Move(ip, target);
- }
-
- intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
- __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
- __ Call(r0); // Call the stub.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
- Zone* zone) {
- if (tasm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(tasm,
-#if V8_TARGET_ARCH_PPC64
- 14 * kInstrSize);
-#else
- 11 * kInstrSize);
-#endif
- tasm->mflr(r0);
- tasm->Push(r0, ip);
- tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
- tasm->Pop(r0, ip);
- tasm->mtlr(r0);
- }
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != nullptr) {
- PredictableCodeSizeScope predictable(masm,
-#if V8_TARGET_ARCH_PPC64
- 14 * kInstrSize);
-#else
- 11 * kInstrSize);
-#endif
- ProfileEntryHookStub stub(masm->isolate());
- __ mflr(r0);
- __ Push(r0, ip);
- __ CallStub(&stub);
- __ Pop(r0, ip);
- __ mtlr(r0);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push lr, ip" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 3 * kInstrSize;
-
- // This should contain all kJSCallerSaved registers.
- const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
- r15.bit(); // Saved stack pointer.
-
- // We also save lr, so the count here is one higher than the mask indicates.
- const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
-
- // Save all caller-save registers as this may be called from anywhere.
- __ mflr(ip);
- __ MultiPush(kSavedRegs | ip.bit());
-
- // Compute the function's address for the first argument.
- __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
-
- // The caller's return address is two slots above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mr(r15, sp);
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
- }
-
-#if !defined(USE_SIMULATOR)
- uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- ExternalReference entry_hook =
- ExternalReference::Create(&dispatcher, ExternalReference::BUILTIN_CALL);
-
- // It additionally takes an isolate as a third parameter
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
-#endif
-
- __ mov(ip, Operand(entry_hook));
-
- if (ABI_USES_FUNCTION_DESCRIPTORS) {
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0));
- }
- // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
-
- // PPC LINUX ABI:
- __ li(r0, Operand::Zero());
- __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
-
- __ Call(ip);
-
- __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mr(sp, r15);
- }
-
- // Also pop lr to get Ret(0).
- __ MultiPop(kSavedRegs | ip.bit());
- __ mtlr(ip);
- __ Ret();
-}
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-// Calls an API function. Allocates HandleScope, extracts returned value
-// from handle and propagates exceptions. Restores context. stack_space
-// - space to be unwound on exit (includes the call JS arguments space and
-// the additional space allocated for the fast call).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- MemOperand* stack_space_operand,
- MemOperand return_value_operand) {
- Isolate* isolate = masm->isolate();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate), next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate), next_address);
-
- // Additional parameter is the address of the actual callback.
- DCHECK(function_address == r4 || function_address == r5);
- Register scratch = r6;
-
- __ Move(scratch, ExternalReference::is_profiling_address(isolate));
- __ lbz(scratch, MemOperand(scratch, 0));
- __ cmpi(scratch, Operand::Zero());
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ Move(scratch, thunk_ref);
- __ isel(eq, scratch, function_address, scratch);
- } else {
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check);
- __ bind(&profiler_disabled);
- __ mr(scratch, function_address);
- __ bind(&end_profiler_check);
- }
-
- // Allocate HandleScope in callee-save registers.
- // r17 - next_address
- // r14 - next_address->kNextOffset
- // r15 - next_address->kLimitOffset
- // r16 - next_address->kLevelOffset
- __ Move(r17, next_address);
- __ LoadP(r14, MemOperand(r17, kNextOffset));
- __ LoadP(r15, MemOperand(r17, kLimitOffset));
- __ lwz(r16, MemOperand(r17, kLevelOffset));
- __ addi(r16, r16, Operand(1));
- __ stw(r16, MemOperand(r17, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub(isolate);
- stub.GenerateCall(masm, scratch);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // load value from ReturnValue
- __ LoadP(r3, return_value_operand);
- __ bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ StoreP(r14, MemOperand(r17, kNextOffset));
- if (__ emit_debug_code()) {
- __ lwz(r4, MemOperand(r17, kLevelOffset));
- __ cmp(r4, r16);
- __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
- }
- __ subi(r16, r16, Operand(1));
- __ stw(r16, MemOperand(r17, kLevelOffset));
- __ LoadP(r0, MemOperand(r17, kLimitOffset));
- __ cmp(r15, r0);
- __ bne(&delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- // LeaveExitFrame expects unwind space to be in a register.
- if (stack_space_operand != nullptr) {
- __ lwz(r14, *stack_space_operand);
- } else {
- __ mov(r14, Operand(stack_space));
- }
- __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
-
- // Check if the function scheduled an exception.
- __ LoadRoot(r14, RootIndex::kTheHoleValue);
- __ Move(r15, ExternalReference::scheduled_exception_address(isolate));
- __ LoadP(r15, MemOperand(r15));
- __ cmp(r14, r15);
- __ bne(&promote_scheduled_exception);
-
- __ blr();
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- __ bind(&delete_allocated_handles);
- __ StoreP(r15, MemOperand(r17, kLimitOffset));
- __ mr(r14, r3);
- __ PrepareCallCFunction(1, r15);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
- __ mr(r3, r14);
- __ b(&leave_exit_frame);
-}
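The removed helper opens a HandleScope by saving next/limit and bumping level (registers r14-r17 above), and closes it by restoring them; if the limit moved, extensions were allocated and must be deleted. A standalone sketch of that bookkeeping, with simplified types:

#include <cstdint>

// Sketch of the HandleScope open/close protocol done in r14-r17 above
// (field layout simplified; not V8's actual HandleScopeData).
struct HandleScopeDataSketch {
  uintptr_t* next;
  uintptr_t* limit;
  int level;
};

struct SavedScope {
  uintptr_t* next;
  uintptr_t* limit;
};

SavedScope OpenScope(HandleScopeDataSketch* data) {
  SavedScope saved{data->next, data->limit};
  data->level++;  // matches the addi/stw pair on kLevelOffset above
  return saved;
}

// Returns true on the fast path; false means the limit changed and the
// allocated extensions must be deleted (delete_allocated_handles above).
bool CloseScope(HandleScopeDataSketch* data, const SavedScope& saved) {
  data->next = saved.next;  // restore previous scope
  data->level--;
  return data->limit == saved.limit;
}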
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r7 : call_data
- // -- r5 : holder
- // -- r4 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- Register call_data = r7;
- Register holder = r5;
- Register api_function_address = r4;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kArgsLength == 6);
- STATIC_ASSERT(FCA::kNewTargetIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
-
- // new target
- __ PushRoot(RootIndex::kUndefinedValue);
-
- // call data
- __ push(call_data);
-
- Register scratch = call_data;
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- // return value
- __ push(scratch);
- // return value default
- __ push(scratch);
- // isolate
- __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
- // holder
- __ push(holder);
-
- // Prepare arguments.
- __ mr(scratch, sp);
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- // PPC LINUX ABI:
- //
- // Create 4 extra slots on stack:
- // [0] space for DirectCEntryStub's LR save
- // [1-3] FunctionCallbackInfo
- const int kApiStackSpace = 4;
- const int kFunctionCallbackInfoOffset =
- (kStackFrameExtraParamSlot + 1) * kPointerSize;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- DCHECK(api_function_address != r3 && scratch != r3);
- // r3 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
- // FunctionCallbackInfo::implicit_args_
- __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ li(ip, Operand(argc()));
- __ stw(ip, MemOperand(r3, 2 * kPointerSize));
-
- ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- AllowExternalCallThatCantCauseGC scope(masm);
- // Stores return the first js argument
- int return_value_offset = 2 + FCA::kReturnValueOffset;
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
- const int stack_space = argc() + FCA::kArgsLength + 1;
- MemOperand* stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
- stack_space_operand, return_value_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- int arg0Slot = 0;
- int accessorInfoSlot = 0;
- int apiStackSpace = 0;
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = r7;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- Register api_function_address = r5;
-
- __ push(receiver);
- // Push data from AccessorInfo.
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ push(scratch);
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ Push(scratch, scratch);
- __ Move(scratch, ExternalReference::isolate_address(isolate()));
- __ Push(scratch, holder);
- __ Push(Smi::kZero); // should_throw_on_error -> false
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch);
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ mr(r3, sp); // r3 = Handle<Name>
- __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
-
-// If ABI passes Handles (pointer-sized struct) in a register:
-//
-// Create 2 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] AccessorInfo&
-//
-// Otherwise:
-//
-// Create 3 extra slots on stack:
-// [0] space for DirectCEntryStub's LR save
-// [1] copy of Handle (first arg)
-// [2] AccessorInfo&
- if (ABI_PASSES_HANDLES_IN_REGS) {
- accessorInfoSlot = kStackFrameExtraParamSlot + 1;
- apiStackSpace = 2;
- } else {
- arg0Slot = kStackFrameExtraParamSlot + 1;
- accessorInfoSlot = arg0Slot + 1;
- apiStackSpace = 3;
- }
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
-
- if (!ABI_PASSES_HANDLES_IN_REGS) {
- // pass 1st arg by reference
- __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
- __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
- }
-
- // Create v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
- __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
- // r4 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ LoadP(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, nullptr, return_value_operand);
-}
-
-#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
deleted file mode 100644
index 80284587db..0000000000
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PPC_CODE_STUBS_PPC_H_
-#define V8_PPC_CODE_STUBS_PPC_H_
-
-namespace v8 {
-namespace internal {
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
-class DirectCEntryStub : public PlatformCodeStub {
- public:
- explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Movability NeedsImmovableCode() override { return kImmovable; }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
deleted file mode 100644
index b27890d1f5..0000000000
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include <memory>
-
-#include "src/codegen.h"
-#include "src/macro-assembler.h"
-#include "src/ppc/simulator-ppc.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ masm.
-
-UnaryMathFunction CreateSqrtFunction() {
-#if defined(USE_SIMULATOR)
- return nullptr;
-#else
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- size_t allocated = 0;
- byte* buffer = AllocatePage(page_allocator,
- page_allocator->GetRandomMmapAddr(), &allocated);
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
-
- // Called from C
- __ function_descriptor();
-
- __ MovFromFloatParameter(d1);
- __ fsqrt(d1, d1);
- __ MovToFloatResult(d1);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- Assembler::FlushICache(buffer, allocated);
- CHECK(SetPermissions(page_allocator, buffer, allocated,
- PageAllocator::kReadExecute));
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#endif
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index a6cecf7dc2..4d79fad031 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -20,6 +20,36 @@
#define UNIMPLEMENTED_PPC()
#endif
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define ABI_USES_FUNCTION_DESCRIPTORS 1
+#else
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_PASSES_HANDLES_IN_REGS 1
+#else
+#define ABI_PASSES_HANDLES_IN_REGS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
+#else
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#define ABI_CALL_VIA_IP 1
+#else
+#define ABI_CALL_VIA_IP 0
+#endif
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_TOC_REGISTER 2
+#else
+#define ABI_TOC_REGISTER 13
+#endif
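ABI_USES_FUNCTION_DESCRIPTORS and ABI_TOC_REGISTER exist because on AIX and big-endian PPC64 Linux a C function pointer designates a descriptor rather than code; the removed DirectCEntryStub::GenerateCall earlier in this diff loads target+kPointerSize into the TOC register and target+0 into ip accordingly. A sketch of the standard PPC64 ELFv1 descriptor layout that code assumes:

// What `target` points at when ABI_USES_FUNCTION_DESCRIPTORS is 1:
// GenerateCall above loads `entry` into ip and `toc` into
// ABI_TOC_REGISTER (r2).
struct FunctionDescriptorSketch {
  void* entry;        // address of the first instruction
  void* toc;          // callee's TOC (r2) value
  void* environment;  // static chain; unused by C code
};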
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index 91ea4000e1..cca8ebaf73 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -6,8 +6,9 @@
#if V8_TARGET_ARCH_PPC
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
+#include "src/cpu-features.h"
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
namespace v8 {
namespace internal {
@@ -45,4 +46,5 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
} // namespace internal
} // namespace v8
+#undef INSTR_AND_DATA_CACHE_COHERENCY
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index b10af51de1..9fe8cbefbd 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -4,20 +4,21 @@
#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 8;
-
-#define __ masm()->
+#define __ masm->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+ Isolate* isolate,
+ DeoptimizeKind deopt_kind) {
+ NoRootArrayScope no_root_array(masm);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
@@ -57,21 +58,20 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate())));
+ IsolateAddressId::kCEntryFPAddress, isolate)));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
- // Get the bailout id from the stack.
- __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
+ // The bailout id is passed in r29 by the caller.
+ __ mr(r5, r29);
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r7.
__ mflr(r6);
- // Correct one word for bailout id.
- __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
__ sub(r7, fp, r7);
// Allocate a new deoptimizer object.
@@ -83,14 +83,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r4, &context_check);
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(r4, Operand(static_cast<int>(deopt_kind())));
+ __ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
// r6: code address or 0 already loaded.
// r7: Fp-to-sp delta.
- __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(r8, Operand(ExternalReference::isolate_address(isolate)));
// Call Deoptimizer::New().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
}
@@ -127,8 +127,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lfs(d0, MemOperand(sp, src_offset));
__ stfs(d0, MemOperand(r4, dst_offset));
}
- // Remove the bailout id and the saved registers from the stack.
- __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Remove the saved registers from the stack.
+ __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
@@ -156,7 +157,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ PrepareCallCFunction(1, r4);
// Call Deoptimizer::ComputeOutputFrames().
{
- AllowExternalCallThatCantCauseGC scope(masm());
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
@@ -218,8 +219,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ InitializeRootRegister();
-
__ pop(ip); // get continuation, leave pc on stack
__ pop(r0);
__ mtlr(r0);
@@ -227,24 +226,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ stop("Unreachable.");
}
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ li(ip, Operand(i));
- __ b(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
- __ push(ip);
-}
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
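The removed GeneratePrologue shows what this replaces: a table of per-id entries, each materializing its index in ip before branching to the shared body. Under the new scheme the deopt id travels in r29 instead; the caller side of the contract is TurboAssembler::CallForDeoptimization further down in this diff:

  // Caller (macro-assembler-ppc.cc below):
  mov(r29, Operand(deopt_id));
  Call(target, RelocInfo::RUNTIME_ENTRY);
  // Entry (above): the id is then simply copied into the argument register:
  mr(r5, r29);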
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index ae56f3616d..f736f804c0 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -34,13 +34,11 @@
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/ppc/constants-ppc.h"
-
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
@@ -120,7 +118,7 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(GetRegConfig()->GetDoubleRegisterName(reg));
+ Print(RegisterName(DoubleRegister::from_code(reg)));
}
@@ -1497,18 +1495,16 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
+ return RegisterName(i::Register::from_code(reg));
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // PPC does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // PPC does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/ppc/frame-constants-ppc.cc
index f49296292a..546d495df8 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/ppc/frame-constants-ppc.cc
@@ -4,14 +4,12 @@
#if V8_TARGET_ARCH_PPC
-#include "src/assembler.h"
+#include "src/ppc/frame-constants-ppc.h"
+
+#include "src/assembler-inl.h"
#include "src/frame-constants.h"
#include "src/macro-assembler.h"
-#include "src/ppc/assembler-ppc-inl.h"
-#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/macro-assembler-ppc.h"
-#include "src/ppc/frame-constants-ppc.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index a4516c367c..f9b3e40846 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -35,7 +35,7 @@ class ExitFrameConstants : public TypedFrameConstants {
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
- static constexpr int kNumberOfSavedGpParamRegs = 8;
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 505aaef93d..b5640d75c8 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -6,6 +6,8 @@
#include "src/interface-descriptors.h"
+#include "src/frames.h"
+
namespace v8 {
namespace internal {
@@ -70,12 +72,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -208,10 +204,9 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- JavaScriptFrame::context_register(), // callee context
- r7, // call_data
- r5, // holder
- r4, // api_function_address
+ JavaScriptFrame::context_register(), // kTargetContext
+ r4, // kApiFunctionAddress
+ r5, // kArgc
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -263,6 +258,12 @@ void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9565d04a4d..94bb328bc9 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -12,36 +12,26 @@
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
-#include "src/instruction-stream.h"
+#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
#include "src/ppc/macro-assembler-ppc.h"
+#endif
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* isolate,
- const AssemblerOptions& options, void* buffer,
- int size, CodeObjectRequired create_code_object)
- : TurboAssembler(isolate, options, buffer, size, create_code_object) {
- if (create_code_object == CodeObjectRequired::kYes) {
- // Unlike TurboAssembler, which can be used off the main thread and may not
- // allocate, macro assembler creates its own copy of the self-reference
- // marker in order to disambiguate between self-references during nested
- // code generation (e.g.: codegen of the current object triggers stub
- // compilation through CodeStub::GetCode()).
- code_object_ = Handle<HeapObject>::New(
- *isolate->factory()->NewSelfReferenceMarker(), isolate);
- }
-}
-
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -127,8 +117,7 @@ void TurboAssembler::Jump(Register target) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
- RootIndex::kBuiltinsConstantsTable));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
const uint32_t offset =
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
@@ -147,8 +136,11 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
mr(destination, kRootRegister);
- } else {
+ } else if (is_int16(offset)) {
addi(destination, kRootRegister, Operand(offset));
+ } else {
+ mov(destination, Operand(offset));
+ add(destination, kRootRegister, destination);
}
}
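The new middle case exists because addi encodes a signed 16-bit immediate: only offsets in [-32768, 32767] fold into a single instruction, so wider offsets must be materialized first and then added to kRootRegister, as the final branch does:

  // e.g. offset == 0x12345 (does not fit in addi's signed 16-bit field):
  mov(destination, Operand(0x12345));
  add(destination, kRootRegister, destination);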
@@ -181,35 +173,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ppc code, never THUMB code
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- Register scratch = ip;
- IndirectLoadConstant(scratch, code);
- addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip, cr);
- Jump(scratch);
- bind(&skip);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip, cr);
- Jump(ip);
- bind(&skip);
- return;
- }
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (root_array_available_ && options().isolate_independent_code) {
+ Label skip;
+ Register scratch = ip;
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadP(scratch, MemOperand(kRootRegister, offset), r0);
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(scratch);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ Label skip;
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+ Jump(ip);
+ bind(&skip);
+ return;
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
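Both branches above end in an indirect jump through ip/scratch; the first resolves the target with a single root-relative load. With kSystemPointerSize == 8, a builtin of index 42 (an arbitrary index, for illustration) is reached as:

  // offset = 42 * 8 + IsolateData::builtin_entry_table_offset()
  LoadP(scratch, MemOperand(kRootRegister, offset), r0);
  Jump(scratch);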
@@ -252,37 +247,39 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
-
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- // Use ip directly instead of using UseScratchRegisterScope, as we do not
- // preserve scratch registers across calls.
- IndirectLoadConstant(ip, code);
- addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
- return;
- } else if (options().inline_offheap_trampolines) {
- int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin_index);
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- EmbeddedData d = EmbeddedData::FromBlob();
- Address entry = d.InstructionStartOfBuiltin(builtin_index);
- // Use ip directly instead of using UseScratchRegisterScope, as we do
- // not preserve scratch registers across calls.
- mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Label skip;
- if (cond != al) b(NegateCondition(cond), &skip);
- Call(ip);
- bind(&skip);
- return;
- }
- }
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+ DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
+ Builtins::IsIsolateIndependentBuiltin(*code));
+
+ int builtin_index = Builtins::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index);
+
+ if (root_array_available_ && options().isolate_independent_code) {
+ Label skip;
+ int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadP(ip, MemOperand(kRootRegister, offset));
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do
+ // not preserve scratch registers across calls.
+ mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Label skip;
+ if (cond != al) b(NegateCondition(cond), &skip);
+ Call(ip);
+ bind(&skip);
+ return;
}
Call(code.address(), rmode, cond);
}
@@ -305,7 +302,7 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(r0);
}
-void TurboAssembler::Push(Smi* smi) {
+void TurboAssembler::Push(Smi smi) {
mov(r0, Operand(smi));
push(r0);
}
@@ -398,7 +395,8 @@ void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
- LoadP(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), r0);
+ LoadP(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
@@ -468,25 +466,43 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ CallRecordWriteStub(
+ object, address, remembered_set_action, fp_mode,
+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
+ kNullAddress);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Address wasm_target) {
+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Handle<Code>::null(), wasm_target);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ Handle<Code> code_target, Address wasm_target) {
+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. we always emit the remembered set and save FP registers in
// RecordWriteStub. If a large performance regression is observed, we should
// use these values to avoid unnecessary work.
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
- RegList registers = callable.descriptor().allocatable_registers();
+ RecordWriteDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
SaveRegisters(registers);
- Register object_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kObject));
+ Register object_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
Register slot_parameter(
- callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
- Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kRememberedSet));
- Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
- RecordWriteDescriptor::kFPMode));
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
+ Register remembered_set_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
push(object);
push(address);
@@ -496,7 +512,11 @@ void TurboAssembler::CallRecordWriteStub(
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
- Call(callable.code(), RelocInfo::CODE_TARGET);
+ if (code_target.is_null()) {
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+ } else {
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
RestoreRegisters(registers);
}
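The three overloads funnel into one worker: the two public entry points supply either a Code handle or a Wasm stub address, and the private overload asserts (DCHECK_NE above) that exactly one is set before picking the relocation mode. Roughly, at the call sites (wasm_stub_address is a stand-in name here):

  // JS codegen:
  CallRecordWriteStub(object, address, action, fp_mode);
  // Wasm codegen, where no on-heap Code object is available:
  CallRecordWriteStub(object, address, action, fp_mode, wasm_stub_address);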
@@ -1262,7 +1282,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Move(r7, debug_hook_active);
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
- CmpSmiLiteral(r7, Smi::kZero, r0);
+ CmpSmiLiteral(r7, Smi::zero(), r0);
beq(&skip_hook);
{
@@ -1331,12 +1351,11 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
- CallJSEntry(code);
+ CallCodeObject(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(code);
+ JumpCodeObject(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1399,17 +1418,17 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- Push(Smi::kZero); // Padding.
+ Push(Smi::zero()); // Padding.
// Link the current handler as the next handler.
- // Preserve r3-r7.
- mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
- LoadP(r0, MemOperand(r8));
+ // Preserve r4-r8.
+ Move(r3,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ LoadP(r0, MemOperand(r3));
push(r0);
// Set this new handler as the current one.
- StoreP(sp, MemOperand(r8));
+ StoreP(sp, MemOperand(r3));
}
@@ -1418,8 +1437,8 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
- mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate())));
+ Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
+ isolate()));
StoreP(r4, MemOperand(ip));
Drop(1); // Drop padding.
@@ -1538,30 +1557,6 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-void TurboAssembler::CallStubDelayed(CodeStub* stub) {
- DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
-
- // Block constant pool for the call instruction sequence.
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
-
- mov(ip, Operand::EmbeddedCode(stub));
- mtctr(ip);
- bctrl();
-}
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
-}
-
-bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
-}
-
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
Register scratch,
@@ -1649,8 +1644,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
mov(r3, Operand(f->nargs));
Move(r4, ExternalReference::Create(f));
DCHECK(!AreAliased(centry, r3, r4));
- addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(centry);
+ CallCodeObject(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
@@ -1702,7 +1696,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
- cmpi(in, Operand(kClearedWeakHeapObject));
+ cmpi(in, Operand(kClearedWeakHeapObjectLower32));
beq(target_if_cleared);
mov(r0, Operand(~kWeakHeapObjectMask));
@@ -1876,6 +1870,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
beq(&do_check);
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+ beq(&do_check);
+
// Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -1990,6 +1988,23 @@ void TurboAssembler::CallCFunctionHelper(Register function,
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ if (isolate() != nullptr) {
+ Register scratch1 = r7;
+ Register scratch2 = r8;
+ Push(scratch1, scratch2);
+
+ mflr(scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ LoadPC(r0);
+ StoreP(r0, MemOperand(scratch1));
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ StoreP(fp, MemOperand(scratch1));
+ mtlr(scratch2);
+ Pop(scratch1, scratch2);
+ }
+
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
@@ -2007,6 +2022,17 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
+ if (isolate() != nullptr) {
+ // We don't unset the PC; the FP is the source of truth.
+ Register scratch1 = r7;
+ Register scratch2 = r8;
+ Push(scratch1, scratch2);
+ Move(scratch1, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ mov(scratch2, Operand::Zero());
+ StoreP(scratch2, MemOperand(scratch1));
+ Pop(scratch1, scratch2);
+ }
+
// Remove the frame built in PrepareCallCFunction.
int stack_passed_arguments =
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
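The bracketing around the call maintains a simple invariant for stack iteration: a nonzero value at fast_c_call_caller_fp_address marks a fast C call in progress. In pseudo-form (deref spelling assumed; the addresses come from the ExternalReferences used above):

  // before the call
  *fast_c_call_caller_pc_address(isolate) = <pc near the call site>;
  *fast_c_call_caller_fp_address(isolate) = fp;
  // ... C function runs ...
  // after the call only FP is cleared; a stale PC is harmless because
  // FP, not PC, is the source of truth.
  *fast_c_call_caller_fp_address(isolate) = 0;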
@@ -2054,7 +2080,7 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) {
mov(dst, Operand(value));
}
-void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
mov(dst, Operand(smi));
}
@@ -2421,8 +2447,7 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
}
}
-
-void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2432,8 +2457,7 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
#endif
}
-
-void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2443,8 +2467,7 @@ void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
#endif
}
-
-void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2454,8 +2477,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-
-void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -2465,8 +2487,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-
-void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
Register scratch, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
LoadSmiLiteral(scratch, smi);
@@ -3000,6 +3021,136 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
+void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiShiftSize == 31);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_pointer register contains the builtin index as a Smi.
+ // Untagging is folded into the indexing operand below.
+ ShiftRightArithImm(builtin_pointer, builtin_pointer,
+ kSmiShift - kSystemPointerSizeLog2);
+ addi(builtin_pointer, builtin_pointer,
+ Operand(IsolateData::builtin_entry_table_offset()));
+ LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
+ Call(builtin_pointer);
+}
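The shift amount folds Smi untagging and table indexing into one operation. The asserts above pin kSmiShiftSize == 31 and kSmiTagSize == 1, so kSmiShift is 32, and kSystemPointerSize == 8 gives kSystemPointerSizeLog2 == 3. A builtin index n therefore arrives as the Smi n << 32, and:

  // (n << 32) >> (32 - 3)  ==  n << 3  ==  n * 8
  // i.e. the byte offset of entry n in the builtin entry table.
  // E.g. n = 5: (5 << 32) >> 29 == 40.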
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_builtin, out;
+
+ Register scratch = r11;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is a builtin. If so, call its (off-heap)
+ // entry point directly without going through the (on-heap) trampoline.
+ // Otherwise, just call the Code object as always.
+
+ LoadWordArith(scratch,
+ FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ cmpi(scratch, Operand(Builtins::kNoBuiltinId));
+ bne(&if_code_is_builtin);
+
+ // A non-builtin Code object, the entry point is at
+ // Code::raw_instruction_start().
+ addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ b(&out);
+
+ // A builtin Code object, the entry point is loaded from the builtin entry
+ // table.
+ // The builtin index is loaded in scratch.
+ bind(&if_code_is_builtin);
+ ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
+ add(destination, destination, kRootRegister);
+ LoadP(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
+
+ bind(&out);
+ } else {
+ addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ static constexpr int after_call_offset = 5 * kInstrSize;
+ Label start_call;
+ Register dest = target;
+
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux uses a function descriptor. When calling C code be
+ // aware of this descriptor and pick up values from it
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ LoadP(ip, MemOperand(target, 0));
+ dest = ip;
+ } else if (ABI_CALL_VIA_IP && dest != ip) {
+ Move(ip, target);
+ dest = ip;
+ }
+
+ LoadPC(r7);
+ bind(&start_call);
+ addi(r7, r7, Operand(after_call_offset));
+ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ Call(dest);
+
+ DCHECK_EQ(after_call_offset - kInstrSize,
+ SizeOfCodeGeneratedSince(&start_call));
+}
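A quick accounting of after_call_offset, assuming Call(Register) expands to the usual mtctr + bctrl pair and StoreP to a single store: the code between start_call and the point after the call is four instructions, which is exactly what the trailing DCHECK verifies (after_call_offset - kInstrSize). LoadPC captures the address one instruction before start_call, so r7 + 5 * kInstrSize is the instruction following bctrl, the stored return address:

  addi   r7, r7, 5 * kInstrSize  ; 1  (start_call)
  StoreP r7, <extra param slot>  ; 2
  mtctr  dest                    ; 3  \ Call(dest)
  bctrl                          ; 4  /
  ; execution resumes here == r7 + 5 * kInstrSize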
+
+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
+ NoRootArrayScope no_root_array(this);
+
+ // Save the deopt id in r29 (we don't need the roots array from now on).
+ DCHECK_LE(deopt_id, 0xFFFF);
+
+ mov(r29, Operand(deopt_id));
+ Call(target, RelocInfo::RUNTIME_ENTRY);
+}
+
+void TurboAssembler::ZeroExtByte(Register dst, Register src) {
+ clrldi(dst, src, Operand(56));
+}
+
+void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
+ clrldi(dst, src, Operand(48));
+}
+
+void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
+ clrldi(dst, src, Operand(32));
+}
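All three helpers lean on clrldi (clear left doubleword immediate), which zeroes the n most significant bits of a 64-bit register:

  // clrldi dst, src, n  ==  dst = src & ((uint64_t{1} << (64 - n)) - 1)
  // ZeroExtByte:     n = 56  -> keep the low  8 bits
  // ZeroExtHalfWord: n = 48  -> keep the low 16 bits
  // ZeroExtWord32:   n = 32  -> keep the low 32 bits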
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 897ac5553e..a85af61761 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -2,44 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
-#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/contexts.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/ppc/assembler-ppc.h"
-#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = r3;
-constexpr Register kReturnRegister1 = r4;
-constexpr Register kReturnRegister2 = r5;
-constexpr Register kJSFunctionRegister = r4;
-constexpr Register kContextRegister = r30;
-constexpr Register kAllocateSizeRegister = r4;
-constexpr Register kSpeculationPoisonRegister = r14;
-constexpr Register kInterpreterAccumulatorRegister = r3;
-constexpr Register kInterpreterBytecodeOffsetRegister = r15;
-constexpr Register kInterpreterBytecodeArrayRegister = r16;
-constexpr Register kInterpreterDispatchTableRegister = r17;
-
-constexpr Register kJavaScriptCallArgCountRegister = r3;
-constexpr Register kJavaScriptCallCodeStartRegister = r5;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = r6;
-constexpr Register kJavaScriptCallExtraArg1Register = r5;
-
-constexpr Register kOffHeapTrampolineRegister = ip;
-constexpr Register kRuntimeCallFunctionRegister = r4;
-constexpr Register kRuntimeCallArgCountRegister = r3;
-constexpr Register kRuntimeCallArgvRegister = r5;
-constexpr Register kWasmInstanceRegister = r10;
-
// ----------------------------------------------------------------------------
// Static helper functions
@@ -90,14 +68,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
- TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
- : TurboAssemblerBase(options, buffer, buffer_size) {}
-
- TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int buffer_size,
- CodeObjectRequired create_code_object)
- : TurboAssemblerBase(isolate, options, buffer, buffer_size,
- create_code_object) {}
+ template <typename... Args>
+ explicit TurboAssembler(Args&&... args)
+ : TurboAssemblerBase(std::forward<Args>(args)...) {}
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
@@ -166,10 +139,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- addi(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ mov(kRootRegister, Operand(isolate_root));
}
// These exist to provide portability between 32 and 64bit
@@ -187,7 +158,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
// load an SMI value <value> to GPR <dst>
- void LoadSmiLiteral(Register dst, Smi* smi);
+ void LoadSmiLiteral(Register dst, Smi smi);
void LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -222,7 +193,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<HeapObject> handle);
- void Push(Smi* smi);
+ void Push(Smi smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -298,6 +269,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target);
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
@@ -399,7 +373,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Print a message to stdout and abort execution.
void Abort(AbortReason reason);
- inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
@@ -434,11 +407,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond = al);
void Call(Label* target);
- void CallForDeoptimization(Address target, int deopt_id,
- RelocInfo::Mode rmode) {
- USE(deopt_id);
- Call(target, rmode);
- }
+ void LoadCodeObjectEntry(Register destination, Register code_object) override;
+ void CallCodeObject(Register code_object) override;
+ void JumpCodeObject(Register code_object) override;
+
+ void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -485,7 +459,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
- void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
+ void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
@@ -504,6 +478,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// do nothing
}
}
+
+ void ZeroExtByte(Register dst, Register src);
+ void ZeroExtHalfWord(Register dst, Register src);
+ void ZeroExtWord32(Register dst, Register src);
+
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
@@ -515,18 +494,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd, RCBit rc = LeaveRC,
bool test = false) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
- int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
+ int rotate = (rangeEnd == 0) ? 0 : kBitsPerSystemPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
// Prefer faster andi when applicable.
andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+ rldicl(dst, src, rotate, kBitsPerSystemPointer - width, rc);
#else
- rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
- rc);
+ rlwinm(dst, src, rotate, kBitsPerSystemPointer - width,
+ kBitsPerSystemPointer - 1, rc);
#endif
}
}
@@ -540,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC, bool test = false) {
- int start = kBitsPerPointer - 1;
+ int start = kBitsPerSystemPointer - 1;
int end;
uintptr_t bit = (1L << start);
@@ -628,16 +607,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
// 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
DoubleRegister double_input, StubCallMode stub_mode);
- // Call a code stub.
- void CallStubDelayed(CodeStub* stub);
-
void LoadConstantPoolPointerRegister();
// Loads the constant pool pointer (kConstantPoolRegister).
@@ -650,6 +624,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
+ // Generates an instruction sequence such that the return address points
+ // to the instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
void ResetSpeculationPoisonRegister();
private:
@@ -659,21 +638,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int num_double_arguments);
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
+ void CallRecordWriteStub(Register object, Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target,
+ Address wasm_target);
};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public TurboAssembler {
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
- MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
- : TurboAssembler(options, buffer, size) {}
-
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
- size, create_code_object) {}
-
- MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
- void* buffer, int size, CodeObjectRequired create_code_object);
+ template <typename... Args>
+ explicit MacroAssembler(Args&&... args)
+ : TurboAssembler(std::forward<Args>(args)...) {}
// ---------------------------------------------------------------------------
// GC Support
@@ -780,17 +756,15 @@ class MacroAssembler : public TurboAssembler {
void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
- void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
- void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
- void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+ void AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
+ void SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
+ void CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr = cr7);
- void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+ void CmplSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr = cr7);
- void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
+ void AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch,
RCBit rc = LeaveRC);
-
-
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -883,10 +857,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond = al);
void CallJSEntry(Register target);
- // Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
diff --git a/deps/v8/src/ppc/register-ppc.h b/deps/v8/src/ppc/register-ppc.h
new file mode 100644
index 0000000000..11ddb17dc5
--- /dev/null
+++ b/deps/v8/src/ppc/register-ppc.h
@@ -0,0 +1,321 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_REGISTER_PPC_H_
+#define V8_PPC_REGISTER_PPC_H_
+
+#include "src/register.h"
+#include "src/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
+
+#if V8_EMBEDDED_CONSTANT_POOL
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r30)
+#else
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
+#endif
+
+#define LOW_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+
+#define NON_LOW_DOUBLE_REGISTERS(V) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define DOUBLE_REGISTERS(V) \
+ LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define C_REGISTERS(V) \
+ V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
+ V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
+// clang-format on
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 32;
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 3 | // r3 a1
+ 1 << 4 | // r4 a2
+ 1 << 5 | // r5 a3
+ 1 << 6 | // r6 a4
+ 1 << 7 | // r7 a5
+ 1 << 8 | // r8 a6
+ 1 << 9 | // r9 a7
+ 1 << 10 | // r10 a8
+ 1 << 11; // r11
+
+const int kNumJSCallerSaved = 9;
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedCode(0) returns r3.code() == 3.
+int JSCallerSavedCode(int n);
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 14 | // r14
+ 1 << 15 | // r15
+ 1 << 16 | // r16
+ 1 << 17 | // r17
+ 1 << 18 | // r18
+ 1 << 19 | // r19
+ 1 << 20 | // r20
+ 1 << 21 | // r21
+ 1 << 22 | // r22
+ 1 << 23 | // r23
+ 1 << 24 | // r24
+ 1 << 25 | // r25
+ 1 << 26 | // r26
+ 1 << 27 | // r27
+ 1 << 28 | // r28
+ 1 << 29 | // r29
+ 1 << 30 | // r30
+ 1 << 31; // r31
+
+const int kNumCalleeSaved = 18;
+
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7 | // d7
+ 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13; // d13
+
+const int kNumCallerSavedDoubles = 14;
+
+const RegList kCalleeSavedDoubles = 1 << 14 | // d14
+ 1 << 15 | // d15
+ 1 << 16 | // d16
+ 1 << 17 | // d17
+ 1 << 18 | // d18
+ 1 << 19 | // d19
+ 1 << 20 | // d20
+ 1 << 21 | // d21
+ 1 << 22 | // d22
+ 1 << 23 | // d23
+ 1 << 24 | // d24
+ 1 << 25 | // d25
+ 1 << 26 | // d26
+ 1 << 27 | // d27
+ 1 << 28 | // d28
+ 1 << 29 | // d29
+ 1 << 30 | // d30
+ 1 << 31; // d31
+
+const int kNumCalleeSavedDoubles = 18;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 32;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI. Note that kNumRequiredStackFrameSlots must
+// satisfy alignment requirements (rounding up if required).
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] TOC save area
+// [4] Parameter1 save area
+// ...
+// [11] Parameter8 save area
+// [12] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 12;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 12;
+#else // AIX
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] reserved for compiler
+// [4] reserved by binder
+// [5] TOC save area
+// [6] Parameter1 save area
+// ...
+// [13] Parameter8 save area
+// [14] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 14;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 14;
+#endif
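These constants are consumed wherever generated code reaches into the C linkage area; TurboAssembler::StoreReturnAddressAndCall earlier in this diff, for instance, writes the computed return address into the first extra-parameter slot:

  StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));

On ppc64le that is sp + 12 * kPointerSize and on AIX sp + 14 * kPointerSize, i.e. immediately past the ABI-defined save areas listed above.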
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+#if V8_TARGET_LITTLE_ENDIAN
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+#else
+ static constexpr int kMantissaOffset = 4;
+ static constexpr int kExponentOffset = 0;
+#endif
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
+ "Register can efficiently be passed by value");
+
+#define DEFINE_REGISTER(R) \
+ constexpr Register R = Register::from_code<kRegCode_##R>();
+GENERAL_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr Register no_reg = Register::no_reg();
+
+// Aliases
+constexpr Register kConstantPoolRegister = r28; // Constant pool.
+constexpr Register kRootRegister = r29; // Roots array pointer.
+constexpr Register cp = r30; // JavaScript context pointer.
+
+constexpr bool kPadArguments = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// Double word FP register.
+class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
+ public:
+ // A few double registers are reserved: one as a scratch register and one
+ // to hold 0.0.
+ // d14: 0.0
+ // d13: scratch register.
+ static constexpr int kSizeInBytes = 8;
+ inline static int NumRegisters();
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
+static_assert(sizeof(DoubleRegister) == sizeof(int),
+ "DoubleRegister can efficiently be passed by value");
+
+typedef DoubleRegister FloatRegister;
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
+#define DEFINE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
+DOUBLE_REGISTERS(DEFINE_REGISTER)
+#undef DEFINE_REGISTER
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
+constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
+constexpr DoubleRegister kDoubleRegZero = d14;
+constexpr DoubleRegister kScratchDoubleReg = d13;
+
+Register ToRegister(int num);
+
+enum CRegisterCode {
+#define REGISTER_CODE(R) kCCode_##R,
+ C_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kCAfterLast
+};
+
+// Condition register (CR) field.
+class CRegister : public RegisterBase<CRegister, kCAfterLast> {
+ friend class RegisterBase;
+ explicit constexpr CRegister(int code) : RegisterBase(code) {}
+};
+
+constexpr CRegister no_creg = CRegister::no_reg();
+#define DECLARE_C_REGISTER(R) \
+ constexpr CRegister R = CRegister::from_code<kCCode_##R>();
+C_REGISTERS(DECLARE_C_REGISTER)
+#undef DECLARE_C_REGISTER
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
+DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = r3;
+constexpr Register kReturnRegister1 = r4;
+constexpr Register kReturnRegister2 = r5;
+constexpr Register kJSFunctionRegister = r4;
+constexpr Register kContextRegister = r30;
+constexpr Register kAllocateSizeRegister = r4;
+constexpr Register kSpeculationPoisonRegister = r14;
+constexpr Register kInterpreterAccumulatorRegister = r3;
+constexpr Register kInterpreterBytecodeOffsetRegister = r15;
+constexpr Register kInterpreterBytecodeArrayRegister = r16;
+constexpr Register kInterpreterDispatchTableRegister = r17;
+
+constexpr Register kJavaScriptCallArgCountRegister = r3;
+constexpr Register kJavaScriptCallCodeStartRegister = r5;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = r6;
+constexpr Register kJavaScriptCallExtraArg1Register = r5;
+
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r4;
+constexpr Register kRuntimeCallArgCountRegister = r3;
+constexpr Register kRuntimeCallArgvRegister = r5;
+constexpr Register kWasmInstanceRegister = r10;
+constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PPC_REGISTER_PPC_H_
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 900e03f6bb..b46610d592 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -2,34 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/simulator-ppc.h"
+
+#if defined(USE_SIMULATOR)
+
#include <stdarg.h>
#include <stdlib.h>
#include <cmath>
-#if V8_TARGET_ARCH_PPC
-
#include "src/assembler.h"
#include "src/base/bits.h"
-#include "src/codegen.h"
+#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frame-constants-ppc.h"
-#include "src/ppc/simulator-ppc.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime-utils.h"
-#if defined(USE_SIMULATOR)
-
// Only build the simulator if not compiling for real PPC hardware.
namespace v8 {
namespace internal {
-const auto GetRegConfig = RegisterConfiguration::Default;
-
-// static
-base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
- LAZY_INSTANCE_INITIALIZER;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get);
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -272,7 +270,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value);
+ RegisterName(Register::from_code(i)), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -291,7 +289,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- GetRegConfig()->GetGeneralRegisterName(i), value, value);
+ RegisterName(Register::from_code(i)), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -311,7 +309,7 @@ void PPCDebugger::Debug() {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
- GetRegConfig()->GetDoubleRegisterName(i), dvalue,
+ RegisterName(DoubleRegister::from_code(i)), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
@@ -349,7 +347,7 @@ void PPCDebugger::Debug() {
intptr_t value;
StdoutStream os;
if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
+ Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
obj->Print(os);
@@ -401,14 +399,12 @@ void PPCDebugger::Debug() {
while (cur < end) {
PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- intptr_t value = *cur;
+ Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) ||
- current_heap->ContainsSlow(obj->address())) {
+ if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
obj->ShortPrint();
}
@@ -774,7 +770,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
- global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
free(stack_);
}
@@ -870,245 +865,26 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
-int Simulator::WriteExDW(intptr_t addr, uint64_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-uint64_t Simulator::ReadExDWU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint64_t* ptr = reinterpret_cast<uint64_t*>(addr);
- return *ptr;
-}
-
-uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- return *ptr;
-}
-
-uint32_t Simulator::ReadExWU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- return *ptr;
-}
-
-int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- *ptr = value;
- return;
-}
-
-int Simulator::WriteExW(intptr_t addr, uint32_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
+#define GENERATE_RW_FUNC(size, type) \
+ type Simulator::Read##size(uintptr_t addr) { \
+ type value; \
+ Read(addr, &value); \
+ return value; \
+ } \
+ type Simulator::ReadEx##size(uintptr_t addr) { \
+ type value; \
+ ReadEx(addr, &value); \
+ return value; \
+ } \
+ void Simulator::Write##size(uintptr_t addr, type value) { \
+ Write(addr, value); \
+ } \
+ int32_t Simulator::WriteEx##size(uintptr_t addr, type value) { \
+ return WriteEx(addr, value); \
}
-}
-
-void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr = value;
- return;
-}
-
-uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-}
-
-uint16_t Simulator::ReadExHU(intptr_t addr, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-}
-
-int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-}
-
-
-void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-}
-
-int Simulator::WriteExH(intptr_t addr, uint16_t value, Instruction* instr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-uint8_t Simulator::ReadBU(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-
-int8_t Simulator::ReadB(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-uint8_t Simulator::ReadExBU(intptr_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
- global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
- &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-void Simulator::WriteB(intptr_t addr, uint8_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(intptr_t addr, int8_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-int Simulator::WriteExB(intptr_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
- global_monitor_.Pointer()->NotifyStoreExcl_Locked(
- addr, &global_monitor_processor_)) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
- return 0;
- } else {
- return 1;
- }
-}
-
-intptr_t* Simulator::ReadDW(intptr_t addr) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(addr);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return ptr;
-}
-
-
-void Simulator::WriteDW(intptr_t addr, int64_t value) {
- // All supported PPC targets allow unaligned accesses, so we don't need to
- // check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyStore(addr);
- global_monitor_.Pointer()->NotifyStore_Locked(addr,
- &global_monitor_processor_);
- int64_t* ptr = reinterpret_cast<int64_t*>(addr);
- *ptr = value;
- return;
-}
+RW_VAR_LIST(GENERATE_RW_FUNC);
+#undef GENERATE_RW_FUNC
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
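[Editor's note] For reference, instantiating GENERATE_RW_FUNC above with the (W, int32_t) pair from RW_VAR_LIST expands to roughly these four definitions (expanded by hand here; only the member names vary per size):

    int32_t Simulator::ReadW(uintptr_t addr) {
      int32_t value;
      Read(addr, &value);  // locked memcpy from simulated memory
      return value;
    }
    int32_t Simulator::ReadExW(uintptr_t addr) {
      int32_t value;
      ReadEx(addr, &value);  // also places a reservation
      return value;
    }
    void Simulator::WriteW(uintptr_t addr, int32_t value) { Write(addr, value); }
    int32_t Simulator::WriteExW(uintptr_t addr, int32_t value) {
      return WriteEx(addr, value);  // 0 on success, 1 if reservation lost
    }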
@@ -1172,23 +948,10 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
return overflow;
}
-
-#if V8_TARGET_ARCH_PPC64
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
- *x = reinterpret_cast<intptr_t>(pair->x);
- *y = reinterpret_cast<intptr_t>(pair->y);
+ *x = static_cast<intptr_t>(pair->x);
+ *y = static_cast<intptr_t>(pair->y);
}
-#else
-static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
-#if V8_TARGET_BIG_ENDIAN
- *x = static_cast<int32_t>(*pair >> 32);
- *y = static_cast<int32_t>(*pair);
-#else
- *x = static_cast<int32_t>(*pair);
- *y = static_cast<int32_t>(*pair >> 32);
-#endif
-}
-#endif
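[Editor's note] decodeObjectPair() can now cast the pair's fields directly because both are raw addresses rather than Object pointers. An approximate sketch of the shape being decoded; the authoritative definition lives in src/runtime/runtime-utils.h:

    using Address = uintptr_t;  // as in src/globals.h

    // Two tagged values returned from a runtime call in a register pair.
    struct ObjectPair {
      Address x;
      Address y;
    };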
// Calls into the V8 runtime.
typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
@@ -2094,7 +1857,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#if V8_TARGET_ARCH_PPC64
case EXTSW: {
- const int shift = kBitsPerPointer - 32;
+ const int shift = kBitsPerSystemPointer - 32;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -2107,7 +1870,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#endif
case EXTSH: {
- const int shift = kBitsPerPointer - 16;
+ const int shift = kBitsPerSystemPointer - 16;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -2119,7 +1882,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case EXTSB: {
- const int shift = kBitsPerPointer - 8;
+ const int shift = kBitsPerSystemPointer - 8;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
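[Editor's note] All three EXTS* cases rely on the same idiom: shifting left and then arithmetically right by (register width - field width) replicates the field's sign bit across the register. A standalone worked sketch mirroring the simulator's arithmetic, assuming kBitsPerSystemPointer == 64:

    // EXTSB: sign-extend the low byte, so shift == 64 - 8 == 56.
    int64_t rs_val = 0x80;                  // low byte is -128 as int8_t
    int64_t ra_val = (rs_val << 56) >> 56;  // arithmetic right shift
    // ra_val == -128 (0xFFFFFFFFFFFFFF80).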
@@ -2137,7 +1900,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- int32_t val = ReadW(ra_val + rb_val, instr);
+ int32_t val = ReadW(ra_val + rb_val);
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
@@ -2165,8 +1928,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + rb_val));
- set_d_register(frt, *dptr);
+      int64_t dval = ReadDW(ra_val + rb_val);
+      set_d_register(frt, dval);
if (opcode == LFDUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2196,7 +1959,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#else
p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + rb_val, *p, instr);
+ WriteW(ra_val + rb_val, *p);
if (opcode == STFSUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2250,6 +2013,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#endif
case SYNC: {
// todo - simulate sync
+      __sync_synchronize();  // Full host memory barrier.
break;
}
case ICBI: {
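[Editor's note] The SYNC case now calls __sync_synchronize(), a GCC/Clang builtin that emits a full host memory barrier. For reference, a standard-C++ sketch of the same effect (an alternative, not what the patch uses):

    #include <atomic>

    inline void SimulateSync() {
      // Sequentially consistent two-way fence, comparable to
      // __sync_synchronize() and to PPC's heavyweight sync.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }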
@@ -2263,7 +2027,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- set_register(rt, ReadWU(ra_val + offset, instr));
+ set_register(rt, ReadWU(ra_val + offset));
if (opcode == LWZU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -2292,7 +2056,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteW(ra_val + offset, rs_val, instr);
+ WriteW(ra_val + offset, rs_val);
if (opcode == STWU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -2328,7 +2092,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExH(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExH(ra_val + rb_val, rs_val));
break;
}
case STWCX: {
@@ -2338,7 +2102,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExW(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExW(ra_val + rb_val, rs_val));
break;
}
case STDCX: {
@@ -2348,7 +2112,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int64_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- SetCR0(WriteExDW(ra_val + rb_val, rs_val, instr));
+ SetCR0(WriteExDW(ra_val + rb_val, rs_val));
break;
}
case TW: {
@@ -2962,7 +2726,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- WriteW(ra_val + rb_val, rs_val, instr);
+ WriteW(ra_val + rb_val, rs_val);
if (opcode == STWUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -2992,7 +2756,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
intptr_t rb_val = get_register(rb);
- WriteH(ra_val + rb_val, rs_val, instr);
+ WriteH(ra_val + rb_val, rs_val);
if (opcode == STHUX) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + rb_val);
@@ -3006,7 +2770,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadWU(ra_val + rb_val, instr));
+ set_register(rt, ReadWU(ra_val + rb_val));
if (opcode == LWZUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3020,7 +2784,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadW(ra_val + rb_val, instr));
+ set_register(rt, ReadW(ra_val + rb_val));
break;
}
case LDX:
@@ -3030,8 +2794,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- intptr_t* result = ReadDW(ra_val + rb_val);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + rb_val);
+ set_register(rt, result);
if (opcode == LDUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3075,7 +2839,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadHU(ra_val + rb_val, instr) & 0xFFFF);
+      set_register(rt, ReadHU(ra_val + rb_val));
if (opcode == LHZUX) {
DCHECK(ra != 0 && ra != rt);
set_register(ra, ra_val + rb_val);
@@ -3088,7 +2852,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadH(ra_val + rb_val, instr));
+ set_register(rt, ReadH(ra_val + rb_val));
break;
}
case LBARX: {
@@ -3106,7 +2870,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExHU(ra_val + rb_val, instr));
+ set_register(rt, ReadExHU(ra_val + rb_val));
break;
}
case LWARX: {
@@ -3115,7 +2879,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExWU(ra_val + rb_val, instr));
+ set_register(rt, ReadExWU(ra_val + rb_val));
break;
}
case LDARX: {
@@ -3124,7 +2888,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rb = instr->RBValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
intptr_t rb_val = get_register(rb);
- set_register(rt, ReadExDWU(ra_val + rb_val, instr));
+ set_register(rt, ReadExDWU(ra_val + rb_val));
break;
}
case DCBF: {
@@ -3165,7 +2929,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- uintptr_t result = ReadHU(ra_val + offset, instr) & 0xFFFF;
+      uintptr_t result = ReadHU(ra_val + offset);
set_register(rt, result);
if (opcode == LHZU) {
set_register(ra, ra_val + offset);
@@ -3179,7 +2943,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rt = instr->RTValue();
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t result = ReadH(ra_val + offset, instr);
+ intptr_t result = ReadH(ra_val + offset);
set_register(rt, result);
if (opcode == LHAU) {
set_register(ra, ra_val + offset);
@@ -3194,7 +2958,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int16_t rs_val = get_register(rs);
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- WriteH(ra_val + offset, rs_val, instr);
+ WriteH(ra_val + offset, rs_val);
if (opcode == STHU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3214,7 +2978,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int32_t val = ReadW(ra_val + offset, instr);
+ int32_t val = ReadW(ra_val + offset);
float* fptr = reinterpret_cast<float*>(&val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Conversion using double changes sNan to qNan on ia32/x64
@@ -3242,8 +3006,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int ra = instr->RAValue();
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- int64_t* dptr = reinterpret_cast<int64_t*>(ReadDW(ra_val + offset));
- set_d_register(frt, *dptr);
+      int64_t dval = ReadDW(ra_val + offset);
+      set_d_register(frt, dval);
if (opcode == LFDU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3273,7 +3037,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#else
p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + offset, *p, instr);
+ WriteW(ra_val + offset, *p);
if (opcode == STFSU) {
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
@@ -3345,11 +3109,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
return;
}
case FSQRT: {
- lazily_initialize_fast_sqrt();
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val);
+ double frt_val = std::sqrt(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -3847,19 +3610,19 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
switch (instr->Bits(1, 0)) {
case 0: { // ld
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + offset);
+ set_register(rt, result);
break;
}
case 1: { // ldu
- intptr_t* result = ReadDW(ra_val + offset);
- set_register(rt, *result);
+ intptr_t result = ReadDW(ra_val + offset);
+ set_register(rt, result);
DCHECK_NE(ra, 0);
set_register(ra, ra_val + offset);
break;
}
case 2: { // lwa
- intptr_t result = ReadW(ra_val + offset, instr);
+ intptr_t result = ReadW(ra_val + offset);
set_register(rt, result);
break;
}
@@ -4187,170 +3950,58 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-Simulator::LocalMonitor::LocalMonitor()
- : access_state_(MonitorAccess::Open),
- tagged_addr_(0),
- size_(TransactionSize::None) {}
-
-void Simulator::LocalMonitor::Clear() {
+void Simulator::GlobalMonitor::Clear() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
size_ = TransactionSize::None;
+ thread_id_ = ThreadId::Invalid();
}
-void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // A load could cause a cache eviction which will affect the monitor. As a
- // result, it's most strict to unconditionally clear the local monitor on
- // load.
- Clear();
- }
-}
-
-void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
- TransactionSize size) {
+void Simulator::GlobalMonitor::NotifyLoadExcl(uintptr_t addr,
+ TransactionSize size,
+ ThreadId thread_id) {
+  // TODO(ppc): A single global monitor effectively limits us to one active
+  // reservation across all processors, which can serialize parallel threads
+  // executing load-and-reserve plus store-conditional on unrelated memory.
+  // The simulator still adheres to the architecture spec, but this seems
+  // overly heavy-handed.
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
size_ = size;
+ thread_id_ = thread_id;
}
-void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // A store could cause a cache eviction which will affect the
- // monitor. As a result, it's most strict to unconditionally clear the
- // local monitor on store.
- Clear();
- }
-}
-
-bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
- TransactionSize size) {
+void Simulator::GlobalMonitor::NotifyStore(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id) {
if (access_state_ == MonitorAccess::Exclusive) {
- if (addr == tagged_addr_ && size_ == size) {
- Clear();
- return true;
- } else {
+      // Determine whether this store overlaps the reserved region.
+ uintptr_t transaction_start = addr;
+ uintptr_t transaction_end = addr + static_cast<uintptr_t>(size);
+ uintptr_t exclusive_transaction_start = tagged_addr_;
+ uintptr_t exclusive_transaction_end =
+ tagged_addr_ + static_cast<uintptr_t>(size_);
+      bool overlaps = transaction_start <= exclusive_transaction_end &&
+                      exclusive_transaction_start <= transaction_end;
+      if (overlaps && !thread_id_.Equals(thread_id)) {
Clear();
- return false;
}
- } else {
- DCHECK(access_state_ == MonitorAccess::Open);
- return false;
- }
-}
-
-Simulator::GlobalMonitor::Processor::Processor()
- : access_state_(MonitorAccess::Open),
- tagged_addr_(0),
- next_(nullptr),
- prev_(nullptr) {}
-
-void Simulator::GlobalMonitor::Processor::Clear_Locked() {
- access_state_ = MonitorAccess::Open;
- tagged_addr_ = 0;
-}
-void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
- access_state_ = MonitorAccess::Exclusive;
- tagged_addr_ = addr;
-}
-
-void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
- int32_t addr, bool is_requesting_processor) {
- if (access_state_ == MonitorAccess::Exclusive) {
- // It is possible that a store caused a cache eviction,
- // which can affect the montior, so conservatively,
- // we always clear the monitor.
- Clear_Locked();
}
}
-bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
- int32_t addr, bool is_requesting_processor) {
- if (access_state_ == MonitorAccess::Exclusive) {
- if (is_requesting_processor) {
- if (addr == tagged_addr_) {
- Clear_Locked();
- return true;
- }
- } else if (addr == tagged_addr_) {
- Clear_Locked();
- return false;
- }
- }
- return false;
-}
-
-Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
-
-void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
- Processor* processor) {
- processor->NotifyLoadExcl_Locked(addr);
- PrependProcessor_Locked(processor);
-}
-
-void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
- Processor* processor) {
- // Notify each processor of the store operation.
- for (Processor* iter = head_; iter; iter = iter->next_) {
- bool is_requesting_processor = iter == processor;
- iter->NotifyStore_Locked(addr, is_requesting_processor);
- }
-}
-
-bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
- Processor* processor) {
- DCHECK(IsProcessorInLinkedList_Locked(processor));
- if (processor->NotifyStoreExcl_Locked(addr, true)) {
- // Notify the other processors that this StoreExcl succeeded.
- for (Processor* iter = head_; iter; iter = iter->next_) {
- if (iter != processor) {
- iter->NotifyStoreExcl_Locked(addr, false);
- }
- }
- return true;
- } else {
- return false;
- }
-}
-
-bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
- Processor* processor) const {
- return head_ == processor || processor->next_ || processor->prev_;
-}
-
-void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
- if (IsProcessorInLinkedList_Locked(processor)) {
- return;
- }
-
- if (head_) {
- head_->prev_ = processor;
- }
- processor->prev_ = nullptr;
- processor->next_ = head_;
- head_ = processor;
-}
-
-void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
- if (!IsProcessorInLinkedList_Locked(processor)) {
- return;
- }
-
- if (processor->prev_) {
- processor->prev_->next_ = processor->next_;
- } else {
- head_ = processor->next_;
- }
- if (processor->next_) {
- processor->next_->prev_ = processor->prev_;
- }
- processor->prev_ = nullptr;
- processor->next_ = nullptr;
+bool Simulator::GlobalMonitor::NotifyStoreExcl(uintptr_t addr,
+ TransactionSize size,
+ ThreadId thread_id) {
+ bool permission = access_state_ == MonitorAccess::Exclusive &&
+ addr == tagged_addr_ && size_ == size &&
+ thread_id_.Equals(thread_id);
+ // The reservation is cleared if the processor holding the reservation
+ // executes a store conditional instruction to any address.
+ Clear();
+ return permission;
}
} // namespace internal
} // namespace v8
+#undef SScanF
#endif // USE_SIMULATOR
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 7b26906c29..e0f4eeae2b 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -12,11 +12,16 @@
#ifndef V8_PPC_SIMULATOR_PPC_H_
#define V8_PPC_SIMULATOR_PPC_H_
-#include "src/allocation.h"
+// globals.h defines USE_SIMULATOR.
+#include "src/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
+#include "src/allocation.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+
#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/ppc/constants-ppc.h"
@@ -241,49 +246,61 @@ class Simulator : public SimulatorBase {
void PrintStopInfo(uint32_t code);
// Read and write memory.
- inline uint8_t ReadBU(intptr_t addr);
- inline uint8_t ReadExBU(intptr_t addr);
- inline int8_t ReadB(intptr_t addr);
- inline void WriteB(intptr_t addr, uint8_t value);
- inline int WriteExB(intptr_t addr, uint8_t value);
- inline void WriteB(intptr_t addr, int8_t value);
-
- inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
- inline uint16_t ReadExHU(intptr_t addr, Instruction* instr);
- inline int16_t ReadH(intptr_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
- inline int WriteExH(intptr_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
-
- inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
- inline uint32_t ReadExWU(intptr_t addr, Instruction* instr);
- inline int32_t ReadW(intptr_t addr, Instruction* instr);
- inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
- inline int WriteExW(intptr_t addr, uint32_t value, Instruction* instr);
- inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
-
- intptr_t* ReadDW(intptr_t addr);
- void WriteDW(intptr_t addr, int64_t value);
- inline int WriteExDW(intptr_t addr, uint64_t value, Instruction* instr);
- inline uint64_t ReadExDWU(intptr_t addr, Instruction* instr);
+ template <typename T>
+ inline void Read(uintptr_t address, T* value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
+ }
+
+ template <typename T>
+ inline void ReadEx(uintptr_t address, T* value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyLoadExcl(
+ address, static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id());
+ memcpy(value, reinterpret_cast<const char*>(address), sizeof(T));
+ }
+
+ template <typename T>
+ inline void Write(uintptr_t address, T value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore(address,
+ static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id());
+ memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
+ }
+
+ template <typename T>
+ inline int32_t WriteEx(uintptr_t address, T value) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (GlobalMonitor::Get()->NotifyStoreExcl(
+ address, static_cast<TransactionSize>(sizeof(T)),
+ isolate_->thread_id())) {
+ memcpy(reinterpret_cast<char*>(address), &value, sizeof(T));
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+
+#define RW_VAR_LIST(V) \
+  V(DWU, uint64_t)     \
+  V(DW, int64_t)       \
+  V(WU, uint32_t)      \
+  V(W, int32_t)        \
+  V(HU, uint16_t)      \
+  V(H, int16_t)        \
+  V(BU, uint8_t)       \
+  V(B, int8_t)
+
+#define GENERATE_RW_FUNC(size, type) \
+ inline type Read##size(uintptr_t addr); \
+ inline type ReadEx##size(uintptr_t addr); \
+ inline void Write##size(uintptr_t addr, type value); \
+ inline int32_t WriteEx##size(uintptr_t addr, type value);
+
+ RW_VAR_LIST(GENERATE_RW_FUNC);
+#undef GENERATE_RW_FUNC
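[Editor's note] The templated helpers above copy through memcpy instead of dereferencing a casted pointer, which keeps unaligned and type-punned accesses well defined in C++; optimizing compilers lower the call to a single load or store anyway. A minimal standalone sketch of the idiom:

    #include <cstdint>
    #include <cstring>

    // Defined behavior for any alignment and any trivially copyable T,
    // unlike *reinterpret_cast<T*>(address).
    template <typename T>
    T LoadUnaligned(uintptr_t address) {
      T value;
      memcpy(&value, reinterpret_cast<const char*>(address), sizeof(T));
      return value;
    }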
void Trace(Instruction* instr);
void SetCR0(intptr_t result, bool setSO = false);
void ExecuteBranchConditional(Instruction* instr, BCType type);
- void ExecuteExt1(Instruction* instr);
- bool ExecuteExt2_10bit_part1(Instruction* instr);
- bool ExecuteExt2_10bit_part2(Instruction* instr);
- bool ExecuteExt2_9bit_part1(Instruction* instr);
- bool ExecuteExt2_9bit_part2(Instruction* instr);
- void ExecuteExt2_5bit(Instruction* instr);
- void ExecuteExt2(Instruction* instr);
- void ExecuteExt3(Instruction* instr);
- void ExecuteExt4(Instruction* instr);
-#if V8_TARGET_ARCH_PPC64
- void ExecuteExt5(Instruction* instr);
-#endif
- void ExecuteExt6(Instruction* instr);
void ExecuteGeneric(Instruction* instr);
void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
@@ -364,72 +381,34 @@ class Simulator : public SimulatorBase {
Byte = 1,
HalfWord = 2,
Word = 4,
- };
-
- class LocalMonitor {
- public:
- LocalMonitor();
-
- // These functions manage the state machine for the local monitor, but do
- // not actually perform loads and stores. NotifyStoreExcl only returns
- // true if the exclusive store is allowed; the global monitor will still
- // have to be checked to see whether the memory should be updated.
- void NotifyLoad(int32_t addr);
- void NotifyLoadExcl(int32_t addr, TransactionSize size);
- void NotifyStore(int32_t addr);
- bool NotifyStoreExcl(int32_t addr, TransactionSize size);
-
- private:
- void Clear();
-
- MonitorAccess access_state_;
- int32_t tagged_addr_;
- TransactionSize size_;
+ DWord = 8,
};
class GlobalMonitor {
public:
- GlobalMonitor();
-
- class Processor {
- public:
- Processor();
-
- private:
- friend class GlobalMonitor;
- // These functions manage the state machine for the global monitor, but do
- // not actually perform loads and stores.
- void Clear_Locked();
- void NotifyLoadExcl_Locked(int32_t addr);
- void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
- bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
-
- MonitorAccess access_state_;
- int32_t tagged_addr_;
- Processor* next_;
- Processor* prev_;
- };
-
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
- void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
- void NotifyStore_Locked(int32_t addr, Processor* processor);
- bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
+ void NotifyLoadExcl(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id);
+ void NotifyStore(uintptr_t addr, TransactionSize size, ThreadId thread_id);
+ bool NotifyStoreExcl(uintptr_t addr, TransactionSize size,
+ ThreadId thread_id);
- // Called when the simulator is destroyed.
- void RemoveProcessor(Processor* processor);
+ static GlobalMonitor* Get();
private:
- bool IsProcessorInLinkedList_Locked(Processor* processor) const;
- void PrependProcessor_Locked(Processor* processor);
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
- Processor* head_;
- };
+ void Clear();
- LocalMonitor local_monitor_;
- GlobalMonitor::Processor global_monitor_processor_;
- static base::LazyInstance<GlobalMonitor>::type global_monitor_;
+ MonitorAccess access_state_ = MonitorAccess::Open;
+ uintptr_t tagged_addr_ = 0;
+ TransactionSize size_ = TransactionSize::None;
+ ThreadId thread_id_ = ThreadId::Invalid();
+ };
};
} // namespace internal