author     Ben Noordhuis <info@bnoordhuis.nl>  2015-01-07 18:38:38 +0100
committer  Ben Noordhuis <info@bnoordhuis.nl>  2015-01-07 22:11:18 +0100
commit     dad73f645cde6920e79db956e7ef82ed640d7615 (patch)
tree       7ba3f3fc7e0722c5f130065461b7c56f571af383 /deps/v8/src/ppc
parent     53ba494537259b18b346dc6150d6a100c557e08f (diff)
download   node-new-dad73f645cde6920e79db956e7ef82ed640d7615.tar.gz

deps: upgrade v8 to 3.31.74.1

PR-URL: https://github.com/iojs/io.js/pull/243
Reviewed-By: Fedor Indutny <fedor@indutny.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>

Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h            593
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              2493
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               1493
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc               1615
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             4893
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.h               325
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                 700
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.h                   44
-rw-r--r--  deps/v8/src/ppc/constants-ppc.cc                91
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h                600
-rw-r--r--  deps/v8/src/ppc/cpu-ppc.cc                      63
-rw-r--r--  deps/v8/src/ppc/debug-ppc.cc                   343
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc             359
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                 1353
-rw-r--r--  deps/v8/src/ppc/frames-ppc.cc                   60
-rw-r--r--  deps/v8/src/ppc/frames-ppc.h                   202
-rw-r--r--  deps/v8/src/ppc/full-codegen-ppc.cc           5290
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc   306
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.cc        6136
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.h          372
-rw-r--r--  deps/v8/src/ppc/lithium-gap-resolver-ppc.cc    288
-rw-r--r--  deps/v8/src/ppc/lithium-gap-resolver-ppc.h      60
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.cc                2626
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.h                 2746
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        4819
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         1554
-rw-r--r--  deps/v8/src/ppc/regexp-macro-assembler-ppc.cc 1337
-rw-r--r--  deps/v8/src/ppc/regexp-macro-assembler-ppc.h   212
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc              3803
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.h                413
30 files changed, 45189 insertions, 0 deletions
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
new file mode 100644
index 0000000000..6779ee3d88
--- /dev/null
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -0,0 +1,593 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_
+#define V8_PPC_ASSEMBLER_PPC_INL_H_
+
+#include "src/ppc/assembler-ppc.h"
+
+#include "src/assembler.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+ if (RelocInfo::IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
+ }
+#endif
+ // We do not use pc relative addressing on PPC, so there is
+ // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+
+#if V8_OOL_CONSTANT_POOL
+ if (Assembler::IsConstantPoolLoadStart(pc_)) {
+ // We return the PC for an ool constant pool since this function is used by
+ // the serializer, which expects the address to reside within the code
+ // object.
+ return reinterpret_cast<Address>(pc_);
+ }
+#endif
+
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LIS/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written.
+ return reinterpret_cast<Address>(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+#if V8_OOL_CONSTANT_POOL
+ return Assembler::target_constant_pool_address_at(pc_,
+ host_->constant_pool());
+#else
+ UNREACHABLE();
+ return NULL;
+#endif
+}
+
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Address Assembler::break_address_from_return_address(Address pc) {
+ return target_address_from_return_address(pc);
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+// Returns the address of the call target from the return address that will
+// be returned to after a call.
+// Call sequence is :
+// mov ip, @ call address
+// mtlr ip
+// blrl
+// @ return address
+#if V8_OOL_CONSTANT_POOL
+ if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
+ return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
+ }
+#endif
+ return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+#if V8_OOL_CONSTANT_POOL
+ Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
+ if (IsConstantPoolLoadEnd(load_address))
+ return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
+#endif
+ return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+}
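+
+
+// Worked example (a sketch, assuming the 32-bit no-constant-pool case where
+// kMovInstructionsNoConstantPool is 2, i.e. the mov is a lis/ori pair):
+//   pc:      lis ip, hi         <- call start
+//            ori ip, ip, lo
+//            mtlr ip
+//            blrl
+//   pc + 16: <return address>   <- (2 + 2) * kInstrSize past the call start
+// so return_address_from_call_start() adds four instructions and
+// target_address_from_return_address() subtracts the same amount.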
+
+
+Object* RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(
+ reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(
+ pc_, host_, reinterpret_cast<Address>(target), icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CELL);
+ Address address = cell->address() + Cell::kValueOffset;
+ Memory::Address_at(pc_) = address;
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(host(), NULL, cell);
+ }
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+static const int kNoCodeAgeInstructions = 7;
+#else
+static const int kNoCodeAgeInstructions = 6;
+#endif
+static const int kCodeAgingInstructions =
+ Assembler::kMovInstructionsNoConstantPool + 3;
+static const int kNoCodeAgeSequenceInstructions =
+ ((kNoCodeAgeInstructions >= kCodeAgingInstructions)
+ ? kNoCodeAgeInstructions
+ : kCodeAgingInstructions);
+static const int kNoCodeAgeSequenceNops =
+ (kNoCodeAgeSequenceInstructions - kNoCodeAgeInstructions);
+static const int kCodeAgingSequenceNops =
+ (kNoCodeAgeSequenceInstructions - kCodeAgingInstructions);
+static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
+static const int kNoCodeAgeSequenceLength =
+ (kNoCodeAgeSequenceInstructions * Assembler::kInstrSize);
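+
+// For illustration (a sketch assuming the 32-bit no-constant-pool layout,
+// where kMovInstructionsNoConstantPool is 2): kNoCodeAgeInstructions is 6,
+// kCodeAgingInstructions is 2 + 3 = 5, so the padded sequence is
+// max(6, 5) = 6 instructions (24 bytes), the aging sequence gets one
+// trailing nop and the young sequence needs none.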
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on PPC.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta, host_,
+ stub->instruction_start(),
+ icache_flush_mode);
+}
+
+
+Address RelocInfo::call_address() {
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes patched return sequence per
+ // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
+ // slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+Object* RelocInfo::call_object() { return *call_object_address(); }
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ //
+ // The patched return sequence is defined by
+ // BreakLocationIterator::SetDebugBreakAtReturn()
+ // FIXED_SEQUENCE
+
+ Instr instr0 = Assembler::instr_at(pc_);
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+#if V8_TARGET_ARCH_PPC64
+ Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
+ Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
+ Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
+#else
+ Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
+#endif
+ bool patched_return =
+ ((instr0 & kOpcodeMask) == ADDIS && (instr1 & kOpcodeMask) == ORI &&
+#if V8_TARGET_ARCH_PPC64
+ (instr3 & kOpcodeMask) == ORIS && (instr4 & kOpcodeMask) == ORI &&
+#endif
+ (binstr == 0x7d821008)); // twge r2, r2
+
+ // printf("IsPatchedReturnSequence: %d\n", patched_return);
+ return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
+ } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+ } else if (IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template <typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+ } else if (IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm_ = immediate;
+ rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm_ = reinterpret_cast<intptr_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = kRelocInfo_NONEPTR;
+}
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rmode_ = kRelocInfo_NONEPTR; // PPC -why doesn't ARM do this?
+}
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+}
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+}
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+
+// Fetch the 32bit value from the FIXED_SEQUENCE lis/ori
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ Instr instr1 = instr_at(pc);
+ Instr instr2 = instr_at(pc + kInstrSize);
+ // Interpret 2 instructions generated by lis/ori
+ if (IsLis(instr1) && IsOri(instr2)) {
+#if V8_TARGET_ARCH_PPC64
+ Instr instr4 = instr_at(pc + (3 * kInstrSize));
+ Instr instr5 = instr_at(pc + (4 * kInstrSize));
+ // Assemble the 64 bit value.
+ uint64_t hi = (static_cast<uint32_t>((instr1 & kImm16Mask) << 16) |
+ static_cast<uint32_t>(instr2 & kImm16Mask));
+ uint64_t lo = (static_cast<uint32_t>((instr4 & kImm16Mask) << 16) |
+ static_cast<uint32_t>(instr5 & kImm16Mask));
+ return reinterpret_cast<Address>((hi << 32) | lo);
+#else
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>(((instr1 & kImm16Mask) << 16) |
+ (instr2 & kImm16Mask));
+#endif
+ }
+#if V8_OOL_CONSTANT_POOL
+ return Memory::Address_at(target_constant_pool_address_at(pc, constant_pool));
+#else
+ DCHECK(false);
+ return (Address)0;
+#endif
+}
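+
+
+// A minimal worked example of the decode above, assuming the 32-bit case and
+// a target of 0x12345678:
+//   lis r3, 0x1234       -> instr1 & kImm16Mask == 0x1234
+//   ori r3, r3, 0x5678   -> instr2 & kImm16Mask == 0x5678
+//   (0x1234 << 16) | 0x5678 == 0x12345678, the reconstructed address.
+// The 64-bit path decodes both 16-bit halves of the high and low words the
+// same way and joins them with (hi << 32) | lo.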
+
+
+#if V8_OOL_CONSTANT_POOL
+bool Assembler::IsConstantPoolLoadStart(Address pc) {
+#if V8_TARGET_ARCH_PPC64
+ if (!IsLi(instr_at(pc))) return false;
+ pc += kInstrSize;
+#endif
+ return GetRA(instr_at(pc)).is(kConstantPoolRegister);
+}
+
+
+bool Assembler::IsConstantPoolLoadEnd(Address pc) {
+#if V8_TARGET_ARCH_PPC64
+ pc -= kInstrSize;
+#endif
+ return IsConstantPoolLoadStart(pc);
+}
+
+
+int Assembler::GetConstantPoolOffset(Address pc) {
+ DCHECK(IsConstantPoolLoadStart(pc));
+ Instr instr = instr_at(pc);
+ int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
+ return offset;
+}
+
+
+void Assembler::SetConstantPoolOffset(Address pc, int offset) {
+ DCHECK(IsConstantPoolLoadStart(pc));
+ DCHECK(is_int16(offset));
+ Instr instr = instr_at(pc);
+ instr &= ~kImm16Mask;
+ instr |= (offset & kImm16Mask);
+ instr_at_put(pc, instr);
+}
+
+
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ Address addr = reinterpret_cast<Address>(constant_pool);
+ DCHECK(addr);
+ addr += GetConstantPoolOffset(pc);
+ return addr;
+}
+#endif
+
+
+// This sets the branch destination (which gets loaded at the call address).
+// This is for calls and branches within generated code. The serializer
+// has already deserialized the mov instructions etc.
+// There is a FIXED_SEQUENCE assumption here
+void Assembler::deserialization_set_special_target_at(
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
+}
+
+// This code assumes the FIXED_SEQUENCE of lis/ori
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ Instr instr1 = instr_at(pc);
+ Instr instr2 = instr_at(pc + kInstrSize);
+ // Interpret 2 instructions generated by lis/ori
+ if (IsLis(instr1) && IsOri(instr2)) {
+#if V8_TARGET_ARCH_PPC64
+ Instr instr4 = instr_at(pc + (3 * kInstrSize));
+ Instr instr5 = instr_at(pc + (4 * kInstrSize));
+ // Needs to be fixed up when mov changes to handle 64-bit values.
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ uintptr_t itarget = reinterpret_cast<uintptr_t>(target);
+
+ instr5 &= ~kImm16Mask;
+ instr5 |= itarget & kImm16Mask;
+ itarget = itarget >> 16;
+
+ instr4 &= ~kImm16Mask;
+ instr4 |= itarget & kImm16Mask;
+ itarget = itarget >> 16;
+
+ instr2 &= ~kImm16Mask;
+ instr2 |= itarget & kImm16Mask;
+ itarget = itarget >> 16;
+
+ instr1 &= ~kImm16Mask;
+ instr1 |= itarget & kImm16Mask;
+ itarget = itarget >> 16;
+
+ *p = instr1;
+ *(p + 1) = instr2;
+ *(p + 3) = instr4;
+ *(p + 4) = instr5;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(p, 5 * kInstrSize);
+ }
+#else
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ uint32_t itarget = reinterpret_cast<uint32_t>(target);
+ int lo_word = itarget & kImm16Mask;
+ int hi_word = itarget >> 16;
+ instr1 &= ~kImm16Mask;
+ instr1 |= hi_word;
+ instr2 &= ~kImm16Mask;
+ instr2 |= lo_word;
+
+ *p = instr1;
+ *(p + 1) = instr2;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(p, 2 * kInstrSize);
+ }
+#endif
+ } else {
+#if V8_OOL_CONSTANT_POOL
+ Memory::Address_at(target_constant_pool_address_at(pc, constant_pool)) =
+ target;
+#else
+ UNREACHABLE();
+#endif
+ }
+}
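+
+// A sketch of the 64-bit split above, assuming a target of 0x0000123456789abc:
+//   instr5 (ori)  takes bits  0..15 -> 0x9abc
+//   instr4 (oris) takes bits 16..31 -> 0x5678
+//   instr2 (ori)  takes bits 32..47 -> 0x1234
+//   instr1 (lis)  takes bits 48..63 -> 0x0000
+// matching the lis/ori/rldicr/oris/ori sequence checked by
+// Is64BitLoadIntoR12(); instr3 (the rldicr shift) is left untouched.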
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
new file mode 100644
index 0000000000..4b8b165657
--- /dev/null
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -0,0 +1,2493 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// Get the CPU features enabled by the build.
+static unsigned CpuFeaturesImpliedByCompiler() {
+ unsigned answer = 0;
+ return answer;
+}
+
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= CpuFeaturesImpliedByCompiler();
+ cache_line_size_ = 128;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+// Detect whether frim instruction is supported (POWER5+)
+// For now we will just check for processors we know do not
+// support it
+#ifndef USE_SIMULATOR
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+#if V8_TARGET_ARCH_PPC64
+ if (cpu.part() == base::CPU::PPC_POWER8) {
+ supported_ |= (1u << FPR_GPR_MOV);
+ }
+#endif
+ if (cpu.part() == base::CPU::PPC_POWER6 ||
+ cpu.part() == base::CPU::PPC_POWER7 ||
+ cpu.part() == base::CPU::PPC_POWER8) {
+ supported_ |= (1u << LWSYNC);
+ }
+#if V8_OS_LINUX
+ if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
+ // Assume support
+ supported_ |= (1u << FPU);
+ }
+ if (cpu.cache_line_size() != 0) {
+ cache_line_size_ = cpu.cache_line_size();
+ }
+#elif V8_OS_AIX
+ // Assume FP support and default cache line size
+ supported_ |= (1u << FPU);
+#endif
+#else // Simulator
+ supported_ |= (1u << FPU);
+ supported_ |= (1u << LWSYNC);
+#if V8_TARGET_ARCH_PPC64
+ supported_ |= (1u << FPR_GPR_MOV);
+#endif
+#endif
+}
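+
+
+// A usage sketch: code generators query these bits before choosing between
+// alternative instruction sequences, e.g.
+//   if (CpuFeatures::IsSupported(LWSYNC)) { /* emit lwsync */ }
+//   else                                  { /* emit a full sync */ }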
+
+
+void CpuFeatures::PrintTarget() {
+ const char* ppc_arch = NULL;
+
+#if V8_TARGET_ARCH_PPC64
+ ppc_arch = "ppc64";
+#else
+ ppc_arch = "ppc";
+#endif
+
+ printf("target %s\n", ppc_arch);
+}
+
+
+void CpuFeatures::PrintFeatures() {
+ printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
+}
+
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
+ r8, r9, r10, r11, ip, r13, r14, r15,
+ r16, r17, r18, r19, r20, r21, r22, r23,
+ r24, r25, r26, r27, r28, r29, r30, fp};
+ return kRegisters[num];
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
+ "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
+ "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+ return names[index];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially
+ // coded. Being specially coded on PPC means that it is a lis/ori
+ // instruction sequence or is an out of line constant pool entry,
+ // and these are always the case inside code objects.
+ return true;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+#if V8_OOL_CONSTANT_POOL
+ return Assembler::IsConstantPoolLoadStart(pc_);
+#else
+ return false;
+#endif
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-ppc-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ imm_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ imm_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = kRelocInfo_NONEPTR;
+ }
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset) {
+ ra_ = rn;
+ rb_ = no_reg;
+ offset_ = offset;
+}
+
+
+MemOperand::MemOperand(Register ra, Register rb) {
+ ra_ = ra;
+ rb_ = rb;
+ offset_ = 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+#if V8_OOL_CONSTANT_POOL
+ constant_pool_builder_(),
+#endif
+ positions_recorder_(this) {
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (kMaxBlockTrampolineSectionSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ =
+ FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
+ kMaxBlockTrampolineSectionSize;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Set up code descriptor.
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+}
+
+
+void Assembler::Align(int m) {
+#if V8_TARGET_ARCH_PPC64
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
+#else
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+#endif
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CodeTargetAlign() { Align(8); }
+
+
+Condition Assembler::GetCondition(Instr instr) {
+ switch (instr & kCondMask) {
+ case BT:
+ return eq;
+ case BF:
+ return ne;
+ default:
+ UNIMPLEMENTED();
+ }
+ return al;
+}
+
+
+bool Assembler::IsLis(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
+}
+
+
+bool Assembler::IsLi(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
+}
+
+
+bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
+
+
+bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
+
+
+bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
+
+
+Register Assembler::GetRA(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RAValue(instr);
+ return reg;
+}
+
+
+Register Assembler::GetRB(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RBValue(instr);
+ return reg;
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
+bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
+ Instr instr4, Instr instr5) {
+ // Check the instructions are indeed a five part load (into r12)
+ // 3d800000 lis r12, 0
+ // 618c0000 ori r12, r12, 0
+ // 798c07c6 rldicr r12, r12, 32, 31
+ // 658c00c3 oris r12, r12, 195
+ // 618ccd40 ori r12, r12, 52544
+ return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
+ (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
+ ((instr5 >> 16) == 0x618c));
+}
+#else
+// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
+bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
+ // Check the instruction is indeed a two part load (into r12)
+ // 3d802553 lis r12, 9555
+ // 618c5000 ori r12, r12, 20480
+ return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
+}
+#endif
+
+
+bool Assembler::IsCmpRegister(Instr instr) {
+ return (((instr & kOpcodeMask) == EXT2) &&
+ ((instr & kExt2OpcodeMask) == CMP));
+}
+
+
+bool Assembler::IsRlwinm(Instr instr) {
+ return ((instr & kOpcodeMask) == RLWINMX);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+bool Assembler::IsRldicl(Instr instr) {
+ return (((instr & kOpcodeMask) == EXT5) &&
+ ((instr & kExt5OpcodeMask) == RLDICL));
+}
+#endif
+
+
+bool Assembler::IsCmpImmediate(Instr instr) {
+ return ((instr & kOpcodeMask) == CMPI);
+}
+
+
+bool Assembler::IsCrSet(Instr instr) {
+ return (((instr & kOpcodeMask) == EXT1) &&
+ ((instr & kExt1OpcodeMask) == CREQV));
+}
+
+
+Register Assembler::GetCmpImmediateRegister(Instr instr) {
+ DCHECK(IsCmpImmediate(instr));
+ return GetRA(instr);
+}
+
+
+int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
+ DCHECK(IsCmpImmediate(instr));
+ return instr & kOff16Mask;
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
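+
+// A short trace of how the chain is walked (a sketch): with the label still
+// unbound, a first branch emitted at pos 12 encodes offset 0 (a branch to
+// self, i.e. end of chain) and a second branch at pos 36 encodes 12 - 36 =
+// -24, so target_at(36) == 12 and target_at(12) == kEndOfChain.  Binding the
+// label at pos 100 then rewrites the two offsets to 64 and 88 respectively
+// via target_at_put().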
+
+
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ // check which type of branch this is 16 or 26 bit offset
+ int opcode = instr & kOpcodeMask;
+ if (BX == opcode) {
+ int imm26 = ((instr & kImm26Mask) << 6) >> 6;
+ imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ if (imm26 == 0) return kEndOfChain;
+ return pos + imm26;
+ } else if (BCX == opcode) {
+ int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
+ imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ if (imm16 == 0) return kEndOfChain;
+ return pos + imm16;
+ } else if ((instr & ~kImm26Mask) == 0) {
+ // Emitted link to a label, not part of a branch (regexp PushBacktrack).
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm26 = SIGN_EXT_IMM26(instr);
+ return (imm26 + pos);
+ }
+ }
+
+ PPCPORT_UNIMPLEMENTED();
+ DCHECK(false);
+ return -1;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+ Instr instr = instr_at(pos);
+ int opcode = instr & kOpcodeMask;
+
+ // check which type of branch this is 16 or 26 bit offset
+ if (BX == opcode) {
+ int imm26 = target_pos - pos;
+ DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
+ instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+ DCHECK(is_int26(imm26));
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ return;
+ } else if (BCX == opcode) {
+ int imm16 = target_pos - pos;
+ DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
+ instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+ DCHECK(is_int16(imm16));
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ return;
+ } else if ((instr & ~kImm26Mask) == 0) {
+ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted link to a label, not part of a branch (regexp PushBacktrack).
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+
+ Register dst = r3; // we assume r3 for now
+ DCHECK(IsNop(instr_at(pos + kInstrSize)));
+ uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher::DONT_FLUSH);
+ int target_hi = static_cast<int>(target) >> 16;
+ int target_lo = static_cast<int>(target) & 0XFFFF;
+
+ patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
+ patcher.masm()->ori(dst, dst, Operand(target_lo));
+ return;
+ }
+
+ DCHECK(false);
+}
+
+
+int Assembler::max_reach_from(int pos) {
+ Instr instr = instr_at(pos);
+ int opcode = instr & kOpcodeMask;
+
+ // check which type of branch this is 16 or 26 bit offset
+ if (BX == opcode) {
+ return 26;
+ } else if (BCX == opcode) {
+ return 16;
+ } else if ((instr & ~kImm26Mask) == 0) {
+ // Emitted label constant, not part of a branch (regexp PushBacktrack).
+ return 26;
+ }
+
+ DCHECK(false);
+ return 0;
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ int32_t offset = pos - fixup_pos;
+ int maxReach = max_reach_from(fixup_pos);
+ next(L); // call next before overwriting link with target at fixup_pos
+ if (is_intn(offset, maxReach) == false) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry();
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ target_at_put(trampoline_pos, pos);
+ }
+ target_at_put(fixup_pos, trampoline_pos);
+ } else {
+ target_at_put(fixup_pos, pos);
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos());
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else {
+ DCHECK(link >= 0);
+ L->link_to(link);
+ }
+}
+
+
+bool Assembler::is_near(Label* L, Condition cond) {
+ DCHECK(L->is_bound());
+ if (L->is_bound() == false) return false;
+
+ int maxReach = ((cond == al) ? 26 : 16);
+ int offset = L->pos() - pc_offset();
+
+ return is_intn(offset, maxReach);
+}
+
+
+void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
+ DoubleRegister frb, RCBit r) {
+ emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
+}
+
+
+void Assembler::d_form(Instr instr, Register rt, Register ra,
+ const intptr_t val, bool signed_disp) {
+ if (signed_disp) {
+ if (!is_int16(val)) {
+ PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
+ }
+ DCHECK(is_int16(val));
+ } else {
+ if (!is_uint16(val)) {
+ PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
+ ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
+ val, val, is_uint16(val), kImm16Mask);
+ }
+ DCHECK(is_uint16(val));
+ }
+ emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
+}
+
+
+void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
+ RCBit r) {
+ emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
+}
+
+
+void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
+ OEBit o, RCBit r) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
+}
+
+
+void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
+ int maskbit, RCBit r) {
+ int sh0_4 = shift & 0x1f;
+ int sh5 = (shift >> 5) & 0x1;
+ int m0_4 = maskbit & 0x1f;
+ int m5 = (maskbit >> 5) & 0x1;
+
+ emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
+ m5 * B5 | sh5 * B1 | r);
+}
+
+
+void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
+ int maskbit, RCBit r) {
+ int m0_4 = maskbit & 0x1f;
+ int m5 = (maskbit >> 5) & 0x1;
+
+ emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
+ m5 * B5 | r);
+}
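+
+
+// These emitters pack fields by multiplication: B16 and B21 are the bit
+// values 1 << 16 and 1 << 21, so "rt.code() * B21 | ra.code() * B16" simply
+// shifts the 5-bit register numbers into place.  For example (a sketch),
+// d_form(ADDI, r3, r4, 100) emits
+//   ADDI | 3 * B21 | 4 * B16 | (kImm16Mask & 100)
+// which is the encoding of "addi r3, r4, 100".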
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry() {
+ int32_t trampoline_entry = kInvalidSlotPos;
+
+ if (!internal_trampoline_exception_) {
+ trampoline_entry = trampoline_.take_slot();
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ // was: target_pos = kEndOfChain;
+ // However, using branch to self to mark the first reference
+ // should avoid most instances of branch offset overflow. See
+ // target_at() for where this is converted back to kEndOfChain.
+ target_pos = pc_offset();
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(pc_offset());
+ }
+
+ return target_pos - pc_offset();
+}
+
+
+// Branch instructions.
+
+
+void Assembler::bclr(BOfield bo, LKBit lk) {
+ positions_recorder()->WriteRecordedPositions();
+ emit(EXT1 | bo | BCLRX | lk);
+}
+
+
+void Assembler::bcctr(BOfield bo, LKBit lk) {
+ positions_recorder()->WriteRecordedPositions();
+ emit(EXT1 | bo | BCCTRX | lk);
+}
+
+
+// Pseudo op - branch to link register
+void Assembler::blr() { bclr(BA, LeaveLK); }
+
+
+// Pseudo op - branch to count register -- used for "jump"
+void Assembler::bctr() { bcctr(BA, LeaveLK); }
+
+
+void Assembler::bctrl() { bcctr(BA, SetLK); }
+
+
+void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
+ if (lk == SetLK) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ DCHECK(is_int16(branch_offset));
+ emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
+}
+
+
+void Assembler::b(int branch_offset, LKBit lk) {
+ if (lk == SetLK) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ DCHECK((branch_offset & 3) == 0);
+ int imm26 = branch_offset;
+ DCHECK(is_int26(imm26));
+ // todo add AA and LK bits
+ emit(BX | (imm26 & kImm26Mask) | lk);
+}
+
+
+void Assembler::xori(Register dst, Register src, const Operand& imm) {
+ d_form(XORI, src, dst, imm.imm_, false);
+}
+
+
+void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
+ d_form(XORIS, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
+ x_form(EXT2 | XORX, dst, src1, src2, rc);
+}
+
+
+void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
+ x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
+}
+
+
+void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
+ x_form(EXT2 | ANDX, ra, rs, rb, rc);
+}
+
+
+void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
+ RCBit rc) {
+ sh &= 0x1f;
+ mb &= 0x1f;
+ me &= 0x1f;
+ emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
+ me << 1 | rc);
+}
+
+
+void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
+ RCBit rc) {
+ mb &= 0x1f;
+ me &= 0x1f;
+ emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
+ me << 1 | rc);
+}
+
+
+void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
+ RCBit rc) {
+ sh &= 0x1f;
+ mb &= 0x1f;
+ me &= 0x1f;
+ emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
+ me << 1 | rc);
+}
+
+
+void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
+ DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+ rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
+}
+
+
+void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
+ DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+ rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
+}
+
+
+void Assembler::clrrwi(Register dst, Register src, const Operand& val,
+ RCBit rc) {
+ DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+ rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
+}
+
+
+void Assembler::clrlwi(Register dst, Register src, const Operand& val,
+ RCBit rc) {
+ DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+ rlwinm(dst, src, 0, val.imm_, 31, rc);
+}
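+
+
+// How the shift pseudo-ops above map onto rlwinm (a sketch): slwi(r3, r4, 4)
+// becomes rlwinm(r3, r4, 4, 0, 27), i.e. rotate left by 4 and keep mask bits
+// 0..27 (PPC numbers bit 0 as the most significant), which clears the four
+// bits that wrapped around, giving a logical shift left.  Likewise
+// srwi(r3, r4, 4) is rlwinm(r3, r4, 28, 4, 31): rotate left 28, i.e. rotate
+// right 4, keeping bits 4..31, a logical shift right.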
+
+
+void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
+ emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
+}
+
+
+void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
+ x_form(EXT2 | SRWX, dst, src1, src2, r);
+}
+
+
+void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
+ x_form(EXT2 | SLWX, dst, src1, src2, r);
+}
+
+
+void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
+ x_form(EXT2 | SRAW, ra, rs, rb, r);
+}
+
+
+void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
+ rlwnm(ra, rs, rb, 0, 31, r);
+}
+
+
+void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
+ rlwinm(ra, rs, sh, 0, 31, r);
+}
+
+
+void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
+ rlwinm(ra, rs, 32 - sh, 0, 31, r);
+}
+
+
+void Assembler::subi(Register dst, Register src, const Operand& imm) {
+ addi(dst, src, Operand(-(imm.imm_)));
+}
+
+void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
+}
+
+
+void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
+ // a special xo_form
+ emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
+}
+
+
+void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
+}
+
+
+void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
+}
+
+
+void Assembler::subfic(Register dst, Register src, const Operand& imm) {
+ d_form(SUBFIC, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
+}
+
+
+// Multiply low word
+void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
+}
+
+
+// Multiply hi word
+void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
+}
+
+
+// Divide word
+void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
+}
+
+
+void Assembler::addi(Register dst, Register src, const Operand& imm) {
+ DCHECK(!src.is(r0)); // use li instead to show intent
+ d_form(ADDI, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::addis(Register dst, Register src, const Operand& imm) {
+ DCHECK(!src.is(r0)); // use lis instead to show intent
+ d_form(ADDIS, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::addic(Register dst, Register src, const Operand& imm) {
+ d_form(ADDIC, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::andi(Register ra, Register rs, const Operand& imm) {
+ d_form(ANDIx, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::andis(Register ra, Register rs, const Operand& imm) {
+ d_form(ANDISx, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
+ x_form(EXT2 | NORX, dst, src1, src2, r);
+}
+
+
+void Assembler::notx(Register dst, Register src, RCBit r) {
+ x_form(EXT2 | NORX, dst, src, src, r);
+}
+
+
+void Assembler::ori(Register ra, Register rs, const Operand& imm) {
+ d_form(ORI, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::oris(Register dst, Register src, const Operand& imm) {
+ d_form(ORIS, src, dst, imm.imm_, false);
+}
+
+
+void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
+ x_form(EXT2 | ORX, dst, src1, src2, rc);
+}
+
+
+void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
+ intptr_t imm16 = src2.imm_;
+#if V8_TARGET_ARCH_PPC64
+ int L = 1;
+#else
+ int L = 0;
+#endif
+ DCHECK(is_int16(imm16));
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ imm16 &= kImm16Mask;
+ emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
+}
+
+
+void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
+ uintptr_t uimm16 = src2.imm_;
+#if V8_TARGET_ARCH_PPC64
+ int L = 1;
+#else
+ int L = 0;
+#endif
+ DCHECK(is_uint16(uimm16));
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ uimm16 &= kImm16Mask;
+ emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
+}
+
+
+void Assembler::cmp(Register src1, Register src2, CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+ int L = 1;
+#else
+ int L = 0;
+#endif
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
+ src2.code() * B11);
+}
+
+
+void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+ int L = 1;
+#else
+ int L = 0;
+#endif
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
+ src2.code() * B11);
+}
+
+
+void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
+ intptr_t imm16 = src2.imm_;
+ int L = 0;
+ DCHECK(is_int16(imm16));
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ imm16 &= kImm16Mask;
+ emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
+}
+
+
+void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
+ uintptr_t uimm16 = src2.imm_;
+ int L = 0;
+ DCHECK(is_uint16(uimm16));
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ uimm16 &= kImm16Mask;
+ emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
+}
+
+
+void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
+ int L = 0;
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
+ src2.code() * B11);
+}
+
+
+void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
+ int L = 0;
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
+ src2.code() * B11);
+}
+
+
+// Pseudo op - load immediate
+void Assembler::li(Register dst, const Operand& imm) {
+ d_form(ADDI, dst, r0, imm.imm_, true);
+}
+
+
+void Assembler::lis(Register dst, const Operand& imm) {
+ d_form(ADDIS, dst, r0, imm.imm_, true);
+}
+
+
+// Pseudo op - move register
+void Assembler::mr(Register dst, Register src) {
+ // actually or(dst, src, src)
+ orx(dst, src, src);
+}
+
+
+void Assembler::lbz(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LBZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lbzx(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lbzux(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lhz(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LHZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lhzx(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lhzux(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lwz(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LWZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lwzu(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(LWZU, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lwzx(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lwzux(Register rt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lwa(Register dst, const MemOperand& src) {
+#if V8_TARGET_ARCH_PPC64
+ int offset = src.offset();
+ DCHECK(!src.ra_.is(r0));
+ DCHECK(!(offset & 3) && is_int16(offset));
+ offset = kImm16Mask & offset;
+ emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
+#else
+ lwz(dst, src);
+#endif
+}
+
+
+void Assembler::stb(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(STB, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stbx(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stbux(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::sth(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(STH, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::sthx(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::sthux(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stw(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(STW, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stwu(Register dst, const MemOperand& src) {
+ DCHECK(!src.ra_.is(r0));
+ d_form(STWU, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stwx(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stwux(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::extsb(Register rs, Register ra, RCBit rc) {
+ emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::extsh(Register rs, Register ra, RCBit rc) {
+ emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
+ emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
+}
+
+
+void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
+ x_form(EXT2 | ANDCX, dst, src1, src2, rc);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// 64bit specific instructions
+void Assembler::ld(Register rd, const MemOperand& src) {
+ int offset = src.offset();
+ DCHECK(!src.ra_.is(r0));
+ DCHECK(!(offset & 3) && is_int16(offset));
+ offset = kImm16Mask & offset;
+ emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
+}
+
+
+void Assembler::ldx(Register rd, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::ldu(Register rd, const MemOperand& src) {
+ int offset = src.offset();
+ DCHECK(!src.ra_.is(r0));
+ DCHECK(!(offset & 3) && is_int16(offset));
+ offset = kImm16Mask & offset;
+ emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
+}
+
+
+void Assembler::ldux(Register rd, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::std(Register rs, const MemOperand& src) {
+ int offset = src.offset();
+ DCHECK(!src.ra_.is(r0));
+ DCHECK(!(offset & 3) && is_int16(offset));
+ offset = kImm16Mask & offset;
+ emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
+}
+
+
+void Assembler::stdx(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::stdu(Register rs, const MemOperand& src) {
+ int offset = src.offset();
+ DCHECK(!src.ra_.is(r0));
+ DCHECK(!(offset & 3) && is_int16(offset));
+ offset = kImm16Mask & offset;
+ emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
+}
+
+
+void Assembler::stdux(Register rs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
+ md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
+ md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
+ mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
+}
+
+
+void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
+ md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
+}
+
+
+void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
+ DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+ rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
+}
+
+
+void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
+ DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+ rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
+}
+
+
+void Assembler::clrrdi(Register dst, Register src, const Operand& val,
+ RCBit rc) {
+ DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+ rldicr(dst, src, 0, 63 - val.imm_, rc);
+}
+
+
+void Assembler::clrldi(Register dst, Register src, const Operand& val,
+ RCBit rc) {
+ DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+ rldicl(dst, src, 0, val.imm_, rc);
+}
+
+
+void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
+ md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
+ int sh0_4 = sh & 0x1f;
+ int sh5 = (sh >> 5) & 0x1;
+
+ emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
+ sh5 * B1 | r);
+}
+
+
+void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
+ x_form(EXT2 | SRDX, dst, src1, src2, r);
+}
+
+
+void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
+ x_form(EXT2 | SLDX, dst, src1, src2, r);
+}
+
+
+void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
+ x_form(EXT2 | SRAD, ra, rs, rb, r);
+}
+
+
+void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
+ rldcl(ra, rs, rb, 0, r);
+}
+
+
+void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
+ rldicl(ra, rs, sh, 0, r);
+}
+
+
+void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
+ rldicl(ra, rs, 64 - sh, 0, r);
+}
+
+
+void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
+ x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
+}
+
+
+void Assembler::extsw(Register rs, Register ra, RCBit rc) {
+ emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
+}
+
+
+void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
+ RCBit r) {
+ xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
+}
+#endif
+
+
+void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
+ DCHECK(fopcode < fLastFaker);
+ emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
+}
+
+
+void Assembler::marker_asm(int mcode) {
+ if (::v8::internal::FLAG_trace_sim_stubs) {
+ DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
+ emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
+ }
+}
+
+
+// Function descriptor for AIX.
+// Code address skips the function descriptor "header".
+// TOC and static chain are ignored and set to 0.
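+// The descriptor occupies three pointers (code entry, TOC, static chain);
+// the emitted code entry points just past the descriptor itself.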
+void Assembler::function_descriptor() {
+ DCHECK(pc_offset() == 0);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
+ emit_ptr(0);
+ emit_ptr(0);
+}
+
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
+ Address code_start,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(delta || code_start);
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
+ if (fd[1] == 0 && fd[2] == 0) {
+ // Function descriptor
+ if (delta) {
+ fd[0] += delta;
+ } else {
+ fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
+ }
+ return;
+ }
+#endif
+#if V8_OOL_CONSTANT_POOL
+ // mov for LoadConstantPoolPointerRegister
+ ConstantPoolArray* constant_pool = NULL;
+ if (delta) {
+ code_start = target_address_at(pc, constant_pool) + delta;
+ }
+ set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
+#endif
+}
+
+
+int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
+ if (fd[1] == 0 && fd[2] == 0) {
+ // Function descriptor
+ SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ "]"
+ " function descriptor",
+ fd[0], fd[1], fd[2]);
+ return kPointerSize * 3;
+ }
+#endif
+ return 0;
+}
+#endif
+
+
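+// Number of instructions that mov() will emit to load the given operand
+// (a constant pool load vs. a full immediate sequence).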
+int Assembler::instructions_required_for_mov(const Operand& x) const {
+#if V8_OOL_CONSTANT_POOL || DEBUG
+ bool canOptimize =
+ !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+#endif
+#if V8_OOL_CONSTANT_POOL
+ if (use_constant_pool_for_mov(x, canOptimize)) {
+ // Current usage guarantees that all constant pool references can
+ // use the same sequence.
+ return kMovInstructionsConstantPool;
+ }
+#endif
+ DCHECK(!canOptimize);
+ return kMovInstructionsNoConstantPool;
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+bool Assembler::use_constant_pool_for_mov(const Operand& x,
+ bool canOptimize) const {
+ if (!is_ool_constant_pool_available() || is_constant_pool_full()) {
+ // If there is no constant pool available, we must use a mov
+ // immediate sequence.
+ return false;
+ }
+
+ intptr_t value = x.immediate();
+ if (canOptimize && is_int16(value)) {
+ // Prefer a single-instruction load-immediate.
+ return false;
+ }
+
+ return true;
+}
+
+
+void Assembler::EnsureSpaceFor(int space_needed) {
+ if (buffer_space() <= (kGap + space_needed)) {
+ GrowBuffer();
+ }
+}
+#endif
+
+
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
+ if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+ if (assembler != NULL && assembler->predictable_code_size()) return true;
+ return assembler->serializer_enabled();
+ } else if (RelocInfo::IsNone(rmode_)) {
+ return false;
+ }
+ return true;
+}
+
+
+// Primarily used for loading constants. This should really move to the
+// macro-assembler, as it is really a pseudo-instruction.
+// Some callers rely on a FIXED_SEQUENCE being emitted.
+// TODO: break this dependency so that mov() can be optimized in general,
+// using the generic version only when a fixed sequence is required.
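+//
+// For example, on PPC64 a full 64-bit immediate that cannot use the
+// constant pool is materialized with the five-instruction
+// lis/ori/sldi/oris/ori sequence below (kMovInstructionsNoConstantPool).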
+void Assembler::mov(Register dst, const Operand& src) {
+ intptr_t value = src.immediate();
+ bool canOptimize;
+ RelocInfo rinfo(pc_, src.rmode_, value, NULL);
+
+ if (src.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
+ }
+
+ canOptimize =
+ !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+
+#if V8_OOL_CONSTANT_POOL
+ if (use_constant_pool_for_mov(src, canOptimize)) {
+ DCHECK(is_ool_constant_pool_available());
+ ConstantPoolAddEntry(rinfo);
+#if V8_TARGET_ARCH_PPC64
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // We are forced to use a 2-instruction sequence since the constant
+ // pool pointer is tagged.
+ li(dst, Operand::Zero());
+ ldx(dst, MemOperand(kConstantPoolRegister, dst));
+#else
+ lwz(dst, MemOperand(kConstantPoolRegister, 0));
+#endif
+ return;
+ }
+#endif
+
+ if (canOptimize) {
+ if (is_int16(value)) {
+ li(dst, Operand(value));
+ } else {
+ uint16_t u16;
+#if V8_TARGET_ARCH_PPC64
+ if (is_int32(value)) {
+#endif
+ lis(dst, Operand(value >> 16));
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ if (is_int48(value)) {
+ li(dst, Operand(value >> 32));
+ } else {
+ lis(dst, Operand(value >> 48));
+ u16 = ((value >> 32) & 0xffff);
+ if (u16) {
+ ori(dst, dst, Operand(u16));
+ }
+ }
+ sldi(dst, dst, Operand(32));
+ u16 = ((value >> 16) & 0xffff);
+ if (u16) {
+ oris(dst, dst, Operand(u16));
+ }
+ }
+#endif
+ u16 = (value & 0xffff);
+ if (u16) {
+ ori(dst, dst, Operand(u16));
+ }
+ }
+ return;
+ }
+
+ DCHECK(!canOptimize);
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+#if V8_TARGET_ARCH_PPC64
+ int32_t hi_32 = static_cast<int32_t>(value >> 32);
+ int32_t lo_32 = static_cast<int32_t>(value);
+ int hi_word = static_cast<int>(hi_32 >> 16);
+ int lo_word = static_cast<int>(hi_32 & 0xffff);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
+ sldi(dst, dst, Operand(32));
+ hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
+ lo_word = static_cast<int>(lo_32 & 0xffff);
+ oris(dst, dst, Operand(hi_word));
+ ori(dst, dst, Operand(lo_word));
+#else
+ int hi_word = static_cast<int>(value >> 16);
+ int lo_word = static_cast<int>(value & 0xffff);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
+#endif
+ }
+}
+
+
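+// Loads the label's position (offset from the start of the code object,
+// adjusted for the Code header) into dst. While the label is unbound, a
+// patchable two-instruction placeholder (link word + nop) is emitted instead.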
+void Assembler::mov_label_offset(Register dst, Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ bool is_linked = label->is_linked();
+ // Emit the link to the label in the code stream followed by extra
+ // nop instructions.
+ DCHECK(dst.is(r3)); // target_at_put assumes r3 for now
+ int link = is_linked ? label->pos() - pc_offset() : 0;
+ label->link_to(pc_offset());
+
+ if (!is_linked && !trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+
+ // When the label is bound, these instructions will be patched
+ // with a 2 instruction mov sequence that will load the
+ // destination register with the position of the label from the
+ // beginning of the code.
+ //
+ // When the label gets bound: target_at extracts the link and
+ // target_at_put patches the instructions.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(link);
+ nop();
+ }
+}
+
+
+// Special register instructions
+void Assembler::crxor(int bt, int ba, int bb) {
+ emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
+}
+
+
+void Assembler::creqv(int bt, int ba, int bb) {
+ emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
+}
+
+
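+// mflr/mtlr/mtctr/mtxer are mfspr/mtspr forms. The 10-bit SPR number is
+// encoded with its two 5-bit halves swapped, so LR (SPR 8) appears as 256,
+// CTR (SPR 9) as 288 and XER (SPR 1) as 32 below.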
+void Assembler::mflr(Register dst) {
+ emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
+}
+
+
+void Assembler::mtlr(Register src) {
+ emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
+}
+
+
+void Assembler::mtctr(Register src) {
+ emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
+}
+
+
+void Assembler::mtxer(Register src) {
+ emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
+}
+
+
+void Assembler::mcrfs(int bf, int bfa) {
+ emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
+}
+
+
+void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void Assembler::mffprd(Register dst, DoubleRegister src) {
+ emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
+}
+
+
+void Assembler::mffprwz(Register dst, DoubleRegister src) {
+ emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
+}
+
+
+void Assembler::mtfprd(DoubleRegister dst, Register src) {
+ emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
+}
+
+
+void Assembler::mtfprwz(DoubleRegister dst, Register src) {
+ emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
+}
+
+
+void Assembler::mtfprwa(DoubleRegister dst, Register src) {
+ emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
+}
+#endif
+
+
+// Exception-generating instructions and debugging support.
+// Stops with a non-negative code less than kNumOfWatchedStops support
+// enabling/disabling and a counter feature. See simulator-ppc.h.
+void Assembler::stop(const char* msg, Condition cond, int32_t code,
+ CRegister cr) {
+ if (cond != al) {
+ Label skip;
+ b(NegateCondition(cond), &skip, cr);
+ bkpt(0);
+ bind(&skip);
+ } else {
+ bkpt(0);
+ }
+}
+
+
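+// 0x7d821008 encodes 'twge r2, r2', a trap that is always taken since
+// r2 >= r2; the imm16 argument is currently ignored.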
+void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
+
+
+void Assembler::info(const char* msg, Condition cond, int32_t code,
+ CRegister cr) {
+ if (::v8::internal::FLAG_trace_sim_stubs) {
+ emit(0x7d9ff808);
+#if V8_TARGET_ARCH_PPC64
+ uint64_t value = reinterpret_cast<uint64_t>(msg);
+ emit(static_cast<uint32_t>(value >> 32));
+ emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
+#else
+ emit(reinterpret_cast<Instr>(msg));
+#endif
+ }
+}
+
+
+void Assembler::dcbf(Register ra, Register rb) {
+ emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::sync() { emit(EXT2 | SYNC); }
+
+
+void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
+
+
+void Assembler::icbi(Register ra, Register rb) {
+ emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::isync() { emit(EXT1 | ISYNC); }
+
+
+// Floating point support
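+// Suffix convention follows the ISA: 'u' = update form (ra is updated with
+// the effective address), 'x' = indexed form (the effective address is
+// ra + rb rather than ra + a 16-bit displacement).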
+
+void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
+ int offset = src.offset();
+ Register ra = src.ra();
+ DCHECK(is_int16(offset));
+ DCHECK(!ra.is(r0));
+ int imm16 = offset & kImm16Mask;
+ // could be x_form instruction with some casting magic
+ emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
+ Register ra = src.ra();
+ Register rb = src.rb();
+ DCHECK(!ra.is(r0));
+ emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ LeaveRC);
+}
+
+
+void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc) {
+ a_form(EXT4 | FSUB, frt, fra, frb, rc);
+}
+
+
+void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc) {
+ a_form(EXT4 | FADD, frt, fra, frb, rc);
+}
+
+
+void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, RCBit rc) {
+ emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+ rc);
+}
+
+
+void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc) {
+ a_form(EXT4 | FDIV, frt, fra, frb, rc);
+}
+
+
+void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
+ CRegister cr) {
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+ emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
+}
+
+
+void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
+ emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
+ emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
+ emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+ frc.code() * B6 | rc);
+}
+
+
+void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
+ emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
+}
+
+
+void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
+ emit(EXT4 | MFFS | frt.code() * B21 | rc);
+}
+
+
+void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
+ RCBit rc) {
+ emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
+}
+
+
+void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+ frc.code() * B6 | rc);
+}
+
+
+void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+ frc.code() * B6 | rc);
+}
+
+
+// Pseudo instructions.
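+// A nop is encoded as 'ori rN, rN, 0'; the register used marks the nop type
+// (r0 = plain, r2 = group-ending, r3 = debug-break) so IsNop() can
+// distinguish them.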
+void Assembler::nop(int type) {
+ Register reg = r0;
+ switch (type) {
+ case NON_MARKING_NOP:
+ reg = r0;
+ break;
+ case GROUP_ENDING_NOP:
+ reg = r2;
+ break;
+ case DEBUG_BREAK_NOP:
+ reg = r3;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ ori(reg, reg, Operand::Zero());
+}
+
+
+bool Assembler::IsNop(Instr instr, int type) {
+ int reg = 0;
+ switch (type) {
+ case NON_MARKING_NOP:
+ reg = 0;
+ break;
+ case GROUP_ENDING_NOP:
+ reg = 2;
+ break;
+ case DEBUG_BREAK_NOP:
+ reg = 3;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ return instr == (ORI | reg * B21 | reg * B16);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta =
+ (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+// None of our relocation types are pc relative pointing outside the code
+// buffer nor pc absolute pointing inside the code buffer, so there is no need
+// to relocate any emitted relocation entries.
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+ // Relocate runtime entries.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
+ }
+ }
+#if V8_OOL_CONSTANT_POOL
+ constant_pool_builder_.Relocate(pc_delta);
+#endif
+#endif
+}
+
+
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit_ptr(uintptr_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uintptr_t*>(pc_) = data;
+ pc_ += sizeof(uintptr_t);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data, NULL);
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+ if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
+ rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
+ // Adjust code for new modes.
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
+ RelocInfo::IsJSReturn(rinfo.rmode()) ||
+ RelocInfo::IsComment(rinfo.rmode()) ||
+ RelocInfo::IsPosition(rinfo.rmode()));
+ }
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
+ // Don't record external references unless the heap will be serialized.
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
+ if (!serializer_enabled() && !emit_debug_code()) {
+ return;
+ }
+ }
+ DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
+ RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ DCHECK(!trampoline_emitted_);
+ DCHECK(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump, then we emit trampoline pool.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ b(&after_pool);
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit the trampoline once, we need to prevent any
+ // further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ =
+ pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
+ }
+ return;
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+#if V8_OOL_CONSTANT_POOL
+ return constant_pool_builder_.New(isolate);
+#else
+ // No out-of-line constant pool support.
+ DCHECK(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+#endif
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+#if V8_OOL_CONSTANT_POOL
+ constant_pool_builder_.Populate(this, constant_pool);
+#else
+ // No out-of-line constant pool support.
+ DCHECK(!FLAG_enable_ool_constant_pool);
+#endif
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : size_(0),
+ entries_(),
+ current_section_(ConstantPoolArray::SMALL_SECTION) {}
+
+
+bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
+
+
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+ RelocInfo::Mode rmode) {
+#if V8_TARGET_ARCH_PPC64
+ // We don't support 32-bit entries at this time.
+ if (!RelocInfo::IsGCRelocMode(rmode)) {
+ return ConstantPoolArray::INT64;
+#else
+ if (rmode == RelocInfo::NONE64) {
+ return ConstantPoolArray::INT64;
+ } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+ return ConstantPoolArray::INT32;
+#endif
+ } else if (RelocInfo::IsCodeTarget(rmode)) {
+ return ConstantPoolArray::CODE_PTR;
+ } else {
+ DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+ return ConstantPoolArray::HEAP_PTR;
+ }
+}
+
+
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+ Assembler* assm, const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ ConstantPoolArray::LayoutSection entry_section = current_section_;
+ if (RelocInfo::IsNone(rmode) ||
+ (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<ConstantPoolEntry>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+ // Merge with found entry.
+ merged_index = i;
+ entry_section = entries_[i].section_;
+ break;
+ }
+ }
+ }
+ DCHECK(entry_section <= current_section_);
+ entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
+ }
+
+ // Check if we still have room for another entry in the small section
+ // given the limitations of the header's layout fields.
+ if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
+ size_ = ConstantPoolArray::SizeFor(*small_entries());
+ if (!is_uint12(size_)) {
+ current_section_ = ConstantPoolArray::EXTENDED_SECTION;
+ }
+ } else {
+ size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
+ *extended_entries());
+ }
+
+ return entry_section;
+}
+
+
+void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+ entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
+ }
+}
+
+
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
+ if (IsEmpty()) {
+ return isolate->factory()->empty_constant_pool_array();
+ } else if (extended_entries()->is_empty()) {
+ return isolate->factory()->NewConstantPoolArray(*small_entries());
+ } else {
+ DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+ return isolate->factory()->NewExtendedConstantPoolArray(
+ *small_entries(), *extended_entries());
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ DCHECK_EQ(extended_entries()->is_empty(),
+ !constant_pool->is_extended_layout());
+ DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::SMALL_SECTION)));
+ if (constant_pool->is_extended_layout()) {
+ DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+ }
+
+ // Set up initial offsets.
+ int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
+ [ConstantPoolArray::NUMBER_OF_TYPES];
+ for (int section = 0; section <= constant_pool->final_section(); section++) {
+ int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
+ ? small_entries()->total_count()
+ : 0;
+ for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
+ ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
+ if (number_of_entries_[section].count_of(type) != 0) {
+ offsets[section][type] = constant_pool->OffsetOfElementAt(
+ number_of_entries_[section].base_of(type) + section_start);
+ }
+ }
+ }
+
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ RelocInfo rinfo = entry->rinfo_;
+ RelocInfo::Mode rmode = entry->rinfo_.rmode();
+ ConstantPoolArray::Type type = GetConstantPoolType(rmode);
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (entry->merged_index_ == -1) {
+ offset = offsets[entry->section_][type];
+ offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
+ if (type == ConstantPoolArray::INT64) {
+#if V8_TARGET_ARCH_PPC64
+ constant_pool->set_at_offset(offset, rinfo.data());
+#else
+ constant_pool->set_at_offset(offset, rinfo.data64());
+ } else if (type == ConstantPoolArray::INT32) {
+ constant_pool->set_at_offset(offset,
+ static_cast<int32_t>(rinfo.data()));
+#endif
+ } else if (type == ConstantPoolArray::CODE_PTR) {
+ constant_pool->set_at_offset(offset,
+ reinterpret_cast<Address>(rinfo.data()));
+ } else {
+ DCHECK(type == ConstantPoolArray::HEAP_PTR);
+ constant_pool->set_at_offset(offset,
+ reinterpret_cast<Object*>(rinfo.data()));
+ }
+ offset -= kHeapObjectTag;
+ entry->merged_index_ = offset; // Stash offset for merged entries.
+ } else {
+ DCHECK(entry->merged_index_ < (entry - entries_.begin()));
+ offset = entries_[entry->merged_index_].merged_index_;
+ }
+
+ // Patch load instruction with correct offset.
+ Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
+ }
+}
+#endif
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
new file mode 100644
index 0000000000..2b112d6ca5
--- /dev/null
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -0,0 +1,1493 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+// A lightweight PPC Assembler.
+// Generates user-mode instructions for the PPC architecture.
+
+#ifndef V8_PPC_ASSEMBLER_PPC_H_
+#define V8_PPC_ASSEMBLER_PPC_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include "src/assembler.h"
+#include "src/ppc/constants-ppc.h"
+#include "src/serialize.h"
+
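+// Feature-test macros describing the native PowerPC ABI in use: whether
+// function descriptors are required, how handles and object pairs are
+// passed/returned, and which register holds the TOC.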
+#define ABI_USES_FUNCTION_DESCRIPTORS \
+ (V8_HOST_ARCH_PPC && (V8_OS_AIX || \
+ (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN)))
+
+#define ABI_PASSES_HANDLES_IN_REGS \
+ (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
+
+#define ABI_RETURNS_HANDLES_IN_REGS \
+ (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
+ (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+
+#define ABI_TOC_ADDRESSABILITY_VIA_IP \
+ (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_TOC_REGISTER kRegister_r2_Code
+#else
+#define ABI_TOC_REGISTER kRegister_r13_Code
+#endif
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// Core register
+struct Register {
+ static const int kNumRegisters = 32;
+ static const int kSizeInBytes = kPointerSize;
+
+#if V8_TARGET_LITTLE_ENDIAN
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#else
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#endif
+
+ static const int kAllocatableLowRangeBegin = 3;
+ static const int kAllocatableLowRangeEnd = 10;
+ static const int kAllocatableHighRangeBegin = 14;
+#if V8_OOL_CONSTANT_POOL
+ static const int kAllocatableHighRangeEnd = 27;
+#else
+ static const int kAllocatableHighRangeEnd = 28;
+#endif
+ static const int kAllocatableContext = 30;
+
+ static const int kNumAllocatableLow =
+ kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
+ static const int kNumAllocatableHigh =
+ kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
+ static const int kMaxNumAllocatableRegisters =
+ kNumAllocatableLow + kNumAllocatableHigh + 1; // cp
+
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ static int ToAllocationIndex(Register reg) {
+ int index;
+ int code = reg.code();
+ if (code == kAllocatableContext) {
+ // Context is the last index
+ index = NumAllocatableRegisters() - 1;
+ } else if (code <= kAllocatableLowRangeEnd) {
+ // low range
+ index = code - kAllocatableLowRangeBegin;
+ } else {
+ // high range
+ index = code - kAllocatableHighRangeBegin + kNumAllocatableLow;
+ }
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return index;
+ }
+
+ static Register FromAllocationIndex(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ // Last index is always the 'cp' register.
+ if (index == kMaxNumAllocatableRegisters - 1) {
+ return from_code(kAllocatableContext);
+ }
+ return (index < kNumAllocatableLow)
+ ? from_code(index + kAllocatableLowRangeBegin)
+ : from_code(index - kNumAllocatableLow +
+ kAllocatableHighRangeBegin);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+#if !V8_OOL_CONSTANT_POOL
+ "r28",
+#endif
+ "cp",
+ };
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = {code};
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << code_;
+ }
+
+ void set_code(int code) {
+ code_ = code;
+ DCHECK(is_valid());
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0; // general scratch
+const int kRegister_sp_Code = 1; // stack pointer
+const int kRegister_r2_Code = 2; // special on PowerPC
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11; // lithium scratch
+const int kRegister_ip_Code = 12; // ip (general scratch)
+const int kRegister_r13_Code = 13; // special on PowerPC
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+
+const int kRegister_r16_Code = 16;
+const int kRegister_r17_Code = 17;
+const int kRegister_r18_Code = 18;
+const int kRegister_r19_Code = 19;
+const int kRegister_r20_Code = 20;
+const int kRegister_r21_Code = 21;
+const int kRegister_r22_Code = 22;
+const int kRegister_r23_Code = 23;
+const int kRegister_r24_Code = 24;
+const int kRegister_r25_Code = 25;
+const int kRegister_r26_Code = 26;
+const int kRegister_r27_Code = 27;
+const int kRegister_r28_Code = 28; // constant pool pointer
+const int kRegister_r29_Code = 29; // roots array pointer
+const int kRegister_r30_Code = 30; // context pointer
+const int kRegister_fp_Code = 31; // frame pointer
+
+const Register no_reg = {kRegister_no_reg_Code};
+
+const Register r0 = {kRegister_r0_Code};
+const Register sp = {kRegister_sp_Code};
+const Register r2 = {kRegister_r2_Code};
+const Register r3 = {kRegister_r3_Code};
+const Register r4 = {kRegister_r4_Code};
+const Register r5 = {kRegister_r5_Code};
+const Register r6 = {kRegister_r6_Code};
+const Register r7 = {kRegister_r7_Code};
+const Register r8 = {kRegister_r8_Code};
+const Register r9 = {kRegister_r9_Code};
+const Register r10 = {kRegister_r10_Code};
+const Register r11 = {kRegister_r11_Code};
+const Register ip = {kRegister_ip_Code};
+const Register r13 = {kRegister_r13_Code};
+const Register r14 = {kRegister_r14_Code};
+const Register r15 = {kRegister_r15_Code};
+
+const Register r16 = {kRegister_r16_Code};
+const Register r17 = {kRegister_r17_Code};
+const Register r18 = {kRegister_r18_Code};
+const Register r19 = {kRegister_r19_Code};
+const Register r20 = {kRegister_r20_Code};
+const Register r21 = {kRegister_r21_Code};
+const Register r22 = {kRegister_r22_Code};
+const Register r23 = {kRegister_r23_Code};
+const Register r24 = {kRegister_r24_Code};
+const Register r25 = {kRegister_r25_Code};
+const Register r26 = {kRegister_r26_Code};
+const Register r27 = {kRegister_r27_Code};
+const Register r28 = {kRegister_r28_Code};
+const Register r29 = {kRegister_r29_Code};
+const Register r30 = {kRegister_r30_Code};
+const Register fp = {kRegister_fp_Code};
+
+// Give alias names to registers
+const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
+const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
+#if V8_OOL_CONSTANT_POOL
+const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
+#endif
+
+// Double word FP register.
+struct DoubleRegister {
+ static const int kNumRegisters = 32;
+ static const int kMaxNumRegisters = kNumRegisters;
+ static const int kNumVolatileRegisters = 14; // d0-d13
+ static const int kSizeInBytes = 8;
+
+ static const int kAllocatableLowRangeBegin = 1;
+ static const int kAllocatableLowRangeEnd = 12;
+ static const int kAllocatableHighRangeBegin = 15;
+ static const int kAllocatableHighRangeEnd = 31;
+
+ static const int kNumAllocatableLow =
+ kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
+ static const int kNumAllocatableHigh =
+ kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
+ static const int kMaxNumAllocatableRegisters =
+ kNumAllocatableLow + kNumAllocatableHigh;
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // TODO(turbofan)
+ inline static int NumAllocatableAliasedRegisters() {
+ return NumAllocatableRegisters();
+ }
+
+ static int ToAllocationIndex(DoubleRegister reg) {
+ int code = reg.code();
+ int index = (code <= kAllocatableLowRangeEnd)
+ ? code - kAllocatableLowRangeBegin
+ : code - kAllocatableHighRangeBegin + kNumAllocatableLow;
+ DCHECK(index < kMaxNumAllocatableRegisters);
+ return index;
+ }
+
+ static DoubleRegister FromAllocationIndex(int index) {
+ DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return (index < kNumAllocatableLow)
+ ? from_code(index + kAllocatableLowRangeBegin)
+ : from_code(index - kNumAllocatableLow +
+ kAllocatableHighRangeBegin);
+ }
+
+ static const char* AllocationIndexToString(int index);
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return code_ == reg.code_; }
+
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << code_;
+ }
+ void split_code(int* vm, int* m) const {
+ DCHECK(is_valid());
+ *m = (code_ & 0x10) >> 4;
+ *vm = code_ & 0x0F;
+ }
+
+ int code_;
+};
+
+
+const DoubleRegister no_dreg = {-1};
+const DoubleRegister d0 = {0};
+const DoubleRegister d1 = {1};
+const DoubleRegister d2 = {2};
+const DoubleRegister d3 = {3};
+const DoubleRegister d4 = {4};
+const DoubleRegister d5 = {5};
+const DoubleRegister d6 = {6};
+const DoubleRegister d7 = {7};
+const DoubleRegister d8 = {8};
+const DoubleRegister d9 = {9};
+const DoubleRegister d10 = {10};
+const DoubleRegister d11 = {11};
+const DoubleRegister d12 = {12};
+const DoubleRegister d13 = {13};
+const DoubleRegister d14 = {14};
+const DoubleRegister d15 = {15};
+const DoubleRegister d16 = {16};
+const DoubleRegister d17 = {17};
+const DoubleRegister d18 = {18};
+const DoubleRegister d19 = {19};
+const DoubleRegister d20 = {20};
+const DoubleRegister d21 = {21};
+const DoubleRegister d22 = {22};
+const DoubleRegister d23 = {23};
+const DoubleRegister d24 = {24};
+const DoubleRegister d25 = {25};
+const DoubleRegister d26 = {26};
+const DoubleRegister d27 = {27};
+const DoubleRegister d28 = {28};
+const DoubleRegister d29 = {29};
+const DoubleRegister d30 = {30};
+const DoubleRegister d31 = {31};
+
+// Aliases for double registers. Defined using #define instead of
+// "static const DoubleRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kFirstCalleeSavedDoubleReg d14
+#define kLastCalleeSavedDoubleReg d31
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d13
+
+Register ToRegister(int num);
+
+// Condition register (CR) field
+struct CRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ DCHECK(is_valid());
+ return code_;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << code_;
+ }
+
+ // Unfortunately we can't make this private in a struct.
+ int code_;
+};
+
+
+const CRegister no_creg = {-1};
+
+const CRegister cr0 = {0};
+const CRegister cr1 = {1};
+const CRegister cr2 = {2};
+const CRegister cr3 = {3};
+const CRegister cr4 = {4};
+const CRegister cr5 = {5};
+const CRegister cr6 = {6};
+const CRegister cr7 = {7};
+const CRegister cr8 = {8};
+const CRegister cr9 = {9};
+const CRegister cr10 = {10};
+const CRegister cr11 = {11};
+const CRegister cr12 = {12};
+const CRegister cr13 = {13};
+const CRegister cr14 = {14};
+const CRegister cr15 = {15};
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+#if V8_TARGET_ARCH_PPC64
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
+#else
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
+#endif
+
+// Class Operand represents an immediate or register operand in data
+// processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(intptr_t immediate,
+ RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+ INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
+ INLINE(explicit Operand(const ExternalReference& f));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ // For mov. Return the number of actual instructions required to
+ // load the operand into a register. This can be anywhere from
+ // one (constant pool small section) to five instructions (full
+ // 64-bit sequence).
+ //
+ // The value returned is only valid as long as no entries are added to the
+ // constant pool between this call and the actual instruction being emitted.
+ bool must_output_reloc_info(const Assembler* assembler) const;
+
+ inline intptr_t immediate() const {
+ DCHECK(!rm_.is_valid());
+ return imm_;
+ }
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ intptr_t imm_; // valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+
+// Class MemOperand represents a memory operand in load and store instructions.
+// On PowerPC a memory operand is either a base register plus a 16-bit signed
+// displacement, or a base register plus an index register (rb).
+class MemOperand BASE_EMBEDDED {
+ public:
+ explicit MemOperand(Register rn, int32_t offset = 0);
+
+ explicit MemOperand(Register ra, Register rb);
+
+ int32_t offset() const {
+ DCHECK(rb_.is(no_reg));
+ return offset_;
+ }
+
+ // PowerPC - base register
+ Register ra() const {
+ DCHECK(!ra_.is(no_reg));
+ return ra_;
+ }
+
+ Register rb() const {
+ DCHECK(offset_ == 0 && !rb_.is(no_reg));
+ return rb_;
+ }
+
+ private:
+ Register ra_; // base
+ int32_t offset_; // offset
+ Register rb_; // index
+
+ friend class Assembler;
+};
+
+
+#if V8_OOL_CONSTANT_POOL
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ ConstantPoolBuilder();
+ ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
+ const RelocInfo& rinfo);
+ void Relocate(intptr_t pc_delta);
+ bool IsEmpty();
+ Handle<ConstantPoolArray> New(Isolate* isolate);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline ConstantPoolArray::LayoutSection current_section() const {
+ return current_section_;
+ }
+
+ // Rather than increasing the capacity of the ConstantPoolArray's
+ // small section to match the longer (16-bit) reach of PPC's load
+ // instruction (at the expense of a larger header to describe the
+ // layout), the PPC implementation utilizes the extended section to
+ // satisfy that reach. I.e. all entries (regardless of their
+ // section) are reachable with a single load instruction.
+ //
+ // This implementation does not support an unlimited constant pool
+ // size (which would require a multi-instruction sequence). [See
+ // ARM commit e27ab337 for a reference on the changes required to
+ // support the longer instruction sequence.] Note, however, that
+ // going down that path will necessarily generate that longer
+ // sequence for all extended section accesses since the placement of
+ // a given entry within the section is not known at the time of
+ // code generation.
+ //
+ // TODO(mbrandy): Determine whether there is a benefit to supporting
+ // the longer sequence given that nops could be used for those
+ // entries which are reachable with a single instruction.
+ inline bool is_full() const { return !is_int16(size_); }
+
+ inline ConstantPoolArray::NumberOfEntries* number_of_entries(
+ ConstantPoolArray::LayoutSection section) {
+ return &number_of_entries_[section];
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* small_entries() {
+ return number_of_entries(ConstantPoolArray::SMALL_SECTION);
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* extended_entries() {
+ return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
+ }
+
+ private:
+ struct ConstantPoolEntry {
+ ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
+ int merged_index)
+ : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
+
+ RelocInfo rinfo_;
+ ConstantPoolArray::LayoutSection section_;
+ int merged_index_;
+ };
+
+ ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
+
+ uint32_t size_;
+ std::vector<ConstantPoolEntry> entries_;
+ ConstantPoolArray::LayoutSection current_section_;
+ ConstantPoolArray::NumberOfEntries number_of_entries_[2];
+};
+#endif
+
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~Assembler() {}
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+ // Determines if Label is bound and near enough so that a single
+ // branch instruction can be used to reach it.
+ bool is_near(Label* L, Condition cond);
+
+ // Returns the branch offset to the given label from the current code position
+ // Links the label to the current position if it is still unbound
+ // Manages the jump elimination optimization if the second parameter is true.
+ int branch_offset(Label* L, bool jump_elimination_allowed);
+
+ // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+#if V8_OOL_CONSTANT_POOL
+ INLINE(static bool IsConstantPoolLoadStart(Address pc));
+ INLINE(static bool IsConstantPoolLoadEnd(Address pc));
+ INLINE(static int GetConstantPoolOffset(Address pc));
+ INLINE(static void SetConstantPoolOffset(Address pc, int offset));
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool));
+#endif
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(
+ Address pc, ConstantPoolArray* constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(
+ Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
+
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address
+ // in the instruction stream that the call will return to.
+ INLINE(static Address return_address_from_call_start(Address pc));
+
+ // Return the code target address of the patch debug break slot
+ INLINE(static Address break_address_from_return_address(Address pc));
+
+ // This sets the branch destination.
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code* code, Address target);
+
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+  // Here we are patching the address in the lis/ori mov sequence.
+  // These values are used in the serialization process and must be zero for
+  // the PPC platform, as Code, Embedded Object or External-reference pointers
+ // are split across two consecutive instructions and don't exist separately
+ // in the code, so the serializer should not step forwards in memory after
+ // a target is resolved and written.
+ static const int kSpecialTargetSize = 0;
+
+// Number of instructions to load an address via a mov sequence.
+#if V8_TARGET_ARCH_PPC64
+ static const int kMovInstructionsConstantPool = 2;
+ static const int kMovInstructionsNoConstantPool = 5;
+#else
+ static const int kMovInstructionsConstantPool = 1;
+ static const int kMovInstructionsNoConstantPool = 2;
+#endif
+#if V8_OOL_CONSTANT_POOL
+ static const int kMovInstructions = kMovInstructionsConstantPool;
+#else
+ static const int kMovInstructions = kMovInstructionsNoConstantPool;
+#endif
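+  // Illustration (not part of the interface): without a constant pool on
+  // PPC64, a full 64-bit immediate is typically materialized with a
+  // five-instruction sequence such as lis/ori/sldi/oris/ori, matching
+  // kMovInstructionsNoConstantPool == 5; on 32-bit PPC the lis/ori pair
+  // gives kMovInstructionsNoConstantPool == 2.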
+
+ // Distance between the instruction referring to the address of the call
+ // target and the return address.
+
+ // Call sequence is a FIXED_SEQUENCE:
+ // mov r8, @ call address
+ // mtlr r8
+ // blrl
+ // @ return address
+ static const int kCallTargetAddressOffset =
+ (kMovInstructions + 2) * kInstrSize;
+
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ // Patched return sequence is a FIXED_SEQUENCE:
+ // mov r0, <address>
+ // mtlr r0
+ // blrl
+ static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
+
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ // Patched debug break slot code is a FIXED_SEQUENCE:
+ // mov r0, <address>
+ // mtlr r0
+ // blrl
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+  // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+  // code patch FIXED_SEQUENCE.
+ static const int kJSReturnSequenceInstructions =
+ kMovInstructionsNoConstantPool + 3;
+
+  // This is the length of the code sequence from SetDebugBreakAtSlot()
+  // FIXED_SEQUENCE.
+ static const int kDebugBreakSlotInstructions =
+ kMovInstructionsNoConstantPool + 2;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
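+  // Worked example (assuming PPC64 without an out-of-line constant pool,
+  // i.e. kMovInstructions == kMovInstructionsNoConstantPool == 5 and
+  // kInstrSize == 4):
+  //   kCallTargetAddressOffset      == (5 + 2) * 4 == 28 bytes
+  //   kJSReturnSequenceInstructions == 5 + 3       == 8 instructions
+  //   kDebugBreakSlotLength         == (5 + 2) * 4 == 28 bytes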
+
+ static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
+ return ((cr.code() * CRWIDTH) + crbit);
+ }
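+  // For example, with the standard PPC condition-register layout
+  // (CRWIDTH == 4, CR_EQ == 2), encode_crbit(cr7, CR_EQ) == 7 * 4 + 2 == 30,
+  // i.e. the EQ bit of CR field 7.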
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+
+ // Branch instructions
+ void bclr(BOfield bo, LKBit lk);
+ void blr();
+ void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
+ void b(int branch_offset, LKBit lk);
+
+ void bcctr(BOfield bo, LKBit lk);
+ void bctr();
+ void bctrl();
+
+ // Convenience branch instructions using labels
+ void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L, false), lk); }
+
+ void bc_short(Condition cond, Label* L, CRegister cr = cr7,
+ LKBit lk = LeaveLK) {
+ DCHECK(cond != al);
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+
+ int b_offset = branch_offset(L, false);
+
+ switch (cond) {
+ case eq:
+ bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
+ break;
+ case ne:
+ bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
+ break;
+ case gt:
+ bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
+ break;
+ case le:
+ bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
+ break;
+ case lt:
+ bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
+ break;
+ case ge:
+ bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
+ break;
+ case unordered:
+ bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
+ break;
+ case ordered:
+ bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
+ break;
+ case overflow:
+ bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
+ break;
+ case nooverflow:
+ bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
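+  // Conditional branch to a label. A single bc instruction is used when the
+  // label is bound and within short reach (or while no trampoline has been
+  // emitted); otherwise we branch over an unconditional b with the negated
+  // condition, which has effectively unlimited reach.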
+ void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ if (cond == al) {
+ b(L, lk);
+ return;
+ }
+
+ if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
+ bc_short(cond, L, cr, lk);
+ return;
+ }
+
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ bc_short(neg_cond, &skip, cr);
+ b(L, lk);
+ bind(&skip);
+ }
+
+ void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(ne, L, cr, lk);
+ }
+ void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(eq, L, cr, lk);
+ }
+ void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(lt, L, cr, lk);
+ }
+ void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(ge, L, cr, lk);
+ }
+ void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(le, L, cr, lk);
+ }
+ void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(gt, L, cr, lk);
+ }
+ void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(unordered, L, cr, lk);
+ }
+ void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ b(ordered, L, cr, lk);
+ }
+ void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
+ b(overflow, L, cr, lk);
+ }
+ void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
+ b(nooverflow, L, cr, lk);
+ }
+
+ // Decrement CTR; branch if CTR != 0
+ void bdnz(Label* L, LKBit lk = LeaveLK) {
+ bc(branch_offset(L, false), DCBNZ, 0, lk);
+ }
+
+ // Data-processing instructions
+
+ void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void subfic(Register dst, Register src, const Operand& imm);
+
+ void subfc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void addze(Register dst, Register src1, OEBit o, RCBit r);
+
+ void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+
+ void addi(Register dst, Register src, const Operand& imm);
+ void addis(Register dst, Register src, const Operand& imm);
+ void addic(Register dst, Register src, const Operand& imm);
+
+ void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void andi(Register ra, Register rs, const Operand& imm);
+ void andis(Register ra, Register rs, const Operand& imm);
+ void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void notx(Register dst, Register src, RCBit r = LeaveRC);
+ void ori(Register dst, Register src, const Operand& imm);
+ void oris(Register dst, Register src, const Operand& imm);
+ void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void xori(Register dst, Register src, const Operand& imm);
+ void xoris(Register ra, Register rs, const Operand& imm);
+ void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+ void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
+ void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
+ void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
+ void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
+ void li(Register dst, const Operand& src);
+ void lis(Register dst, const Operand& imm);
+ void mr(Register dst, Register src);
+
+ void lbz(Register dst, const MemOperand& src);
+ void lbzx(Register dst, const MemOperand& src);
+ void lbzux(Register dst, const MemOperand& src);
+ void lhz(Register dst, const MemOperand& src);
+ void lhzx(Register dst, const MemOperand& src);
+ void lhzux(Register dst, const MemOperand& src);
+ void lwz(Register dst, const MemOperand& src);
+ void lwzu(Register dst, const MemOperand& src);
+ void lwzx(Register dst, const MemOperand& src);
+ void lwzux(Register dst, const MemOperand& src);
+ void lwa(Register dst, const MemOperand& src);
+ void stb(Register dst, const MemOperand& src);
+ void stbx(Register dst, const MemOperand& src);
+ void stbux(Register dst, const MemOperand& src);
+ void sth(Register dst, const MemOperand& src);
+ void sthx(Register dst, const MemOperand& src);
+ void sthux(Register dst, const MemOperand& src);
+ void stw(Register dst, const MemOperand& src);
+ void stwu(Register dst, const MemOperand& src);
+ void stwx(Register rs, const MemOperand& src);
+ void stwux(Register rs, const MemOperand& src);
+
+ void extsb(Register rs, Register ra, RCBit r = LeaveRC);
+ void extsh(Register rs, Register ra, RCBit r = LeaveRC);
+
+ void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
+
+#if V8_TARGET_ARCH_PPC64
+ void ld(Register rd, const MemOperand& src);
+ void ldx(Register rd, const MemOperand& src);
+ void ldu(Register rd, const MemOperand& src);
+ void ldux(Register rd, const MemOperand& src);
+ void std(Register rs, const MemOperand& src);
+ void stdx(Register rs, const MemOperand& src);
+ void stdu(Register rs, const MemOperand& src);
+ void stdux(Register rs, const MemOperand& src);
+ void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+ void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+ void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
+ void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
+ void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+ void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+ void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+ void clrrdi(Register dst, Register src, const Operand& val,
+ RCBit rc = LeaveRC);
+ void clrldi(Register dst, Register src, const Operand& val,
+ RCBit rc = LeaveRC);
+ void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+ void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
+ void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+ void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+ void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
+ void extsw(Register rs, Register ra, RCBit r = LeaveRC);
+ void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+ void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+ RCBit r = LeaveRC);
+#endif
+
+ void rlwinm(Register ra, Register rs, int sh, int mb, int me,
+ RCBit rc = LeaveRC);
+ void rlwimi(Register ra, Register rs, int sh, int mb, int me,
+ RCBit rc = LeaveRC);
+ void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
+ RCBit rc = LeaveRC);
+ void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+ void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+ void clrrwi(Register dst, Register src, const Operand& val,
+ RCBit rc = LeaveRC);
+ void clrlwi(Register dst, Register src, const Operand& val,
+ RCBit rc = LeaveRC);
+ void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+ void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+ void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
+ void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+ void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+
+ void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC);
+
+ void subi(Register dst, Register src1, const Operand& src2);
+
+ void cmp(Register src1, Register src2, CRegister cr = cr7);
+ void cmpl(Register src1, Register src2, CRegister cr = cr7);
+ void cmpw(Register src1, Register src2, CRegister cr = cr7);
+ void cmplw(Register src1, Register src2, CRegister cr = cr7);
+
+ void mov(Register dst, const Operand& src);
+
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ void mov_label_offset(Register dst, Label* label);
+
+ // Multiply instructions
+ void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+
+ // Miscellaneous arithmetic instructions
+
+ // Special register access
+ void crxor(int bt, int ba, int bb);
+ void crclr(int bt) { crxor(bt, bt, bt); }
+ void creqv(int bt, int ba, int bb);
+ void crset(int bt) { creqv(bt, bt, bt); }
+ void mflr(Register dst);
+ void mtlr(Register src);
+ void mtctr(Register src);
+ void mtxer(Register src);
+ void mcrfs(int bf, int bfa);
+ void mfcr(Register dst);
+#if V8_TARGET_ARCH_PPC64
+ void mffprd(Register dst, DoubleRegister src);
+ void mffprwz(Register dst, DoubleRegister src);
+ void mtfprd(DoubleRegister dst, Register src);
+ void mtfprwz(DoubleRegister dst, Register src);
+ void mtfprwa(DoubleRegister dst, Register src);
+#endif
+
+ void fake_asm(enum FAKE_OPCODE_T fopcode);
+ void marker_asm(int mcode);
+ void function_descriptor();
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg, Condition cond = al,
+ int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+  void bkpt(uint32_t imm16);
+
+ // Informational messages when simulating
+ void info(const char* msg, Condition cond = al,
+ int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+ void dcbf(Register ra, Register rb);
+ void sync();
+ void lwsync();
+ void icbi(Register ra, Register rb);
+ void isync();
+
+ // Support for floating point
+ void lfd(const DoubleRegister frt, const MemOperand& src);
+ void lfdu(const DoubleRegister frt, const MemOperand& src);
+ void lfdx(const DoubleRegister frt, const MemOperand& src);
+ void lfdux(const DoubleRegister frt, const MemOperand& src);
+ void lfs(const DoubleRegister frt, const MemOperand& src);
+ void lfsu(const DoubleRegister frt, const MemOperand& src);
+ void lfsx(const DoubleRegister frt, const MemOperand& src);
+ void lfsux(const DoubleRegister frt, const MemOperand& src);
+ void stfd(const DoubleRegister frs, const MemOperand& src);
+ void stfdu(const DoubleRegister frs, const MemOperand& src);
+ void stfdx(const DoubleRegister frs, const MemOperand& src);
+ void stfdux(const DoubleRegister frs, const MemOperand& src);
+ void stfs(const DoubleRegister frs, const MemOperand& src);
+ void stfsu(const DoubleRegister frs, const MemOperand& src);
+ void stfsx(const DoubleRegister frs, const MemOperand& src);
+ void stfsux(const DoubleRegister frs, const MemOperand& src);
+
+ void fadd(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc = LeaveRC);
+ void fsub(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc = LeaveRC);
+ void fdiv(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb, RCBit rc = LeaveRC);
+ void fmul(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, RCBit rc = LeaveRC);
+ void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
+ CRegister cr = cr7);
+ void fmr(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
+ void fctiw(const DoubleRegister frt, const DoubleRegister frb);
+ void frim(const DoubleRegister frt, const DoubleRegister frb);
+ void frsp(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fcfid(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctid(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctidz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fsel(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fneg(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
+ void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
+ void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
+ RCBit rc = LeaveRC);
+ void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fabs(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fmadd(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fmsub(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frc, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+
+ // Pseudo instructions
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ GROUP_ENDING_NOP,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ };
+
+ void nop(int type = 0); // 0 is the default non-marking type.
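+  // For instance, nop(DEBUG_BREAK_NOP) emits a marking nop that can later be
+  // recognized with IsNop(instr, DEBUG_BREAK_NOP), while nop() emits a plain
+  // non-marking nop.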
+
+ void push(Register src) {
+#if V8_TARGET_ARCH_PPC64
+ stdu(src, MemOperand(sp, -kPointerSize));
+#else
+ stwu(src, MemOperand(sp, -kPointerSize));
+#endif
+ }
+
+ void pop(Register dst) {
+#if V8_TARGET_ARCH_PPC64
+ ld(dst, MemOperand(sp));
+#else
+ lwz(dst, MemOperand(sp));
+#endif
+ addi(sp, sp, Operand(kPointerSize));
+ }
+
+ void pop() { addi(sp, sp, Operand(kPointerSize)); }
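+  // Note: push pre-decrements sp by kPointerSize and stores the register with
+  // a store-with-update; pop reloads from sp and then adds kPointerSize back,
+  // so push(r3); pop(r4); leaves sp unchanged and copies r3 into r4.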
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L); }
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+ // Class for scoping postponing the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Debugging
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ // Causes compiler to fail
+ // DCHECK(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ TypeFeedbackId RecordedAstId() {
+ // Causes compiler to fail
+ // DCHECK(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --code-comments to enable.
+ void RecordComment(const char* msg);
+
+ // Writes a single byte or word of data in the code stream. Used
+ // for inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
+ void emit_ptr(uintptr_t data);
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ // Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ static Condition GetCondition(Instr instr);
+
+ static bool IsLis(Instr instr);
+ static bool IsLi(Instr instr);
+ static bool IsAddic(Instr instr);
+ static bool IsOri(Instr instr);
+
+ static bool IsBranch(Instr instr);
+ static Register GetRA(Instr instr);
+ static Register GetRB(Instr instr);
+#if V8_TARGET_ARCH_PPC64
+ static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
+ Instr instr4, Instr instr5);
+#else
+ static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
+#endif
+
+ static bool IsCmpRegister(Instr instr);
+ static bool IsCmpImmediate(Instr instr);
+ static bool IsRlwinm(Instr instr);
+#if V8_TARGET_ARCH_PPC64
+ static bool IsRldicl(Instr instr);
+#endif
+ static bool IsCrSet(Instr instr);
+ static Register GetCmpImmediateRegister(Instr instr);
+ static int GetCmpImmediateRawImmediate(Instr instr);
+ static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+ void CheckTrampolinePool();
+
+ int instructions_required_for_mov(const Operand& x) const;
+
+#if V8_OOL_CONSTANT_POOL
+ // Decide between using the constant pool vs. a mov immediate sequence.
+ bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const;
+
+ // The code currently calls CheckBuffer() too often. This has the side
+ // effect of randomly growing the buffer in the middle of multi-instruction
+ // sequences.
+ // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
+ // and multiple instructions. We cannot grow the buffer until the
+ // relocation and all of the instructions are written.
+ //
+ // This function allows outside callers to check and grow the buffer
+ void EnsureSpaceFor(int space_needed);
+#endif
+
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+#if V8_OOL_CONSTANT_POOL
+ bool is_constant_pool_full() const {
+ return constant_pool_builder_.is_full();
+ }
+
+ bool use_extended_constant_pool() const {
+ return constant_pool_builder_.current_section() ==
+ ConstantPoolArray::EXTENDED_SECTION;
+ }
+#endif
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+ static void RelocateInternalReference(
+ Address pc, intptr_t delta, Address code_start,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ static int DecodeInternalReference(Vector<char> buffer, Address pc);
+#endif
+
+ protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+#if V8_OOL_CONSTANT_POOL
+ ConstantPoolArray::LayoutSection ConstantPoolAddEntry(
+ const RelocInfo& rinfo) {
+ return constant_pool_builder_.AddEntry(this, rinfo);
+ }
+#endif
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
+
+ void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const { return internal_trampoline_exception_; }
+
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+#if V8_OOL_CONSTANT_POOL
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+#endif
+
+ private:
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+#if V8_OOL_CONSTANT_POOL
+ ConstantPoolBuilder constant_pool_builder_;
+#endif
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void CheckTrampolinePoolQuick();
+
+ // Instruction generation
+ void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
+ DoubleRegister frb, RCBit r);
+ void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
+ bool signed_disp);
+ void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r);
+ void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
+ RCBit r);
+ void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
+ RCBit r);
+ void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
+ RCBit r);
+
+ // Labels
+ void print(Label* L);
+ int max_reach_from(int pos);
+ void bind_to(Label* L, int pos);
+ void next(Label* L);
+
+ class Trampoline {
+ public:
+ Trampoline() {
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ DCHECK(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry();
+ int unbound_labels_count_;
+  // If a trampoline has been emitted, the generated code is becoming large.
+  // Since this is already a slow case that can break our code generation in
+  // the extreme case, we use this information to switch to a mode of branch
+  // instruction generation that no longer relies on a single branch
+  // instruction.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = kInstrSize;
+ static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
+ static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
+ static const int kInvalidSlotPos = -1;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+ friend class RegExpMacroAssemblerPPC;
+ friend class RelocInfo;
+ friend class CodePatcher;
+ friend class BlockTrampolinePoolScope;
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
new file mode 100644
index 0000000000..7817fcd0f6
--- /dev/null
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -0,0 +1,1615 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments excluding receiver
+ // -- r4 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+  // -- sp[4 * (argc - 1)] : first argument (argc == r3)
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(r4);
+ } else {
+ DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+  // JumpToExternalReference expects r3 to contain the number of arguments
+  // including the receiver and the extra arguments.
+ __ addi(r3, r3, Operand(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+
+ __ LoadP(result,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ LoadP(result,
+ MemOperand(result, Context::SlotOffset(
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+
+ __ LoadP(result,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the Array function from the native context.
+ __ LoadP(
+ result,
+ MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, r4);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ __ TestIfSmi(r5, r0);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+ __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ // tail call a stub
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r4);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ __ TestIfSmi(r5, r0);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ // tail call a stub
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6);
+
+ Register function = r4;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5);
+ __ cmp(function, r5);
+ __ Assert(eq, kUnexpectedStringFunction);
+ }
+
+  // Load the first argument into r3 and get rid of the rest.
+ Label no_arguments;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+  // First arg = sp[(argc - 1) * 4].
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ add(sp, sp, r3);
+ __ LoadP(r3, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+ __ Drop(2);
+
+ Register argument = r5;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(r3, // Input.
+ argument, // Result.
+ r6, // Scratch.
+ r7, // Scratch.
+ r8, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- r5 : argument converted to string
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label gc_required;
+ __ Allocate(JSValue::kSize,
+ r3, // Result.
+ r6, // Scratch.
+ r7, // Scratch.
+ &gc_required, TAG_OBJECT);
+
+  // Initialize the String object.
+ Register map = r6;
+ __ LoadGlobalFunctionInitialMap(function, map, r7);
+ if (FLAG_debug_code) {
+ __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2));
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+ __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ cmpi(r7, Operand::Zero());
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ __ JumpIfSmi(r3, &convert_argument);
+
+ // Is it a String?
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ andi(r0, r6, Operand(kIsNotStringMask));
+ __ bne(&convert_argument, cr0);
+ __ mr(argument, r3);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+ __ b(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into r5.
+ __ bind(&convert_argument);
+ __ push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(r3);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ pop(function);
+ __ mr(argument, r3);
+ __ b(&argument_is_string);
+
+ // Load the empty string into r5, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ b(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(r4, r4);
+
+ __ CallRuntime(function_id, 1);
+  // Restore the function.
+ __ Pop(r4);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ addi(ip, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bge(&ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r5 : allocation site or undefined
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Should never create mementos for api functions.
+ DCHECK(!is_api_function || !create_memento);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r5, r6);
+ __ push(r5);
+ }
+
+ // Preserve the two incoming parameters on the stack.
+ __ SmiTag(r3);
+ __ push(r3); // Smi-tagged arguments count.
+ __ push(r4); // Constructor function.
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ mov(r5, Operand(debug_step_in_fp));
+ __ LoadP(r5, MemOperand(r5));
+ __ cmpi(r5, Operand::Zero());
+ __ bne(&rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r4: constructor function
+ __ LoadP(r5,
+ FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &rt_call);
+ __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ bne(&rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r4: constructor function
+ // r5: initial map
+ __ CompareInstanceType(r5, r6, JS_FUNCTION_TYPE);
+ __ beq(&rt_call);
+
+ if (!is_api_function) {
+ Label allocate;
+ MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwz(r7, bit_field3);
+ __ DecodeField<Map::ConstructionCount>(r11, r7);
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
+ __ beq(&allocate);
+ // Decrease generous allocation count.
+ __ Add(r7, r7, -(1 << Map::ConstructionCount::kShift), r0);
+ __ stw(r7, bit_field3);
+ __ cmpi(r11, Operand(JSFunction::kFinishSlackTracking));
+ __ bne(&allocate);
+
+ __ push(r4);
+
+ __ Push(r5, r4); // r4 = constructor
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ Pop(r4, r5);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // r4: constructor function
+ // r5: initial map
+ __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ addi(r6, r6, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
+ __ Allocate(r6, r7, r8, r9, &rt_call, SIZE_IN_WORDS);
+
+      // The JSObject is allocated; now initialize the fields. The map is set
+      // to the initial map, and properties and elements are set to the empty
+      // fixed array.
+ // r4: constructor function
+ // r5: initial map
+ // r6: object size (not including memento if create_memento)
+ // r7: JSObject (not tagged)
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ mr(r8, r7);
+ __ StoreP(r5, MemOperand(r8, JSObject::kMapOffset));
+ __ StoreP(r9, MemOperand(r8, JSObject::kPropertiesOffset));
+ __ StoreP(r9, MemOperand(r8, JSObject::kElementsOffset));
+ __ addi(r8, r8, Operand(JSObject::kElementsOffset + kPointerSize));
+
+ __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2));
+ __ add(r9, r7, r9); // End of object.
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r4: constructor function
+ // r5: initial map
+ // r6: object size (in words, including memento if create_memento)
+ // r7: JSObject (not tagged)
+ // r8: First in-object property of JSObject (not tagged)
+ // r9: End of object
+ DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ __ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
+ __ beq(&no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
+ if (FLAG_debug_code) {
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ add(r0, r8, r0);
+ // r0: offset of first field after pre-allocated fields
+ __ cmp(r0, r9);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ {
+ Label done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&done);
+ __ InitializeNFieldsWithFiller(r8, r3, r10);
+ __ bind(&done);
+ }
+ // To allow for truncation.
+ __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+
+ if (create_memento) {
+ __ subi(r3, r9, Operand(AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(r8, r3, r10);
+
+ // Fill in memento fields.
+ // r8: points to the allocated but uninitialized memento.
+ __ LoadRoot(r10, Heap::kAllocationMementoMapRootIndex);
+ __ StoreP(r10, MemOperand(r8, AllocationMemento::kMapOffset));
+ // Load the AllocationSite
+ __ LoadP(r10, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r10,
+ MemOperand(r8, AllocationMemento::kAllocationSiteOffset));
+ __ addi(r8, r8, Operand(AllocationMemento::kAllocationSiteOffset +
+ kPointerSize));
+ } else {
+ __ InitializeFieldsWithFiller(r8, r9, r10);
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ addi(r7, r7, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+ // r4: constructor function
+ // r7: JSObject
+ // r8: start of next object (not tagged)
+ __ lbz(r6, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+      // The instance-sizes field contains both the pre-allocated property
+      // fields and the in-object properties counts.
+ __ lbz(r0, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
+ __ add(r6, r6, r0);
+ __ lbz(r0, FieldMemOperand(r5, Map::kInObjectPropertiesOffset));
+ __ sub(r6, r6, r0, LeaveOE, SetRC);
+
+ // Done if no extra properties are to be allocated.
+ __ beq(&allocated, cr0);
+ __ Assert(ge, kPropertyAllocationCountFailed, cr0);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // r4: constructor
+ // r6: number of elements in properties array
+ // r7: JSObject
+ // r8: start of next object
+ __ addi(r3, r6, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ Allocate(
+ r3, r8, r9, r5, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // r4: constructor
+ // r6: number of elements in properties array
+ // r7: JSObject
+ // r8: FixedArray (not tagged)
+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+ __ mr(r5, r8);
+ DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ StoreP(r9, MemOperand(r5));
+ DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ SmiTag(r3, r6);
+ __ StoreP(r3, MemOperand(r5, kPointerSize));
+ __ addi(r5, r5, Operand(2 * kPointerSize));
+
+ // Initialize the fields to undefined.
+ // r4: constructor function
+ // r5: First element of FixedArray (not tagged)
+ // r6: number of elements in properties array
+ // r7: JSObject
+ // r8: FixedArray (not tagged)
+ DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ {
+ Label done;
+ __ cmpi(r6, Operand::Zero());
+ __ beq(&done);
+ if (!is_api_function || create_memento) {
+ __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+ __ cmp(r10, r11);
+ __ Assert(eq, kUndefinedValueNotLoaded);
+ }
+ __ InitializeNFieldsWithFiller(r5, r6, r10);
+ __ bind(&done);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // r4: constructor function
+ // r7: JSObject
+ // r8: FixedArray (not tagged)
+ __ addi(r8, r8, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ StoreP(r8, FieldMemOperand(r7, JSObject::kPropertiesOffset), r0);
+
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r7: JSObject
+ __ b(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+ // r7: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(r7, r8);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ // r4: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+ __ push(r5);
+ }
+
+ __ push(r4); // argument for Runtime_NewObject
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 1);
+ }
+ __ mr(r7, r3);
+
+    // If we ended up using the runtime and we want a memento, then the
+    // runtime call made it for us, and we shouldn't increment the create
+    // count here.
+ Label count_incremented;
+ if (create_memento) {
+ __ b(&count_incremented);
+ }
+
+ // Receiver for constructor call allocated.
+ // r7: JSObject
+ __ bind(&allocated);
+
+ if (create_memento) {
+ __ LoadP(r5, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r5, r8);
+ __ beq(&count_incremented);
+ // r5 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ LoadP(
+ r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset));
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
+ __ StoreP(
+ r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset),
+ r0);
+ __ bind(&count_incremented);
+ }
+
+ __ Push(r7, r7);
+
+ // Reload the number of arguments and the constructor from the stack.
+ // sp[0]: receiver
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(r6, MemOperand(sp, 3 * kPointerSize));
+
+ // Set up pointer to last argument.
+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Set up number of arguments for function call below
+ __ SmiUntag(r3, r6);
+
+ // Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
+ // r4: constructor function
+ // r5: address of last argument (caller sp)
+ // r6: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
+ Label loop, no_args;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_args);
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ subi(ip, ip, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r5, ip));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+
+ // Call the function.
+ // r3: number of arguments
+ // r4: constructor function
+ if (is_api_function) {
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context from the frame.
+ // r3: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r3: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r3, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r3, r4, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r3, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r3: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
+ }
+
+ __ SmiToPtrArrayOffset(r4, r4);
+ __ add(sp, sp, r4);
+ __ addi(sp, sp, Operand(kPointerSize));
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ __ blr();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from Generate_JS_Entry
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // r7: argv
+ // r0,r8-r9, cp may be clobbered
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ li(cp, Operand::Zero());
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ push(r4);
+ __ push(r5);
+
+ // Copy arguments to the stack in a loop.
+ // r4: function
+ // r6: argc
+ // r7: argv, i.e. points to first arg
+ Label loop, entry;
+ __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2));
+ __ add(r5, r7, r0);
+ // r5 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ LoadP(r8, MemOperand(r7)); // read next parameter
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ LoadP(r0, MemOperand(r8)); // dereference handle
+ __ push(r0); // push parameter
+ __ bind(&entry);
+ __ cmp(r7, r5);
+ __ bne(&loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ mr(r14, r7);
+ __ mr(r15, r7);
+ __ mr(r16, r7);
+ __ mr(r17, r7);
+
+ // Invoke the code and pass argc as r3.
+ __ mr(r3, r6);
+ if (is_construct) {
+ // No type feedback cell is available
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+    // Exit the JS frame, remove the parameters (except the function), and
+    // return.
+ }
+ __ blr();
+
+ // r3: result
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push function as parameter to the runtime call.
+ __ Push(r4, r4);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore the function.
+ __ pop(r4);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Point r3 at the start of the PlatformCodeAge sequence.
+ __ mr(r3, ip);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r3 - contains return address (beginning of patch sequence)
+ // r4 - isolate
+ // lr - return address
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ mflr(r0);
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ PrepareCallCFunction(2, 0, r5);
+ __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ mtlr(r0);
+ __ mr(ip, r3);
+ __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Point r3 at the start of the PlatformCodeAge sequence.
+ __ mr(r3, ip);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r3 - contains return address (beginning of patch sequence)
+ // r4 - isolate
+ // lr - return address
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ mflr(r0);
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ PrepareCallCFunction(2, 0, r5);
+ __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ mtlr(r0);
+ __ mr(ip, r3);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PushFixedFrame(r4);
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Jump to point after the code-age stub.
+ __ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
+ __ Jump(r3);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail-call the runtime on deopts, passing their
+ // parameters in registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ addi(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ blr(); // Jump to miss handler
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
+ __ push(r3);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it -> r9.
+ __ LoadP(r9, MemOperand(sp, 0 * kPointerSize));
+ __ SmiUntag(r9);
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ bne(&with_tos_register);
+ __ addi(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&with_tos_register);
+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG));
+ __ bne(&unknown_state);
+ __ addi(sp, sp, Operand(2 * kPointerSize)); // Remove state.
+ __ Ret();
+
+ __ bind(&unknown_state);
+ __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ push(r3);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+ // If the code object is null, just return to the unoptimized code.
+ Label skip;
+ __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ bne(&skip);
+ __ Ret();
+
+ __ bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
+
+#if V8_OOL_CONSTANT_POOL
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ LoadP(kConstantPoolRegister,
+ FieldMemOperand(r3, Code::kConstantPoolOffset));
+#endif
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ LoadP(r4, FieldMemOperand(
+ r4, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ SmiUntag(r4);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r3, r3, r4);
+ __ addi(r0, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mtlr(r0);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+#if V8_OOL_CONSTANT_POOL
+ }
+#endif
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bge(&ok);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // 1. Make sure we have at least one argument.
+ // r3: actual number of arguments
+ {
+ Label done;
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&done);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5);
+ __ addi(r3, r3, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // r3: actual number of arguments
+ Label slow, non_function;
+ __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+ __ add(r4, sp, r4);
+ __ LoadP(r4, MemOperand(r4));
+ __ JumpIfSmi(r4, &non_function);
+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // r3: actual number of arguments
+ // r4: function
+ Label shift_arguments;
+ __ li(r7, Operand::Zero()); // indicate regular JS_FUNCTION
+ {
+ Label convert_to_object, use_global_proxy, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(r6,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kStrictModeFunction,
+#else
+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&shift_arguments, cr0);
+
+ // Do not transform the receiver for native functions (the compiler hints
+ // are already in r6).
+ __ TestBit(r6,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kNative,
+#else
+ SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&shift_arguments, cr0);
+
+ // Compute the receiver in sloppy mode.
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ add(r5, sp, ip);
+ __ LoadP(r5, MemOperand(r5, -kPointerSize));
+ // r3: actual number of arguments
+ // r4: function
+ // r5: first argument
+ __ JumpIfSmi(r5, &convert_to_object);
+
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ cmp(r5, r6);
+ __ beq(&use_global_proxy);
+ __ LoadRoot(r6, Heap::kNullValueRootIndex);
+ __ cmp(r5, r6);
+ __ beq(&use_global_proxy);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&shift_arguments);
+
+ __ bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r3, r5);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mr(r5, r3);
+
+ __ pop(r3);
+ __ SmiUntag(r3);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function to r4, and the flag to r7.
+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ add(r7, sp, r7);
+ __ LoadP(r4, MemOperand(r7));
+ __ li(r7, Operand::Zero());
+ __ b(&patch_receiver);
+
+ __ bind(&use_global_proxy);
+ __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+ __ bind(&patch_receiver);
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ add(r6, sp, ip);
+ __ StoreP(r5, MemOperand(r6, -kPointerSize));
+
+ __ b(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ li(r7, Operand(1, RelocInfo::NONE32)); // indicate function proxy
+ __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ beq(&shift_arguments);
+ __ bind(&non_function);
+ __ li(r7, Operand(2, RelocInfo::NONE32)); // indicate non-function
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // r3: actual number of arguments
+ // r4: function
+ // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ add(r5, sp, ip);
+ __ StoreP(r4, MemOperand(r5, -kPointerSize));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // r3: actual number of arguments
+ // r4: function
+ // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+ __ bind(&shift_arguments);
+ {
+ Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ add(r5, sp, ip);
+
+ __ bind(&loop);
+ __ LoadP(ip, MemOperand(r5, -kPointerSize));
+ __ StoreP(ip, MemOperand(r5));
+ __ subi(r5, r5, Operand(kPointerSize));
+ __ cmp(r5, sp);
+ __ bne(&loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ subi(r3, r3, Operand(1));
+ __ pop();
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // r3: actual number of arguments
+ // r4: function
+ // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+ {
+ Label function, non_proxy;
+ __ cmpi(r7, Operand::Zero());
+ __ beq(&function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ li(r5, Operand::Zero());
+ __ cmpi(r7, Operand(1));
+ __ bne(&non_proxy);
+
+ __ push(r4); // re-add proxy object as additional argument
+ __ addi(r3, r3, Operand(1));
+ __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register ip without checking arguments.
+ // r3: actual number of arguments
+ // r4: function
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r5, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(r5);
+#endif
+ __ cmp(r5, r3); // Check formal and actual parameter counts.
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET, ne);
+
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(ip, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ {
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+
+ __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r3);
+ __ LoadP(r3, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r3);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ // Make r5 the space we have left. The stack might already be overflowed
+ // here which will cause r5 to become negative.
+ __ sub(r5, sp, r5);
+ // Check if the arguments will overflow the stack.
+ __ SmiToPtrArrayOffset(r0, r3);
+ __ cmp(r5, r0);
+ __ bgt(&okay); // Signed comparison.
+
+ // Out of stack space.
+ __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+ __ Push(r4, r3);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ li(r4, Operand::Zero());
+ __ Push(r3, r4); // limit and initial index.
+
+ // Get the receiver.
+ __ LoadP(r3, MemOperand(fp, kRecvOffset));
+
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+ __ bne(&push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r4.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_proxy;
+ __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(r5,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kStrictModeFunction,
+#else
+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&push_receiver, cr0);
+
+ // Do not transform the receiver for native functions (the compiler hints
+ // are already in r5).
+ __ TestBit(r5,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kNative,
+#else
+ SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&push_receiver, cr0);
+
+ // Compute the receiver in sloppy mode.
+ __ JumpIfSmi(r3, &call_to_object);
+ __ LoadRoot(r4, Heap::kNullValueRootIndex);
+ __ cmp(r3, r4);
+ __ beq(&use_global_proxy);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, r4);
+ __ beq(&use_global_proxy);
+
+ // Check if the receiver is already a JavaScript object.
+ // r3: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&push_receiver);
+
+ // Convert the receiver to a regular object.
+ // r3: receiver
+ __ bind(&call_to_object);
+ __ push(r3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ b(&push_receiver);
+
+ __ bind(&use_global_proxy);
+ __ LoadP(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalProxyOffset));
+
+ // Push the receiver.
+ // r3: receiver
+ __ bind(&push_receiver);
+ __ push(r3);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ LoadP(r3, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r3: current argument index
+ __ bind(&loop);
+ __ LoadP(r4, MemOperand(fp, kArgsOffset));
+ __ Push(r4, r3);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r3);
+
+ // Advance the argument index (a smi) stored at kIndexOffset.
+ __ LoadP(r3, MemOperand(fp, kIndexOffset));
+ __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+ __ StoreP(r3, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ LoadP(r4, MemOperand(fp, kLimitOffset));
+ __ cmp(r3, r4);
+ __ bne(&loop);
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(r3);
+ __ SmiUntag(r3);
+ __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+ __ bne(&call_proxy);
+ __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+
+ __ LeaveFrame(StackFrame::INTERNAL, 3 * kPointerSize);
+ __ blr();
+
+ // Call the function proxy.
+ __ bind(&call_proxy);
+ __ push(r4); // add function proxy as last argument
+ __ addi(r3, r3, Operand(1));
+ __ li(r5, Operand::Zero());
+ __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ }
+ __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ blr();
+}
+
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- r3 : actual number of arguments
+ // -- r4 : function (passed through to callee)
+ // -- r5 : expected number of arguments
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
+ // Make r8 the space we have left. The stack might already be overflowed
+ // here which will cause r8 to become negative.
+ __ sub(r8, sp, r8);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ cmp(r8, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ SmiTag(r3);
+ __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ mflr(r0);
+ __ push(r0);
+#if V8_OOL_CONSTANT_POOL
+ __ Push(fp, kConstantPoolRegister, r7, r4, r3);
+#else
+ __ Push(fp, r7, r4, r3);
+#endif
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ int stack_adjustment = kPointerSize; // adjust for receiver
+ __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
+ __ SmiToPtrArrayOffset(r0, r4);
+ __ add(sp, sp, r0);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : actual number of arguments
+ // -- r4 : function (passed through to callee)
+ // -- r5 : expected number of arguments
+ // -----------------------------------
+
+ Label stack_overflow;
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ cmp(r3, r5);
+ __ blt(&too_few);
+ __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ beq(&dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into r3 and copy end address into r5.
+ // r3: actual number of arguments as a smi
+ // r4: function
+ // r5: expected number of arguments
+ // ip: code entry to call
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ add(r3, r3, fp);
+ // adjust for return address and receiver
+ __ addi(r3, r3, Operand(2 * kPointerSize));
+ __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
+ __ sub(r5, r3, r5);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r3: copy start address
+ // r4: function
+ // r5: copy end address
+ // ip: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ LoadP(r0, MemOperand(r3, 0));
+ __ push(r0);
+ __ cmp(r3, r5); // Compare before moving to next argument.
+ __ subi(r3, r3, Operand(kPointerSize));
+ __ bne(&copy);
+
+ __ b(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate the copy start address into r3; the copy end address is fp.
+ // r3: actual number of arguments as a smi
+ // r4: function
+ // r5: expected number of arguments
+ // ip: code entry to call
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ add(r3, r3, fp);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r3: copy start address
+ // r4: function
+ // r5: expected number of arguments
+ // ip: code entry to call
+ Label copy;
+ __ bind(&copy);
+ // Adjust load for return address and receiver.
+ __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
+ __ push(r0);
+ __ cmp(r3, fp); // Compare before moving to next argument.
+ __ subi(r3, r3, Operand(kPointerSize));
+ __ bne(&copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r4: function
+ // r5: expected number of arguments
+ // ip: code entry to call
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
+ __ sub(r5, fp, r5);
+ // Adjust for frame.
+ __ subi(r5, r5, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
+
+ Label fill;
+ __ bind(&fill);
+ __ push(r0);
+ __ cmp(sp, r5);
+ __ bne(&fill);
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ __ CallJSEntry(ip);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ blr();
+
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ JumpToJSEntry(ip);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bkpt(0);
+ }
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
new file mode 100644
index 0000000000..3e84a2143c
--- /dev/null
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -0,0 +1,4893 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler =
+ Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ }
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate, CodeStubDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ Address deopt_handler =
+ Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE);
+ } else {
+ descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
+ JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ }
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+ Register rhs, Label* lhs_not_nan,
+ Label* slow, bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+ Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+ ExternalReference miss) {
+ // Update the static counter each time a new code stub is generated.
+ isolate()->counters()->code_stubs()->Increment();
+
+ CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+ int param_count = descriptor.GetEnvironmentParameterCount();
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ DCHECK(param_count == 0 ||
+ r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+ // Push arguments
+ for (int i = 0; i < param_count; ++i) {
+ __ push(descriptor.GetEnvironmentParameterRegister(i));
+ }
+ __ CallExternalReference(miss, param_count);
+ }
+
+ __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done, fastpath_done;
+ Register input_reg = source();
+ Register result_reg = destination();
+ DCHECK(is_truncating());
+
+ int double_offset = offset();
+
+ // Immediate values for this stub fit in instructions, so it's safe to use ip.
+ Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+ __ push(scratch);
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += kPointerSize;
+
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ lfd(double_scratch, MemOperand(input_reg, double_offset));
+
+ // Do fast-path convert from double to int.
+ __ ConvertDoubleToInt64(double_scratch,
+#if !V8_TARGET_ARCH_PPC64
+ scratch,
+#endif
+ result_reg, d0);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+ __ TestIfInt32(result_reg, scratch, r0);
+#else
+ __ TestIfInt32(scratch, result_reg, r0);
+#endif
+ __ beq(&fastpath_done);
+ }
+
+ __ Push(scratch_high, scratch_low);
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+ __ lwz(scratch_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
+ __ lwz(scratch_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+
+ __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
+ // Load scratch with exponent - 1. This is faster than loading
+ // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+ // If the exponent is greater than or equal to 84, the 32 least significant
+ // bits are all zero (84 = 52 mantissa bits + 32 low-order zero bits), so
+ // the result is 0.
+ // Compare exponent with 84 (compare exponent - 1 with 83).
+ __ cmpi(scratch, Operand(83));
+ __ bge(&out_of_range);
+
+ // If we reach this code, 31 <= exponent <= 83.
+ // So, we don't have to handle cases where 0 <= exponent <= 20 for
+ // which we would need to shift right the high part of the mantissa.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ subfic(scratch, scratch, Operand(51));
+ __ cmpi(scratch, Operand::Zero());
+ __ ble(&only_low);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ srw(scratch_low, scratch_low, scratch);
+ // Scratch contains: 52 - exponent.
+ // We need: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
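+ // (Illustrative example: for exponent == 30 the low word is shifted right
+ //  by 52 - 30 = 22 and the high mantissa bits left by 30 - 20 = 10 before
+ //  the two halves are OR'ed together below.)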
+ __ subfic(scratch, scratch, Operand(32));
+ __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
+ __ oris(result_reg, result_reg,
+ Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
+ __ slw(r0, result_reg, scratch);
+ __ orx(result_reg, scratch_low, r0);
+ __ b(&negate);
+
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done);
+
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ neg(scratch, scratch);
+ __ slw(result_reg, scratch_low, scratch);
+
+ __ bind(&negate);
+ // If the input was positive, scratch_high ASR 31 equals 0 and
+ // scratch_high LSR 31 equals 0.
+ // New result = (result eor 0) + 0 = result.
+ // If the input was negative, we have to negate the result.
+ // Then scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31
+ // equals 1.
+ // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ srawi(r0, scratch_high, 31);
+#if V8_TARGET_ARCH_PPC64
+ __ srdi(r0, r0, Operand(32));
+#endif
+ __ xor_(result_reg, result_reg, r0);
+ __ srwi(r0, scratch_high, Operand(31));
+ __ add(result_reg, result_reg, r0);
+
+ __ bind(&done);
+ __ Pop(scratch_high, scratch_low);
+
+ __ bind(&fastpath_done);
+ __ pop(scratch);
+
+ __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+ Condition cond) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ __ cmp(r3, r4);
+ __ bne(&not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are equal and they are not both Smis, so neither of them is a Smi.
+ // If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(slow);
+ } else {
+ __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
+ __ beq(&heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bge(slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmpi(r7, Operand(ODDBALL_TYPE));
+ __ bne(&return_equal);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, r5);
+ __ bne(&return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ li(r3, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(r3, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cond == lt) {
+ __ li(r3, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ li(r3, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ li(r3, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
+ __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
+ __ cmpli(r6, Operand(0x7ff));
+ __ bne(&return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+ __ orx(r3, r6, r5);
+ __ cmpi(r3, Operand::Zero());
+ // For equal we already have the right value in r3: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ Label not_equal;
+ __ bne(&not_equal);
+ // All-zero means Infinity means equal.
+ __ Ret();
+ __ bind(&not_equal);
+ if (cond == le) {
+ __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+
+ __ bind(&not_identical);
+}
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+ Register rhs, Label* lhs_not_nan,
+ Label* slow, bool strict) {
+ DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+ Label rhs_is_smi;
+ __ JumpIfSmi(rhs, &rhs_is_smi);
+
+ // Lhs is a Smi. Check whether the rhs is a heap number.
+ __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If rhs is r3, then there is already a non-zero value in it.
+ Label skip;
+ __ beq(&skip);
+ if (!rhs.is(r3)) {
+ __ mov(r3, Operand(NOT_EQUAL));
+ }
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ bne(slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert lhs to a double in d7.
+ __ SmiToDouble(d7, lhs);
+ // Load the double from rhs, tagged HeapNumber r3, to d6.
+ __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a smi.
+ __ b(lhs_not_nan);
+
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
+ __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs is not a number and rhs is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If lhs is r3, then there is already a non-zero value in it.
+ Label skip;
+ __ beq(&skip);
+ if (!lhs.is(r3)) {
+ __ mov(r3, Operand(NOT_EQUAL));
+ }
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
+ // the runtime.
+ __ bne(slow);
+ }
+
+ // Rhs is a smi, lhs is a heap number.
+ // Load the double from lhs, tagged HeapNumber r4, to d7.
+ __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ // Convert rhs to a double in d6.
+ __ SmiToDouble(d6, rhs);
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+ Register rhs) {
+ DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+ // If either operand is a JS object or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r5 and compare it with
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(&first_non_object);
+
+ // Return non-zero (r3 is not zero)
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ cmpi(r5, Operand(ODDBALL_TYPE));
+ __ beq(&return_not_equal);
+
+ __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ cmpi(r6, Operand(ODDBALL_TYPE));
+ __ beq(&return_not_equal);
+
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orx(r5, r5, r6);
+ __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ beq(&return_not_equal, cr0);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers, Label* slow) {
+ DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+ __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
+ __ bne(not_heap_numbers);
+ __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ cmp(r5, r6);
+ __ bne(slow); // First was a heap number, second wasn't. Go slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ b(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs, Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+ // r5 is object type of rhs.
+ Label object_test;
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ andi(r0, r5, Operand(kIsNotStringMask));
+ __ bne(&object_test, cr0);
+ __ andi(r0, r5, Operand(kIsNotInternalizedMask));
+ __ bne(possible_strings, cr0);
+ __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
+ __ bge(not_both_strings);
+ __ andi(r0, r6, Operand(kIsNotInternalizedMask));
+ __ bne(possible_strings, cr0);
+
+ // Both are internalized. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ li(r3, Operand(NOT_EQUAL));
+ __ Ret();
+
+ __ bind(&object_test);
+ __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ blt(not_both_strings);
+ __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(not_both_strings);
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ and_(r3, r5, r6);
+ __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
+ __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
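+ // r3 is now zero (meaning equal) exactly when both bit fields have the
+ // undetectable bit set, and non-zero otherwise.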
+ __ Ret();
+}
+
+
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+ Register scratch,
+ CompareICState::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareICState::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareICState::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+// On entry r4 and r5 are the values to be compared.
+// On exit r3 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r4;
+ Register rhs = r3;
+ Condition cc = GetCondition();
+
+ Label miss;
+ CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
+ CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+ Label not_two_smis, smi_done;
+ __ orx(r5, r4, r3);
+ __ JumpIfNotSmi(r5, &not_two_smis);
+ __ SmiUntag(r4);
+ __ SmiUntag(r3);
+ __ sub(r3, r4, r3);
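+ // The untagged difference is negative, zero or positive, which is exactly
+ // the encoding of the comparison result that the caller expects in r3.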
+ __ Ret();
+ __ bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK_EQ(0, Smi::FromInt(0));
+ __ and_(r5, lhs, rhs);
+ __ JumpIfNotSmi(r5, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to lhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded
+ // into d7 and d6.
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
+
+ __ bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in d6 and d7
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ __ fcmpu(d7, d6);
+
+ Label nan, equal, less_than;
+ __ bunordered(&nan);
+ __ beq(&equal);
+ __ blt(&less_than);
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+
+ __ bind(&nan);
+ // One of the operands was a NaN (the fcmpu above reported unordered). Load
+ // r3 with whatever it takes to make the comparison fail, since comparisons
+ // with NaN always fail.
+ if (cc == lt || cc == le) {
+ __ li(r3, Operand(GREATER));
+ } else {
+ __ li(r3, Operand(LESS));
+ }
+ __ Ret();
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
+ if (strict()) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into d6 and d7 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
+ // In this case r5 will contain the type of rhs_. Never falls through.
+ EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
+ &check_for_internalized_strings,
+ &flat_string_check);
+
+ __ bind(&check_for_internalized_strings);
+ // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+ // internalized strings.
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r5 is the type of rhs_ on entry.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
+ &slow);
+ }
+
+ // Check for both being sequential one-byte strings,
+ // and inline if that is the case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
+
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
+ r6);
+ if (cc == eq) {
+ StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
+ } else {
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+ __ push(r3);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ mflr(r0);
+ __ MultiPush(kJSCallerSaved | r0.bit());
+ if (save_doubles()) {
+ __ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r4;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
+ argument_count);
+ if (save_doubles()) {
+ __ RestoreFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ }
+ __ MultiPop(kJSCallerSaved | r0.bit());
+ __ mtlr(r0);
+ __ Ret();
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ PushSafepointRegisters();
+ __ blr();
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ __ PopSafepointRegisters();
+ __ blr();
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ const Register base = r4;
+ const Register exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(exponent.is(r5));
+ const Register heapnumbermap = r8;
+ const Register heapnumber = r3;
+ const DoubleRegister double_base = d1;
+ const DoubleRegister double_exponent = d2;
+ const DoubleRegister double_result = d3;
+ const DoubleRegister double_scratch = d0;
+ const Register scratch = r11;
+ const Register scratch2 = r10;
+
+ Label call_runtime, done, int_exponent;
+ if (exponent_type() == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
+ __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+ __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ b(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ ConvertIntToDouble(scratch, double_base);
+ __ bind(&unpack_exponent);
+
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+ __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ cmp(scratch, heapnumbermap);
+ __ bne(&call_runtime);
+
+ __ lfd(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type() == TAGGED) {
+ // Base is already in double_base.
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+ __ lfd(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type() != INTEGER) {
+ // Detect integer exponents stored as double.
+ __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
+ double_scratch);
+ __ beq(&int_exponent);
+
+ if (exponent_type() == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half, not_minus_inf1, not_minus_inf2;
+
+ // Test for 0.5.
+ __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+ __ fcmpu(double_exponent, double_scratch);
+ __ bne(&not_plus_half);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ fcmpu(double_base, double_scratch);
+ __ bne(&not_minus_inf1);
+ __ fneg(double_result, double_scratch);
+ __ b(&done);
+ __ bind(&not_minus_inf1);
+
+ // Add +0 to convert -0 to +0.
+ __ fadd(double_scratch, double_base, kDoubleRegZero);
+ __ fsqrt(double_result, double_scratch);
+ __ b(&done);
+
+ __ bind(&not_plus_half);
+ __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+ __ fcmpu(double_exponent, double_scratch);
+ __ bne(&call_runtime);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+ __ fcmpu(double_base, double_scratch);
+ __ bne(&not_minus_inf2);
+ __ fmr(double_result, kDoubleRegZero);
+ __ b(&done);
+ __ bind(&not_minus_inf2);
+
+ // Add +0 to convert -0 to +0.
+ __ fadd(double_scratch, double_base, kDoubleRegZero);
+ __ LoadDoubleLiteral(double_result, 1.0, scratch);
+ __ fsqrt(double_scratch, double_scratch);
+ __ fdiv(double_result, double_result, double_scratch);
+ __ b(&done);
+ }
+
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ __ pop(r0);
+ __ mtlr(r0);
+ __ MovFromFloatResult(double_result);
+ __ b(&done);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type() == INTEGER) {
+ __ mr(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mr(exponent, scratch);
+ }
+ __ fmr(double_scratch, double_base); // Back up base.
+ __ li(scratch2, Operand(1));
+ __ ConvertIntToDouble(scratch2, double_result);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ cmpi(scratch, Operand::Zero());
+ __ bge(&positive_exponent);
+ __ neg(scratch, scratch);
+ __ bind(&positive_exponent);
+
+ Label while_true, no_carry, loop_end;
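+ // The loop below computes double_result = double_scratch ** scratch by
+ // binary exponentiation: whenever the low bit of the exponent is set,
+ // double_result is multiplied by double_scratch; the base is then squared
+ // and the exponent shifted right.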
+ __ bind(&while_true);
+ __ andi(scratch2, scratch, Operand(1));
+ __ beq(&no_carry, cr0);
+ __ fmul(double_result, double_result, double_scratch);
+ __ bind(&no_carry);
+ __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
+ __ beq(&loop_end, cr0);
+ __ fmul(double_scratch, double_scratch, double_scratch);
+ __ b(&while_true);
+ __ bind(&loop_end);
+
+ __ cmpi(exponent, Operand::Zero());
+ __ bge(&done);
+
+ __ li(scratch2, Operand(1));
+ __ ConvertIntToDouble(scratch2, double_scratch);
+ __ fdiv(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ fcmpu(double_result, kDoubleRegZero);
+ __ bne(&done);
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it with the exponent value before bailing out.
+ __ ConvertIntToDouble(exponent, double_exponent);
+
+ // Returning or bailing out.
+ Counters* counters = isolate()->counters();
+ if (exponent_type() == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
+ &call_runtime);
+ __ stfd(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ DCHECK(heapnumber.is(r3));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret(2);
+ } else {
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
+ }
+ __ pop(r0);
+ __ mtlr(r0);
+ __ MovFromFloatResult(double_result);
+
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() { return true; }
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ // WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub(isolate);
+ stub.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub(isolate);
+ stub.GetCode();
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Generate if not already in cache.
+ SaveFPRegsMode mode = kSaveFPRegs;
+ CEntryStub(isolate, 1, mode).GetCode();
+ StoreBufferOverflowStub(isolate, mode).GetCode();
+ isolate->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function.
+ // r3: number of arguments including receiver
+ // r4: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ __ mr(r15, r4);
+
+ // Compute the argv pointer.
+ __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+ __ add(r4, r4, sp);
+ __ subi(r4, r4, Operand(kPointerSize));
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+
+ // Need at least one extra slot for return address location.
+ int arg_stack_space = 1;
+
+// PPC LINUX ABI:
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ // Pass buffer for return value on stack if necessary
+ if (result_size() > 1) {
+ DCHECK_EQ(2, result_size());
+ arg_stack_space += 2;
+ }
+#endif
+
+ __ EnterExitFrame(save_doubles(), arg_stack_space);
+
+ // Store a copy of argc in callee-saved registers for later.
+ __ mr(r14, r3);
+
+ // r3, r14: number of arguments including receiver (C callee-saved)
+ // r4: pointer to the first argument
+ // r15: pointer to builtin function (C callee-saved)
+
+ // Result returned in registers or stack, depending on result size and ABI.
+
+ Register isolate_reg = r5;
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ if (result_size() > 1) {
+    // The return value is a 16-byte non-scalar value.
+ // Use frame storage reserved by calling function to pass return
+ // buffer as implicit first argument.
+ __ mr(r5, r4);
+ __ mr(r4, r3);
+ __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ isolate_reg = r6;
+ }
+#endif
+
+ // Call C built-in.
+ __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+ // Native AIX/PPC64 Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
+ __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
+ Register target = ip;
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ __ Move(ip, r15);
+ Register target = ip;
+#else
+ Register target = r15;
+#endif
+
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
+  // Compute the return address in lr to return to after the Call(target)
+  // below. The branch-and-link sets lr to the address of the 'here' label;
+  // the distance from there to the instruction following the call is then
+  // added to form the return address (see the constant note below).
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label here;
+ __ b(&here, SetLK);
+ __ bind(&here);
+ __ mflr(r8);
+
+ // Constant used below is dependent on size of Call() macro instructions
+ __ addi(r0, r8, Operand(20));
+
+ __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ Call(target);
+ }
+
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ // If return value is on the stack, pop it to registers.
+ if (result_size() > 1) {
+ __ LoadP(r4, MemOperand(r3, kPointerSize));
+ __ LoadP(r3, MemOperand(r3));
+ }
+#endif
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(&okay);
+ __ stop("The hole escaped");
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(r3, Heap::kExceptionRootIndex);
+ __ beq(&exception_returned);
+
+ ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
+ isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ mov(r5, Operand(pending_exception_address));
+ __ LoadP(r5, MemOperand(r5));
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ // Cannot use check here as it attempts to generate call into runtime.
+ __ beq(&okay);
+ __ stop("Unexpected pending exception");
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // r3:r4: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // r14: still holds argc (callee-saved).
+ __ LeaveExitFrame(save_doubles(), r14, true);
+ __ blr();
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ // Retrieve the pending exception.
+ __ mov(r5, Operand(pending_exception_address));
+ __ LoadP(r3, MemOperand(r5));
+
+ // Clear the pending exception.
+ __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ StoreP(r6, MemOperand(r5));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ Label throw_termination_exception;
+ __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
+ __ beq(&throw_termination_exception);
+
+ // Handle normal exception.
+ __ Throw(r3);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(r3);
+}
+
+
+void JSEntryStub::Generate(MacroAssembler* masm) {
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // [sp+0]: argv
+
+ Label invoke, handler_entry, exit;
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // PPC LINUX ABI:
+ // preserve LR in pre-reserved slot in caller's frame
+ __ mflr(r0);
+ __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved);
+
+  // Floating point regs FPR0 - FPR13 are volatile
+ // FPR14-FPR31 are non-volatile, but sub-calls will save them for us
+
+ // int offset_to_argv = kPointerSize * 22; // matches (22*4) above
+ // __ lwz(r7, MemOperand(sp, offset_to_argv));
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // r7: argv
+ __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ push(r0);
+#if V8_OOL_CONSTANT_POOL
+ __ mov(kConstantPoolRegister,
+ Operand(isolate()->factory()->empty_constant_pool_array()));
+ __ push(kConstantPoolRegister);
+#endif
+ int marker = type();
+ __ LoadSmiLiteral(r0, Smi::FromInt(marker));
+ __ push(r0);
+ __ push(r0);
+ // Save copies of the top frame descriptor on the stack.
+ __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ LoadP(r0, MemOperand(r8));
+ __ push(r0);
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+ __ LoadP(r9, MemOperand(r8));
+ __ cmpi(r9, Operand::Zero());
+ __ bne(&non_outermost_js);
+ __ StoreP(fp, MemOperand(r8));
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ push(ip); // frame-type
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ b(&invoke);
+
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+
+ __ StoreP(r3, MemOperand(ip));
+ __ LoadRoot(r3, Heap::kExceptionRootIndex);
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ bind(&invoke);
+  // Must preserve r0-r4; r5-r7 are available. (needs update for PPC)
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the b(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(r8, Operand(isolate()->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ StoreP(r8, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r3: code entry
+ // r4: function
+ // r5: receiver
+ // r6: argc
+ // r7: argv
+ if (type() == StackFrame::ENTRY_CONSTRUCT) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate());
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+ __ mov(ip, Operand(entry));
+ }
+ __ LoadP(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline.
+ // the address points to the start of the code object, skip the header
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mtctr(ip);
+ __ bctrl(); // make the call
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit); // r3 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r8);
+ __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+ __ bne(&non_outermost_js_2);
+ __ mov(r9, Operand::Zero());
+ __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+ __ StoreP(r9, MemOperand(r8));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(r6);
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ StoreP(r6, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+// Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ Label here;
+ __ b(&here, SetLK);
+ __ bind(&here);
+ }
+#endif
+
+ __ MultiPop(kCalleeSaved);
+
+ __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+ __ mtctr(r0);
+ __ bctr();
+}
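+
+// The js_entry_sp bookkeeping above can be summarized by the following
+// standalone sketch (types and names are hypothetical; the real code stores
+// fp into the js_entry_sp external reference and pushes a Smi frame-type
+// marker):
+struct JSEntrySPSketch {
+  void* js_entry_sp = nullptr;  // mirrors the ExternalReference js_entry_sp
+
+  // On entry: record whether this is the outermost JS entry frame.
+  bool Enter(void* frame_pointer) {
+    if (js_entry_sp == nullptr) {
+      js_entry_sp = frame_pointer;  // StoreP(fp, MemOperand(r8))
+      return true;                  // push OUTERMOST_JSENTRY_FRAME marker
+    }
+    return false;                   // push INNER_JSENTRY_FRAME marker
+  }
+
+  // On exit: only the outermost entry frame clears js_entry_sp again.
+  void Exit(bool was_outermost) {
+    if (was_outermost) js_entry_sp = nullptr;
+  }
+};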
+
+
+// Uses registers r3 to r7.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r3 or at sp + 1 * kPointerSize.
+// * function: r4 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed in r8.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub:
+ const Register object = r3; // Object (lhs).
+ Register map = r6; // Map of the object.
+ const Register function = r4; // Function (rhs).
+ const Register prototype = r7; // Prototype of the function.
+ const Register inline_site = r9;
+ const Register scratch = r5;
+ Register scratch3 = no_reg;
+
+// delta = mov + unaligned LoadP + cmp + bne
+#if V8_TARGET_ARCH_PPC64
+ const int32_t kDeltaToLoadBoolResult =
+ (Assembler::kMovInstructions + 4) * Assembler::kInstrSize;
+#else
+ const int32_t kDeltaToLoadBoolResult =
+ (Assembler::kMovInstructions + 3) * Assembler::kInstrSize;
+#endif
+
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+ if (!HasArgsInRegisters()) {
+ __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left hand is a JS object and load map.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
+ Label miss;
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ bne(&miss);
+ __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ __ bne(&miss);
+ __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ DCHECK(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in r8
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register offset = r8;
+ __ mflr(inline_site);
+ __ sub(inline_site, inline_site, offset);
+ // Get the map location in r8 and patch it.
+ __ GetRelocatedValue(inline_site, offset, scratch);
+ __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
+ }
+
+ // Register mapping: r6 is object map and r7 is function prototype.
+ // Get prototype of object into r5.
+ __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // We don't need map any more. Use it as a scratch register.
+ scratch3 = map;
+ map = no_reg;
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmp(scratch, prototype);
+ __ beq(&is_instance);
+ __ cmp(scratch, scratch3);
+ __ beq(&is_not_instance);
+ __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ b(&loop);
+ Factory* factory = isolate()->factory();
+
+ __ bind(&is_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r3, factory->true_value());
+ }
+ } else {
+ // Patch the call site to return true.
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ SetRelocatedValue(inline_site, scratch, r3);
+
+ if (!ReturnTrueFalseObject()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ }
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r3, factory->false_value());
+ }
+ } else {
+ // Patch the call site to return false.
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ SetRelocatedValue(inline_site, scratch, r3);
+
+ if (!ReturnTrueFalseObject()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ }
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before null, smi and string value checks, check that the rhs is a function
+ // as for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+
+ // Null is not instance of anything.
+ __ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
+ __ bne(&object_not_null);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r3, factory->false_value());
+ } else {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r3, factory->false_value());
+ } else {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ if (ReturnTrueFalseObject()) {
+ __ Move(r3, factory->false_value());
+ } else {
+ __ LoadSmiLiteral(r3, Smi::FromInt(1));
+ }
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(r3, r4);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r4);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ Label true_value, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&true_value);
+
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&true_value);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+ }
+}
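+
+// The loop between &loop and &is_not_instance walks the object's prototype
+// chain until it reaches either the function's prototype or null. Standalone
+// sketch with a hypothetical node type (not part of V8):
+struct PrototypeNodeSketch {
+  const PrototypeNodeSketch* prototype;  // nullptr stands in for null_value
+};
+
+static bool IsInstanceOfSketch(const PrototypeNodeSketch* object,
+                               const PrototypeNodeSketch* function_prototype) {
+  // Start from the prototype recorded in the object's map.
+  const PrototypeNodeSketch* current = object->prototype;
+  while (true) {
+    if (current == function_prototype) return true;  // &is_instance
+    if (current == nullptr) return false;            // &is_not_instance
+    current = current->prototype;  // load map, then Map::kPrototypeOffset
+  }
+}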
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = LoadDescriptor::ReceiverRegister();
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
+ r7, &miss);
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
+ Register scratch = r6;
+ Register result = r3;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX,
+ RECEIVER_IS_STRING);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
+ DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(r4, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register r3. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpl(r4, r3);
+ __ bge(&slow);
+
+ // Read the argument from the stack and return it.
+ __ sub(r6, r3, r4);
+ __ SmiToPtrArrayOffset(r6, r6);
+ __ add(r6, fp, r6);
+ __ LoadP(r3, MemOperand(r6, kDisplacement));
+ __ blr();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpl(r4, r3);
+ __ bge(&slow);
+
+ // Read the argument from the adaptor frame and return it.
+ __ sub(r6, r3, r4);
+ __ SmiToPtrArrayOffset(r6, r6);
+ __ add(r6, r5, r6);
+ __ LoadP(r3, MemOperand(r6, kDisplacement));
+ __ blr();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r4);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[1] : receiver displacement
+ // sp[2] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+ __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));
+ __ SmiToPtrArrayOffset(r5, r5);
+ __ add(r6, r6, r5);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[1] : address of receiver argument
+ // sp[2] : function
+ // Registers used over whole function:
+ // r9 : allocated object (tagged)
+ // r11 : mapped parameter count (tagged)
+
+ __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
+ // r4 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+ __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mr(r5, r4);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ add(r6, r6, r7);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+ // r4 = parameter count (tagged)
+ // r5 = argument count (tagged)
+ // Compute the mapped parameter count = min(r4, r5) in r4.
+ Label skip;
+ __ cmp(r4, r5);
+ __ blt(&skip);
+ __ mr(r4, r5);
+ __ bind(&skip);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label skip2, skip3;
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ bne(&skip2);
+ __ li(r11, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r11, r4);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+
+ // 2. Backing store.
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ add(r11, r11, r7);
+ __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT);
+
+ // r3 = address of new object(s) (tagged)
+ // r5 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ LoadP(r7,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ Label skip4, skip5;
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&skip4);
+ __ LoadP(r7, MemOperand(r7, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ bind(&skip5);
+
+ // r3 = address of new object (tagged)
+ // r4 = mapped parameter count (tagged)
+ // r5 = argument count (smi-tagged)
+ // r7 = address of arguments map (tagged)
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ LoadP(r6, MemOperand(sp, 2 * kPointerSize));
+ __ AssertNotSmi(r6);
+ const int kCalleeOffset =
+ JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r5);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset =
+ JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
+ __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r7 will point there, otherwise
+ // it will point to the backing store.
+ __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // r3 = address of new object (tagged)
+ // r4 = mapped parameter count (tagged)
+ // r5 = argument count (tagged)
+ // r7 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map, skip6;
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ bne(&skip6);
+ // Move backing store address to r6, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r6, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+
+ __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
+ __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
+ r0);
+ __ SmiToPtrArrayOffset(r9, r4);
+ __ add(r9, r7, r9);
+ __ addi(r9, r9, Operand(kParameterMapHeaderSize));
+ __ StoreP(r9, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+ r0);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mr(r9, r4);
+ __ LoadP(r11, MemOperand(sp, 0 * kPointerSize));
+ __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r11, r11, r4);
+ __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r6, r9);
+ __ add(r6, r7, r6);
+ __ addi(r6, r6, Operand(kParameterMapHeaderSize));
+
+ // r9 = loop variable (tagged)
+ // r4 = mapping index (tagged)
+ // r6 = address of backing store (tagged)
+ // r7 = address of parameter map (tagged)
+ // r8 = temporary scratch (a.o., for address calculation)
+ // r10 = the hole value
+ __ b(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ StorePX(r11, MemOperand(r8, r7));
+ __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ StorePX(r10, MemOperand(r8, r6));
+ __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+ __ bind(&parameters_test);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ bne(&parameters_loop);
+
+ __ bind(&skip_parameter_map);
+ // r5 = argument count (tagged)
+ // r6 = address of backing store (tagged)
+ // r8 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+
+ Label arguments_loop, arguments_test;
+ __ mr(r11, r4);
+ __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
+ __ SmiToPtrArrayOffset(r8, r11);
+ __ sub(r7, r7, r8);
+ __ b(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ subi(r7, r7, Operand(kPointerSize));
+ __ LoadP(r9, MemOperand(r7, 0));
+ __ SmiToPtrArrayOffset(r8, r11);
+ __ add(r8, r6, r8);
+ __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+ __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+
+ __ bind(&arguments_test);
+ __ cmp(r11, r5);
+ __ blt(&arguments_loop);
+
+ // Return and remove the on-stack parameters.
+ __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r5 = argument count (tagged)
+ __ bind(&runtime);
+ __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
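+
+// The allocation size accumulated in r11 above breaks down as sketched
+// below. The function is illustrative only; its parameters stand in for the
+// V8 constants used in the stub (kPointerSize, FixedArray::kHeaderSize,
+// Heap::kSloppyArgumentsObjectSize).
+static int SloppyArgumentsAllocationSizeSketch(int mapped_count,
+                                               int argument_count,
+                                               int pointer_size,
+                                               int fixed_array_header_size,
+                                               int arguments_object_size) {
+  // 1. Parameter map: header plus two extra words (context, backing store),
+  //    omitted entirely when no parameters are mapped.
+  int parameter_map_header = fixed_array_header_size + 2 * pointer_size;
+  int parameter_map_size =
+      mapped_count == 0 ? 0
+                        : mapped_count * pointer_size + parameter_map_header;
+  // 2. Backing store for the arguments themselves.
+  int backing_store_size =
+      fixed_array_header_size + argument_count * pointer_size;
+  // 3. The arguments JSObject itself.
+  return parameter_map_size + backing_store_size + arguments_object_size;
+}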
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Check that the key is an array index, that is Uint32.
+  // Check that the key is an array index, that is, a Uint32.
+ __ bne(&slow, cr0);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[1] : receiver displacement
+  // sp[2] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // Get the length from the frame.
+ __ LoadP(r4, MemOperand(sp, 0));
+ __ b(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ SmiToPtrArrayOffset(r6, r4);
+ __ add(r6, r5, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ cmpi(r4, Operand::Zero());
+ __ beq(&add_arguments_object);
+ __ SmiUntag(r4);
+ __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ Allocate(r4, r3, r5, r6, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current native context.
+ __ LoadP(r7,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ __ LoadP(
+ r7,
+ MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
+ __ AssertSmi(r4);
+ __ StoreP(r4,
+ FieldMemOperand(r3, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ r0);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ cmpi(r4, Operand::Zero());
+ __ beq(&done);
+
+ // Get the parameters pointer from the stack.
+ __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ // Untag the length for the loop.
+ __ SmiUntag(r4);
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Set up r7 to point just prior to the first array slot.
+ __ addi(r7, r7,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ mtctr(r4);
+ __ bind(&loop);
+ // Pre-decrement r5 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ // Pre-increment r7 with kPointerSize on each iteration.
+ __ StorePU(r6, MemOperand(r7, kPointerSize));
+ __ bdnz(&loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+}
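+
+// The size computed in r4 above, counted in words rather than bytes
+// (illustrative only; the parameters stand in for FixedArray::kHeaderSize
+// and Heap::kStrictArgumentsObjectSize divided by kPointerSize):
+static int StrictArgumentsSizeInWordsSketch(int argument_count,
+                                            int fixed_array_header_words,
+                                            int arguments_object_words) {
+  int size_in_words = arguments_object_words;
+  if (argument_count > 0) {
+    // Elements fixed array: header plus one word per argument; omitted
+    // entirely when there are no actual arguments.
+    size_in_words += fixed_array_header_words + argument_count;
+  }
+  return size_in_words;
+}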
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+// Just jump directly to the runtime if native RegExp is not selected at
+// compile time, or if the regexp entry in generated code has been turned off
+// by a runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, br_over, encoding_type_UC16;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+ Register subject = r14;
+ Register regexp_data = r15;
+ Register last_match_info_elements = r16;
+ Register code = r17;
+
+  // Ensure register assignments are consistent with callee save masks
+ DCHECK(subject.bit() & kCalleeSaved);
+ DCHECK(regexp_data.bit() & kCalleeSaved);
+ DCHECK(last_match_info_elements.bit() & kCalleeSaved);
+ DCHECK(code.bit() & kCalleeSaved);
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(r3, Operand(address_of_regexp_stack_memory_size));
+ __ LoadP(r3, MemOperand(r3, 0));
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
+ __ JumpIfSmi(r3, &runtime);
+ __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
+ __ bne(&runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ TestIfSmi(regexp_data, r0);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
+ __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
+ __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
+ __ bne(&runtime);
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ LoadP(r5,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // SmiToShortArrayOffset accomplishes the multiplication by 2 and
+ // SmiUntag (which is a nop for 32-bit).
+ __ SmiToShortArrayOffset(r5, r5);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
+ __ bgt(&runtime);
+
+ // Reset offset for possibly sliced string.
+ __ li(r11, Operand::Zero());
+ __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mr(r6, subject); // Make a copy of the original subject string.
+ __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ // subject: subject string
+ // r6: subject string
+ // r3: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+  // Otherwise bail out.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
+ not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
+
+ // (1) Sequential string? If yes, go to (5).
+ STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
+ kShortExternalStringMask) == 0x93);
+ __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
+ kShortExternalStringMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ beq(&seq_string, cr0); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ STATIC_ASSERT(kExternalStringTag < 0xffffu);
+ __ cmpi(r4, Operand(kExternalStringTag));
+ __ bge(&not_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
+ __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ CompareRoot(r3, Heap::kempty_stringRootIndex);
+ __ bne(&runtime);
+ __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
+ __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kStringRepresentationMask == 3);
+ __ andi(r0, r3, Operand(kStringRepresentationMask));
+ // The underlying external string is never a short external string.
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ bne(&external_string, cr0); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ bind(&seq_string);
+ // subject: sequential subject string (or look-alike, external string)
+ // r6: original subject string
+ // Load previous index and check range before r6 is overwritten. We have to
+ // use r6 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r4, &runtime);
+ __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
+ __ cmpl(r6, r4);
+ __ ble(&runtime);
+ __ SmiUntag(r4);
+
+ STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
+ __ beq(&encoding_type_UC16, cr0);
+ __ LoadP(code,
+ FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+ __ b(&br_over);
+ __ bind(&encoding_type_UC16);
+ __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ bind(&br_over);
+
+ // (E) Carry on. String handling is done.
+ // code: irregexp code
+ // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+ __ JumpIfSmi(code, &runtime);
+
+ // r4: previous index
+ // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
+ // code: Address of generated regexp code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ const int kRegExpExecuteArguments = 10;
+ const int kParameterRegisters = 8;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers.
+
+ // Argument 10 (in stack parameter area): Pass current isolate address.
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+ // Argument 9 is a dummy that reserves the space used for
+ // the return address added by the ExitFrame in native calls.
+
+ // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
+ __ li(r10, Operand(1));
+
+ // Argument 7 (r9): Start (high end) of backtracking stack memory area.
+ __ mov(r3, Operand(address_of_regexp_stack_memory_address));
+ __ LoadP(r3, MemOperand(r3, 0));
+ __ mov(r5, Operand(address_of_regexp_stack_memory_size));
+ __ LoadP(r5, MemOperand(r5, 0));
+ __ add(r9, r3, r5);
+
+ // Argument 6 (r8): Set the number of capture registers to zero to force
+  // global regexps to behave as non-global. This does not affect non-global
+ // regexps.
+ __ li(r8, Operand::Zero());
+
+ // Argument 5 (r7): static offsets vector buffer.
+ __ mov(
+ r7,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
+
+ // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
+ // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
+ __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ xori(r6, r6, Operand(1));
+ // Load the length from the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+ __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ // If slice offset is not 0, load the length from the original sliced string.
+ // Argument 4, r6: End of string data
+ // Argument 3, r5: Start of string data
+ // Prepare start and end index of the input.
+ __ ShiftLeft_(r11, r11, r6);
+ __ add(r11, r18, r11);
+ __ ShiftLeft_(r5, r4, r6);
+ __ add(r5, r11, r5);
+
+ __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
+ __ SmiUntag(r18);
+ __ ShiftLeft_(r6, r18, r6);
+ __ add(r6, r11, r6);
+
+ // Argument 2 (r4): Previous index.
+ // Already there
+
+ // Argument 1 (r3): Subject string.
+ __ mr(r3, subject);
+
+ // Locate the code entry and call it.
+ __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+
+#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
+ // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
+ // RegExp routine. Extract the instruction address here since
+ // DirectCEntryStub::GenerateCall will not do it for calls out to
+ // what it thinks is C code compiled for the simulator/host
+ // platform.
+ __ LoadP(code, MemOperand(code, 0)); // Instruction address
+#endif
+
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm, code);
+
+ __ LeaveExitFrame(false, no_reg, true);
+
+ // r3: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+ // Check the result.
+ Label success;
+ __ cmpi(r3, Operand(1));
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ __ beq(&success);
+ Label failure;
+ __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ beq(&failure);
+ __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ bne(&runtime);
+  // Result must now be exception. If there is no pending exception already,
+  // a stack overflow (on the backtrack stack) was detected in RegExp code
+  // but the exception has not been created yet. Handle that in the runtime
+  // system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
+ __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate())));
+ __ LoadP(r3, MemOperand(r5, 0));
+ __ cmp(r3, r4);
+ __ beq(&runtime);
+
+ __ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
+
+ Label termination_exception;
+ __ beq(&termination_exception);
+
+ __ Throw(r3);
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(r3);
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(r3, Operand(isolate()->factory()->null_value()));
+ __ addi(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ LoadP(r4,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ // SmiToShortArrayOffset accomplishes the multiplication by 2 and
+ // SmiUntag (which is a nop for 32-bit).
+ __ SmiToShortArrayOffset(r4, r4);
+ __ addi(r4, r4, Operand(2));
+
+ __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r3, &runtime);
+ __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE);
+ __ bne(&runtime);
+ // Check that the JSArray is in fast case.
+ __ LoadP(last_match_info_elements,
+ FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ LoadP(r3,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ bne(&runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ LoadP(
+ r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
+ __ SmiUntag(r0, r3);
+ __ cmp(r5, r0);
+ __ bgt(&runtime);
+
+ // r4: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ SmiTag(r5, r4);
+ __ StoreP(r5, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset),
+ r0);
+ // Store last subject and last input.
+ __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset),
+ r0);
+ __ mr(r5, subject);
+ __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
+ subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mr(subject, r5);
+ __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset),
+ r0);
+ __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
+ subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate());
+ __ mov(r5, Operand(address_of_static_offsets_vector));
+
+ // r4: number of capture registers
+ // r5: offsets vector
+ Label next_capture;
+ // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+ __ addi(
+ r3, last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+ __ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
+ __ mtctr(r4);
+ __ bind(&next_capture);
+ // Read the value from the static offsets vector buffer.
+ __ lwzu(r6, MemOperand(r5, kIntSize));
+ // Store the smi value in the last match info.
+ __ SmiTag(r6);
+ __ StorePU(r6, MemOperand(r3, kPointerSize));
+ __ bdnz(&next_capture);
+
+ // Return last match info.
+ __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+ __ addi(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ bgt(&not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ bind(&external_string);
+ __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ STATIC_ASSERT(kIsIndirectStringMask == 1);
+ __ andi(r0, r3, Operand(kIsIndirectStringMask));
+ __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+ }
+ __ LoadP(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ subi(subject, subject,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ b(&seq_string); // Go to (5).
+
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ bne(&runtime, cr0);
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into r11 and replace subject string with parent.
+ __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ SmiUntag(r11);
+ __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ b(&check_underlying); // Go to (4).
+#endif // V8_INTERPRETED_REGEXP
+}
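+
+// The string-shape dispatch in steps (1)-(9) above roughly follows the
+// control flow sketched below (the enum and struct are hypothetical; the
+// real code works on instance-type bits and also tracks the slice offset):
+enum RegExpSubjectShapeSketch {
+  kSequentialSketch,
+  kConsSketch,
+  kSlicedSketch,
+  kExternalSketch,
+  kOtherSketch
+};
+
+struct RegExpSubjectSketch {
+  RegExpSubjectShapeSketch shape;
+  RegExpSubjectSketch* first;   // cons: first part (second must be empty)
+  RegExpSubjectSketch* parent;  // sliced: the underlying string
+};
+
+// Returns the string whose characters the generated RegExp code will read,
+// or nullptr when the stub has to bail out to the runtime.
+static RegExpSubjectSketch* FlattenSubjectSketch(RegExpSubjectSketch* subject) {
+  if (subject->shape == kConsSketch) {
+    subject = subject->first;   // (3) flat cons: continue with the first part
+  } else if (subject->shape == kSlicedSketch) {
+    subject = subject->parent;  // (9) sliced: continue with the parent
+  }
+  // (4)/(5)/(7): sequential strings are used directly; external strings are
+  // adjusted, offset-wise, to look sequential. Everything else is handled by
+  // the runtime.
+  if (subject->shape == kSequentialSketch ||
+      subject->shape == kExternalSketch) {
+    return subject;
+  }
+  return nullptr;  // (8) short external string or not a string
+}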
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a feedback vector slot. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r3 : number of arguments to the construct function
+ // r4 : the function to call
+ // r5 : Feedback vector
+ // r6 : slot in feedback vector (Smi)
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state into r7.
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(r7, r4);
+ __ b(eq, &done);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+    // If we didn't have a matching function, and we didn't find the
+    // megamorphic sentinel, then the slot holds either some other function
+    // or an AllocationSite. Do a map check on the object in r7.
+ __ LoadP(r8, FieldMemOperand(r7, 0));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ cmp(r4, r7);
+ __ bne(&megamorphic);
+ __ b(&done);
+ }
+
+ __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function
+ __ bind(&initialize);
+
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ cmp(r4, r7);
+ __ bne(&not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r3);
+ __ Push(r6, r5, r4, r3);
+
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ __ CallStub(&create_stub);
+
+ __ Pop(r6, r5, r4, r3);
+ __ SmiUntag(r3);
+ }
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ }
+
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ StoreP(r4, MemOperand(r7, 0));
+
+ __ Push(r7, r5, r4);
+ __ RecordWrite(r5, r7, r4, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r7, r5, r4);
+
+ __ bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions and natives.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
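+  // On 32-bit targets the compiler hints field is a Smi, so the bit index is
+  // offset by kSmiTagSize below.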
+ __ TestBit(r7,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kStrictModeFunction,
+#else
+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(cont, cr0);
+
+ // Do not transform the receiver for native.
+ __ TestBit(r7,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kNative,
+#else
+ SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+ r0);
+ __ bne(cont, cr0);
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
+ // Check for function proxy.
+ STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
+ __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ bne(non_function);
+ __ push(r4); // put proxy as additional argument
+ __ li(r3, Operand(argc + 1));
+ __ li(r5, Operand::Zero());
+ __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ bind(non_function);
+ __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0);
+ __ li(r3, Operand(argc)); // Set up the number of arguments.
+ __ li(r5, Operand::Zero());
+ __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ {
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ pop(r4);
+ }
+ __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
+ __ b(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
+ bool needs_checks, bool call_as_method) {
+ // r4 : the function to call
+ Label slow, non_function, wrap, cont;
+
+ if (needs_checks) {
+ // Check that the function is really a JavaScript function.
+ // r4: pushed function (to be verified)
+ __ JumpIfSmi(r4, &non_function);
+
+    // Go to the slow case if we do not have a function.
+ __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+ }
+
+ // Fast-case: Invoke the function now.
+ // r4: pushed function
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
+
+ if (needs_checks) {
+ __ JumpIfSmi(r6, &wrap);
+ __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(&wrap);
+ } else {
+ __ b(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // r3 : number of arguments
+ // r4 : the function to call
+ // r5 : feedback vector
+ // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(r4, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r5.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r6 + 1.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jmp_reg = r7;
+ __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(jmp_reg,
+ FieldMemOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset));
+ __ addi(ip, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ // r3: number of arguments
+ // r4: called object
+ // r7: object type
+ Label do_call;
+ __ bind(&slow);
+ STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
+ __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ bne(&non_function_call);
+ __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ b(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing r3).
+ __ li(r5, Operand::Zero());
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
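+  // The feedback vector is reached through the SharedFunctionInfo of the
+  // function in the current JavaScript frame.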
+ __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(vector,
+ FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // r4 - function
+ // r6 - slot id
+ Label miss;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r5);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ cmp(r4, r7);
+ __ bne(&miss);
+
+ __ mov(r3, Operand(arg_count()));
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+ // Verify that r7 contains an AllocationSite
+ __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&miss);
+
+ __ mr(r5, r7);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+  // The slow case: we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+ // Unreachable.
+ __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // r4 - function
+ // r6 - slot id (Smi)
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, r5);
+
+ // The checks. First, does r4 match the recorded monomorphic target?
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+ __ cmp(r4, r7);
+ __ bne(&extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ // Compute the receiver in sloppy mode.
+ __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
+
+ __ JumpIfSmi(r6, &wrap);
+ __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(&wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(masm, argc, &non_function);
+
+ if (CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, argc, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
+ __ beq(&slow_start);
+ __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ beq(&miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic. If the feedback is a JSFunction, it is fine
+ // to handle it here. More complex cases are dealt with in the runtime.
+ __ AssertNotSmi(r7);
+ __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
+ __ bne(&miss);
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+ __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ // We have to update statistics for runtime profiling.
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+ __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
+ __ LoadP(r7, FieldMemOperand(r5, generic_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm);
+
+  // The slow case.
+ __ bind(&slow_start);
+ // Check that the function is really a JavaScript function.
+ // r4: pushed function (to be verified)
+ __ JumpIfSmi(r4, &non_function);
+
+  // Go to the slow case if we do not have a function.
+ __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ bne(&slow);
+ __ b(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ LoadP(r7, MemOperand(sp, (arg_count() + 1) * kPointerSize), r0);
+
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r7, r4, r5, r6);
+
+ // Call the entry.
+ IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+ : IC::kCallIC_Customization_Miss;
+
+ ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+ // Move result to r4 and exit the internal frame.
+ __ mr(r4, r3);
+ }
+}
+
+
+// StringCharCodeAtGenerator
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ andi(r0, result_, Operand(kIsNotStringMask));
+ __ bne(receiver_not_string_, cr0);
+ }
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmpl(ip, index_);
+ __ ble(index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm, object_, index_, result_,
+ &call_runtime_);
+
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(index_, r3);
+ __ pop(object_);
+ // Reload the instance type.
+ __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ b(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ Move(result_, r3);
+ call_helper.AfterCall(masm);
+ __ b(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
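+  // A single mask test below checks both that code_ is a Smi and that its
+  // value does not exceed String::kMaxOneByteCharCode.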
+ __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCode));
+ __ ori(r0, r0, Operand(kSmiTagMask));
+ __ and_(r0, code_, r0);
+ __ cmpi(r0, Operand::Zero());
+ __ bne(&slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged one-byte char code.
+ __ mr(r0, code_);
+ __ SmiToPtrArrayOffset(code_, code_);
+ __ add(result_, result_, code_);
+ __ mr(code_, r0);
+ __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ beq(&slow_case_);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, r3);
+ call_helper.AfterCall(masm);
+ __ b(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+ Register src, Register count,
+ Register scratch,
+ String::Encoding encoding) {
+ if (FLAG_debug_code) {
+ // Check that destination is word aligned.
+ __ andi(r0, dest, Operand(kPointerAlignmentMask));
+ __ Check(eq, kDestinationOfCopyNotAligned, cr0);
+ }
+
+ // Nothing to do for zero characters.
+ Label done;
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ // double the length
+ __ add(count, count, count, LeaveOE, SetRC);
+ __ beq(&done, cr0);
+ } else {
+ __ cmpi(count, Operand::Zero());
+ __ beq(&done);
+ }
+
+ // Copy count bytes from src to dst.
+ Label byte_loop;
+ __ mtctr(count);
+ __ bind(&byte_loop);
+ __ lbz(scratch, MemOperand(src));
+ __ addi(src, src, Operand(1));
+ __ stb(scratch, MemOperand(dest));
+ __ addi(dest, dest, Operand(1));
+ __ bdnz(&byte_loop);
+
+ __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
+
+ __ LoadP(r5, MemOperand(sp, kToOffset));
+ __ LoadP(r6, MemOperand(sp, kFromOffset));
+
+  // If either to or from has the smi tag bit set, bail out to the runtime.
+ __ JumpIfNotSmi(r5, &runtime);
+ __ JumpIfNotSmi(r6, &runtime);
+ __ SmiUntag(r5);
+ __ SmiUntag(r6, SetRC);
+ // Both r5 and r6 are untagged integers.
+
+  // We want to bail out to the runtime here if From is negative.
+ __ blt(&runtime, cr0); // From < 0.
+
+ __ cmpl(r6, r5);
+ __ bgt(&runtime); // Fail if from > to.
+ __ sub(r5, r5, r6);
+
+ // Make sure first argument is a string.
+ __ LoadP(r3, MemOperand(sp, kStringOffset));
+ __ JumpIfSmi(r3, &runtime);
+ Condition is_string = masm->IsObjectStringType(r3, r4);
+ __ b(NegateCondition(is_string), &runtime, cr0);
+
+ Label single_char;
+ __ cmpi(r5, Operand(1));
+ __ b(eq, &single_char);
+
+ // Short-cut for the case of trivial substring.
+ Label return_r3;
+ // r3: original string
+ // r5: result string length
+ __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
+ __ SmiUntag(r0, r7);
+ __ cmpl(r5, r0);
+ // Return original string.
+ __ beq(&return_r3);
+ // Longer than original string's length or negative: unsafe arguments.
+ __ bgt(&runtime);
+ // Shorter than original string's length: an actual substring.
+
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into r8.
+ // r3: original string
+ // r4: instance type
+ // r5: length
+ // r6: from index (untagged)
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ andi(r0, r4, Operand(kIsIndirectStringMask));
+ __ beq(&seq_or_external_string, cr0);
+
+ __ andi(r0, r4, Operand(kSlicedNotConsMask));
+ __ bne(&sliced_string, cr0);
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
+ __ CompareRoot(r8, Heap::kempty_stringRootIndex);
+ __ bne(&runtime);
+ __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
+ // Update instance type.
+ __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ b(&underlying_unpacked);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
+ __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
+ __ SmiUntag(r4, r7);
+ __ add(r6, r6, r4); // Add offset to index.
+ // Update instance type.
+ __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ b(&underlying_unpacked);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mr(r8, r3);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // r8: underlying subject string
+ // r4: instance type of underlying subject string
+ // r5: length
+ // r6: adjusted start index (untagged)
+ __ cmpi(r5, Operand(SlicedString::kMinLength));
+ // Short slice. Copy instead of slicing.
+ __ blt(&copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ andi(r0, r4, Operand(kStringEncodingMask));
+ __ beq(&two_byte_slice, cr0);
+ __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
+ __ b(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
+ __ bind(&set_slice_header);
+ __ SmiTag(r6);
+ __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
+ __ b(&return_r3);
+
+ __ bind(&copy_routine);
+ }
+
+ // r8: underlying subject string
+ // r4: instance type of underlying subject string
+ // r5: length
+ // r6: adjusted start index (untagged)
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ andi(r0, r4, Operand(kExternalStringTag));
+ __ beq(&sequential_string, cr0);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ andi(r0, r4, Operand(kShortExternalStringTag));
+ __ bne(&runtime, cr0);
+ __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
+ // r8 already points to the first character of underlying string.
+ __ b(&allocate_result);
+
+ __ bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&allocate_result);
+  // Allocate the result string.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ andi(r0, r4, Operand(kStringEncodingMask));
+ __ beq(&two_byte_sequential, cr0);
+
+ // Allocate and copy the resulting one-byte string.
+ __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
+
+ // Locate first character of substring to copy.
+ __ add(r8, r8, r6);
+ // Locate first character of result.
+ __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ // r3: result string
+ // r4: first character of result string
+ // r5: result string length
+ // r8: first character of substring to copy
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
+ String::ONE_BYTE_ENCODING);
+ __ b(&return_r3);
+
+ // Allocate and copy the resulting two-byte string.
+ __ bind(&two_byte_sequential);
+ __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
+
+ // Locate first character of substring to copy.
+ __ ShiftLeftImm(r4, r6, Operand(1));
+ __ add(r8, r8, r4);
+ // Locate first character of result.
+ __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r3: result string.
+ // r4: first character of result.
+ // r5: result length.
+ // r8: first character of substring to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
+ String::TWO_BYTE_ENCODING);
+
+ __ bind(&return_r3);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
+ __ Drop(3);
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // r3: original string
+ // r4: instance type
+ // r5: length
+ // r6: from index (untagged)
+ __ SmiTag(r6, r6);
+ StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
+ STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
+ __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ cmp(length, scratch2);
+ __ beq(&check_zero_length);
+ __ bind(&strings_not_equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmpi(length, Operand::Zero());
+ __ bne(&compare_chars);
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ Ret();
+}
+
+
+void StringHelper::GenerateCompareFlatOneByteStrings(
+ MacroAssembler* masm, Register left, Register right, Register scratch1,
+ Register scratch2, Register scratch3) {
+ Label skip, result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
+ Register length_delta = scratch3;
+ __ ble(&skip, cr0);
+ __ mr(scratch1, scratch2);
+ __ bind(&skip);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmpi(min_length, Operand::Zero());
+ __ beq(&compare_lengths);
+
+ // Compare loop.
+ GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use length_delta as result if it's zero.
+ __ mr(r3, length_delta);
+ __ cmpi(r3, Operand::Zero());
+ __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+ Label less_equal, equal;
+ __ ble(&less_equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
+ __ Ret();
+ __ bind(&less_equal);
+ __ beq(&equal);
+ __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
+ __ bind(&equal);
+ __ Ret();
+}
+
+
+void StringHelper::GenerateOneByteCharsCompareLoop(
+ MacroAssembler* masm, Register left, Register right, Register length,
+ Register scratch1, Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ addi(scratch1, length,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, scratch1);
+ __ add(right, right, scratch1);
+ __ subfic(length, length, Operand::Zero());
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ lbzx(scratch1, MemOperand(left, index));
+ __ lbzx(r0, MemOperand(right, index));
+ __ cmp(scratch1, r0);
+ __ bne(chars_not_equal);
+ __ addi(index, index, Operand(1));
+ __ cmpi(index, Operand::Zero());
+ __ bne(&loop);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ LoadP(r3, MemOperand(sp)); // Load right in r3, left in r4.
+ __ LoadP(r4, MemOperand(sp, kPointerSize));
+
+ Label not_same;
+ __ cmp(r3, r4);
+ __ bne(&not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ IncrementCounter(counters->string_compare_native(), 1, r4, r5);
+ __ addi(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential one-byte strings.
+ __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
+
+ // Compare flat one-byte strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, r5, r6);
+ __ addi(sp, sp, Operand(2 * kPointerSize));
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : left
+ // -- r3 : right
+ // -- lr : return address
+ // -----------------------------------
+
+ // Load r5 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(r5, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ TestIfSmi(r5, r0);
+ __ Assert(ne, kExpectedAllocationSite, cr0);
+ __ push(r5);
+ __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
+ __ cmp(r5, ip);
+ __ pop(r5);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state());
+ __ TailCallStub(&stub);
+}
+
+
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::SMI);
+ Label miss;
+ __ orx(r5, r4, r3);
+ __ JumpIfNotSmi(r5, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ // __ sub(r3, r3, r4, SetCC);
+ __ sub(r3, r3, r4);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(r4);
+ __ SmiUntag(r3);
+ __ sub(r3, r4, r3);
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::NUMBER);
+
+ Label generic_stub;
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss;
+ Label equal, less_than;
+
+ if (left() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r4, &miss);
+ }
+ if (right() == CompareICState::SMI) {
+ __ JumpIfNotSmi(r3, &miss);
+ }
+
+  // Inline the double comparison and fall back to the general compare
+  // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r3, &right_smi);
+ __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiToDouble(d1, r3);
+
+ __ bind(&left);
+ __ JumpIfSmi(r4, &left_smi);
+ __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiToDouble(d0, r4);
+
+ __ bind(&done);
+
+ // Compare operands
+ __ fcmpu(d0, d1);
+
+ // Don't base result on status bits when a NaN is involved.
+ __ bunordered(&unordered);
+
+ // Return a result of -1, 0, or 1, based on status bits.
+ __ beq(&equal);
+ __ blt(&less_than);
+ // assume greater than
+ __ li(r3, Operand(GREATER));
+ __ Ret();
+ __ bind(&equal);
+ __ li(r3, Operand(EQUAL));
+ __ Ret();
+ __ bind(&less_than);
+ __ li(r3, Operand(LESS));
+ __ Ret();
+
+ __ bind(&unordered);
+ __ bind(&generic_stub);
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+ CompareICState::GENERIC, CompareICState::GENERIC);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op())) {
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ bne(&miss);
+ __ JumpIfSmi(r4, &unordered);
+ __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
+ __ bne(&maybe_undefined2);
+ __ b(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op())) {
+ __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ beq(&unordered);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+ Label miss, not_equal;
+
+ // Registers containing left and right operands respectively.
+ Register left = r4;
+ Register right = r3;
+ Register tmp1 = r5;
+ Register tmp2 = r6;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ orx(tmp1, tmp1, tmp2);
+ __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ __ bne(&miss, cr0);
+
+ // Internalized strings are compared by identity.
+ __ cmp(left, right);
+ __ bne(&not_equal);
+ // Make sure r3 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r3));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ bind(&not_equal);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::UNIQUE_NAME);
+ DCHECK(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r4;
+ Register right = r3;
+ Register tmp1 = r5;
+ Register tmp2 = r6;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+ __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+ // Unique names are compared by identity.
+ __ cmp(left, right);
+ __ bne(&miss);
+ // Make sure r3 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r3));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::STRING);
+ Label miss, not_identical, is_symbol;
+
+ bool equality = Token::IsEqualityOp(op());
+
+ // Registers containing left and right operands respectively.
+ Register left = r4;
+ Register right = r3;
+ Register tmp1 = r5;
+ Register tmp2 = r6;
+ Register tmp3 = r7;
+ Register tmp4 = r8;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ orx(tmp3, tmp1, tmp2);
+ __ andi(r0, tmp3, Operand(kIsNotStringMask));
+ __ bne(&miss, cr0);
+
+ // Fast check for identical strings.
+ __ cmp(left, right);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ bne(&not_identical);
+ __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+ __ Ret();
+ __ bind(&not_identical);
+
+ // Handle not identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ DCHECK(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ orx(tmp3, tmp1, tmp2);
+ __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
+ __ bne(&is_symbol, cr0);
+ // Make sure r3 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ DCHECK(right.is(r3));
+ __ Ret();
+ __ bind(&is_symbol);
+ }
+
+ // Check that both strings are sequential one-byte.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat one-byte strings. Returns when done.
+ if (equality) {
+ StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+ tmp2);
+ } else {
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+ tmp2, tmp3);
+ }
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+ DCHECK(state() == CompareICState::OBJECT);
+ Label miss;
+ __ and_(r5, r4, r3);
+ __ JumpIfSmi(r5, &miss);
+
+ __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
+ __ bne(&miss);
+ __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
+ __ bne(&miss);
+
+ DCHECK(GetCondition() == eq);
+ __ sub(r3, r3, r4);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ and_(r5, r4, r3);
+ __ JumpIfSmi(r5, &miss);
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ Cmpi(r5, Operand(known_map_), r0);
+ __ bne(&miss);
+ __ Cmpi(r6, Operand(known_map_), r0);
+ __ bne(&miss);
+
+ __ sub(r3, r3, r4);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ {
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
+
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
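+    // Preserve the left and right operands, then push them again as arguments
+    // for the miss handler.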
+ __ Push(r4, r3);
+ __ Push(r4, r3);
+ __ LoadSmiLiteral(r0, Smi::FromInt(op()));
+ __ push(r0);
+ __ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ Pop(r4, r3);
+ }
+
+ __ JumpToJSEntry(r5);
+}
+
+
+// This stub is paired with DirectCEntryStub::GenerateCall
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ mflr(r0);
+ __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ Call(ip); // Call the C++ function.
+ __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ mtlr(r0);
+ __ blr();
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+ // Native AIX/PPC64 Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ __ LoadP(ip, MemOperand(target, 0)); // Instruction address
+#else
+  // ip needs to be set for DirectCEntryStub::Generate, and also
+ // for ABI_TOC_ADDRESSABILITY_VIA_IP.
+ __ Move(ip, target);
+#endif
+
+ intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
+ __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
+ __ Call(r0); // Call the stub.
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm, Label* miss, Label* done, Register receiver,
+ Register properties, Handle<Name> name, Register scratch0) {
+ DCHECK(name->IsUniqueName());
+  // If the names of the slots probed for the hash value (slots 1 to
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
+  // (its name is the undefined value), the hash table is guaranteed not to
+  // contain the property. This holds even if some slots represent deleted
+  // properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
+ __ subi(index, index, Operand(1));
+ __ LoadSmiLiteral(
+ ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
+ __ and_(index, index, ip);
+
+ // Scale the index by multiplying by the entry size.
+ DCHECK(NameDictionary::kEntrySize == 3);
+ __ ShiftLeftImm(ip, index, Operand(1));
+ __ add(index, index, ip); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = properties;
+ __ SmiToPtrArrayOffset(ip, index);
+ __ add(tmp, properties, ip);
+ __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ DCHECK(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ cmp(entity_name, tmp);
+ __ beq(done);
+
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
+ // Stop if found the property.
+ __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
+ __ beq(miss);
+
+ Label good;
+ __ cmp(entity_name, tmp);
+ __ beq(&good);
+
+ // Check if the entry name is not a unique name.
+ __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+ __ bind(&good);
+
+ // Restore the properties.
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+
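+  // Fall back to the out-of-line probing stub for the remaining probes.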
+ const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
+ r5.bit() | r4.bit() | r3.bit());
+
+ __ mflr(r0);
+ __ MultiPush(spill_mask);
+
+ __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(r4, Operand(Handle<Name>(name)));
+ NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ cmpi(r3, Operand::Zero());
+
+ __ MultiPop(spill_mask); // MultiPop does not touch condition flags
+ __ mtlr(r0);
+
+ __ beq(done);
+ __ bne(miss);
+}
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If the lookup was successful, |scratch2| will be equal to
+// elements + kPointerSize * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm, Label* miss, Label* done, Register elements,
+ Register name, Register scratch1, Register scratch2) {
+ DCHECK(!elements.is(scratch1));
+ DCHECK(!elements.is(scratch2));
+ DCHECK(!name.is(scratch1));
+ DCHECK(!name.is(scratch2));
+
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ SmiUntag(scratch1); // convert smi to int
+ __ subi(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+      // Add the probe offset (i + i * i), pre-shifted left by Name::kHashShift,
+      // so that the single right shift below yields hash + i + i * i.
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ addi(scratch2, scratch2,
+ Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
+ __ and_(scratch2, scratch1, scratch2);
+
+ // Scale the index by multiplying by the element size.
+ DCHECK(NameDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+ __ ShiftLeftImm(ip, scratch2, Operand(1));
+ __ add(scratch2, scratch2, ip);
+
+ // Check if the key is identical to the name.
+ __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
+ __ add(scratch2, elements, ip);
+ __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ cmp(name, ip);
+ __ beq(done);
+ }
+
+ const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
+ r5.bit() | r4.bit() | r3.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ mflr(r0);
+ __ MultiPush(spill_mask);
+ if (name.is(r3)) {
+ DCHECK(!elements.is(r4));
+ __ mr(r4, name);
+ __ mr(r3, elements);
+ } else {
+ __ mr(r3, elements);
+ __ mr(r4, name);
+ }
+ NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ cmpi(r3, Operand::Zero());
+ __ mr(scratch2, r5);
+ __ MultiPop(spill_mask);
+ __ mtlr(r0);
+
+ __ bne(done);
+ __ beq(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ // Registers:
+ // result: NameDictionary to probe
+ // r4: key
+ // dictionary: NameDictionary to probe.
+ // index: will hold an index of entry if lookup is successful.
+ // might alias with result_.
+  // Returns:
+  // result_ is zero if the lookup failed, non-zero otherwise.
+
+ Register result = r3;
+ Register dictionary = r3;
+ Register key = r4;
+ Register index = r5;
+ Register mask = r6;
+ Register hash = r7;
+ Register undefined = r8;
+ Register entry_key = r9;
+ Register scratch = r9;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ SmiUntag(mask);
+ __ subi(mask, mask, Operand(1));
+
+ __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+      // Add the probe offset (i + i * i), pre-shifted left by Name::kHashShift,
+      // so that the single right shift below yields hash + i + i * i.
+ DCHECK(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ addi(index, hash,
+ Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ } else {
+ __ mr(index, hash);
+ }
+ __ srwi(r0, index, Operand(Name::kHashShift));
+ __ and_(index, mask, r0);
+
+ // Scale the index by multiplying by the entry size.
+ DCHECK(NameDictionary::kEntrySize == 3);
+ __ ShiftLeftImm(scratch, index, Operand(1));
+ __ add(index, index, scratch); // index *= 3.
+
+ DCHECK_EQ(kSmiTagSize, 1);
+ __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
+ __ add(index, dictionary, scratch);
+ __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ cmp(entry_key, undefined);
+ __ beq(&not_in_dictionary);
+
+ // Stop if found the property.
+ __ cmp(entry_key, key);
+ __ beq(&in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode() == POSITIVE_LOOKUP) {
+ __ li(result, Operand::Zero());
+ __ Ret();
+ }
+
+ __ bind(&in_dictionary);
+ __ li(result, Operand(1));
+ __ Ret();
+
+ __ bind(&not_in_dictionary);
+ __ li(result, Operand::Zero());
+ __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ // Hydrogen code stubs need stub2 at snapshot time.
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_ value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two branch instructions are generated with labels so as to
+ // get the offset fixed up correctly by the bind(Label*) call. We patch
+ // it back and forth between branch condition True and False
+ // when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+
+ // Clear the bit, branch on True for NOP action initially
+ __ crclr(Assembler::encode_crbit(cr2, CR_LT));
+ __ blt(&skip_to_incremental_noncompacting, cr2);
+ __ blt(&skip_to_incremental_compacting, cr2);
+
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ // patching not required on PPC as the initial path is effectively NOP
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(), &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(), regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ DCHECK(!address.is(regs_.object()));
+ DCHECK(!address.is(r3));
+ __ mr(address, regs_.address());
+ __ mr(r3, regs_.object());
+ __ mr(r4, address);
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
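+  // Masking the object's address with ~kPageAlignmentMask yields the start of
+  // its MemoryChunk, where the write barrier counter is stored.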
+ DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+ __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
+ __ and_(regs_.scratch0(), regs_.object(), r0);
+ __ LoadP(
+ regs_.scratch1(),
+ MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+ __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ StoreP(
+ regs_.scratch1(),
+ MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+ __ cmpi(regs_.scratch1(), Operand::Zero()); // PPC, we could do better here
+ __ blt(&need_incremental);
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask, eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : element value to store
+ // -- r6 : element index as smi
+ // -- sp[0] : array literal index in function as smi
+ // -- sp[4] : array literal
+ // clobbers r3, r5, r7
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ // Get array literal index, array literal and its map.
+ __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
+
+ __ CheckFastElements(r5, r8, &double_elements);
+ // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+ __ JumpIfSmi(r3, &smi_element);
+ __ CheckFastSmiElements(r5, r8, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call into
+  // the runtime.
+ __ bind(&slow_elements);
+ // call.
+ __ Push(r4, r6, r3);
+ __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
+ __ Push(r8, r7);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+ __ SmiToPtrArrayOffset(r9, r6);
+ __ add(r9, r8, r9);
+#if V8_TARGET_ARCH_PPC64
+ // add due to offset alignment requirements of StorePU
+ __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ StoreP(r3, MemOperand(r9));
+#else
+ __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
+#endif
+ // Update the write barrier for the array store.
+ __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ bind(&smi_element);
+ __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+ __ SmiToPtrArrayOffset(r9, r6);
+ __ add(r9, r8, r9);
+ __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+ __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
+ __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
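+ // The stub-failure frame recorded the caller's stack parameter count;
+ // reload it so those arguments (plus one extra slot in JS function stub
+ // mode) can be dropped from the stack after leaving the frame.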
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ LoadP(r4, MemOperand(fp, parameter_count_offset));
+ if (function_mode() == JS_FUNCTION_STUB_MODE) {
+ __ addi(r4, r4, Operand(1));
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ slwi(r4, r4, Operand(kPointerSizeLog2));
+ __ add(sp, sp, r4);
+ __ Ret();
+}
+
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorLoadStub stub(isolate(), state());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+ EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+ VectorKeyedLoadStub stub(isolate());
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ PredictableCodeSizeScope predictable(masm,
+#if V8_TARGET_ARCH_PPC64
+ 14 * Assembler::kInstrSize);
+#else
+ 11 * Assembler::kInstrSize);
+#endif
+ ProfileEntryHookStub stub(masm->isolate());
+ __ mflr(r0);
+ __ Push(r0, ip);
+ __ CallStub(&stub);
+ __ Pop(r0, ip);
+ __ mtlr(r0);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push lr, ip" sequence followed by a call.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
+
+ // This should contain all kJSCallerSaved registers.
+ const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
+ r15.bit(); // Saved stack pointer.
+
+ // We also save lr, so the count here is one higher than the mask indicates.
+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+ // Save all caller-save registers as this may be called from anywhere.
+ __ mflr(ip);
+ __ MultiPush(kSavedRegs | ip.bit());
+
+ // Compute the function's address for the first argument.
+ __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is two slots above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mr(r15, sp);
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ }
+
+#if !defined(USE_SIMULATOR)
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+ __ mov(ip, Operand(entry_hook));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // Function descriptor
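+ // On ABIs with function descriptors (e.g. 64-bit ELFv1), entry_hook points
+ // at a descriptor: word 0 holds the code address and word 1 the TOC
+ // pointer, so load both before making the call.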
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+ __ LoadP(ip, MemOperand(ip, 0));
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+// ip set above, so nothing to do.
+#endif
+
+ // PPC LINUX ABI:
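+ // Allocate the ABI stack frame (linkage area) required for the C call,
+ // storing a cleared back chain word.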
+ __ li(r0, Operand::Zero());
+ __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ mov(ip, Operand(ExternalReference(
+ &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
+#endif
+ __ Call(ip);
+
+#if !defined(USE_SIMULATOR)
+ __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
+#endif
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mr(sp, r15);
+ }
+
+ // Also pop lr to get Ret(0).
+ __ MultiPop(kSavedRegs | ip.bit());
+ __ mtlr(ip);
+ __ Ret();
+}
+
+
+template <class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
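+ // r6 holds the elements kind; probe each fast kind in sequence order and
+ // tail call the matching constructor stub.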
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Cmpi(r6, Operand(kind), r0);
+ T stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // r3 - number of arguments
+ // r4 - constructor?
+ // sp[0] - last argument
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ DCHECK(FAST_SMI_ELEMENTS == 0);
+ DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+ DCHECK(FAST_ELEMENTS == 2);
+ DCHECK(FAST_HOLEY_ELEMENTS == 3);
+ DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+ DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ andi(r0, r6, Operand(1));
+ __ bne(&normal_sequence, cr0);
+ }
+
+ // look at the first argument
+ __ LoadP(r8, MemOperand(sp, 0));
+ __ cmpi(r8, Operand::Zero());
+ __ beq(&normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(
+ masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ addi(r6, r6, Operand(1));
+
+ if (FLAG_debug_code) {
+ __ LoadP(r8, FieldMemOperand(r5, 0));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store r6
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field...upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
+ __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
+ r0);
+
+ __ bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ mov(r0, Operand(kind));
+ __ cmp(r6, r0);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+ __ TailCallStub(&stub, eq);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template <class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+ if (argument_count() == ANY) {
+ Label not_zero_case, not_one_case;
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpi(r3, Operand(1));
+ __ bgt(&not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count() == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count() == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count() == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc (only if argument_count() == ANY)
+ // -- r4 : constructor
+ // -- r5 : AllocationSite or undefined
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ TestIfSmi(r7, r0);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r7, r7, r8, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+ // We should either have undefined in r5 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r5, r7);
+ }
+
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ beq(&no_info);
+
+ __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r6);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+ ElementsKind kind) {
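+ // Dispatch on argc (r3): fewer than one argument takes the no-argument
+ // stub, more than one the N-arguments stub, and exactly one falls through
+ // to the single-argument handling below.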
+ __ cmpli(r3, Operand(1));
+
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+ __ TailCallStub(&stub0, lt);
+
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ __ TailCallStub(&stubN, gt);
+
+ if (IsFastPackedElementsKind(kind)) {
+ // We might need to create a holey array
+ // look at the first argument
+ __ LoadP(r6, MemOperand(sp, 0));
+ __ cmpi(r6, Operand::Zero());
+
+ InternalArraySingleArgumentConstructorStub stub1_holey(
+ isolate(), GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey, ne);
+ }
+
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+ __ TailCallStub(&stub1);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- r4 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ // Initial map for the builtin Array function should be a map.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ TestIfSmi(r6, r0);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+ __ CompareObjectType(r6, r6, r7, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Figure out the right elements kind
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the map's "bit field 2" into |result|.
+ __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ DecodeField<Map::ElementsKindBits>(r6);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ cmpi(r6, Operand(FAST_ELEMENTS));
+ __ beq(&done);
+ __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ bind(&done);
+ }
+
+ Label fast_elements_case;
+ __ cmpi(r6, Operand(FAST_ELEMENTS));
+ __ beq(&fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : callee
+ // -- r7 : call_data
+ // -- r5 : holder
+ // -- r4 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1)* 4] : first argument
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ Register callee = r3;
+ Register call_data = r7;
+ Register holder = r5;
+ Register api_function_address = r4;
+ Register context = cp;
+
+ int argc = this->argc();
+ bool is_store = this->is_store();
+ bool call_data_undefined = this->call_data_undefined();
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
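+ // Build the FunctionCallbackArguments array on the stack by pushing the
+ // entries from the highest index (context save, 6) down to the holder (0),
+ // which ends up at the lowest address.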
+ // context save
+ __ push(context);
+ // load context from callee
+ __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ push(callee);
+
+ // call data
+ __ push(call_data);
+
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch);
+ // return value default
+ __ push(scratch);
+ // isolate
+ __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ push(scratch);
+ // holder
+ __ push(holder);
+
+ // Prepare arguments.
+ __ mr(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ // PPC LINUX ABI:
+ //
+ // Create 5 extra slots on stack:
+ // [0] space for DirectCEntryStub's LR save
+ // [1-4] FunctionCallbackInfo
+ const int kApiStackSpace = 5;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
+ // r3 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ // FunctionCallbackInfo::implicit_args_
+ __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc
+ __ li(ip, Operand(argc));
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ li(ip, Operand::Zero());
+ __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first js argument
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
+ kStackUnwindSpace, return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- ...
+ // -- r5 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = ApiGetterDescriptor::function_address();
+ DCHECK(api_function_address.is(r5));
+
+ __ mr(r3, sp); // r3 = Handle<Name>
+ __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = PCA
+
+// If ABI passes Handles (pointer-sized struct) in a register:
+//
+// Create 2 extra slots on stack:
+// [0] space for DirectCEntryStub's LR save
+// [1] AccessorInfo&
+//
+// Otherwise:
+//
+// Create 3 extra slots on stack:
+// [0] space for DirectCEntryStub's LR save
+// [1] copy of Handle (first arg)
+// [2] AccessorInfo&
+#if ABI_PASSES_HANDLES_IN_REGS
+ const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ const int kApiStackSpace = 2;
+#else
+ const int kArg0Slot = kStackFrameExtraParamSlot + 1;
+ const int kAccessorInfoSlot = kArg0Slot + 1;
+ const int kApiStackSpace = 3;
+#endif
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+#if !ABI_PASSES_HANDLES_IN_REGS
+ // pass 1st arg by reference
+ __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
+ __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
+#endif
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // r4 (internal::Object** args_) as the data.
+ __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
+ // r4 = AccessorInfo&
+ __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
+ kStackUnwindSpace,
+ MemOperand(fp, 6 * kPointerSize), NULL);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
new file mode 100644
index 0000000000..a9d06fb62e
--- /dev/null
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -0,0 +1,325 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CODE_STUBS_PPC_H_
+#define V8_PPC_CODE_STUBS_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+ Register src, Register count,
+ Register scratch,
+ String::Encoding encoding);
+
+ // Compares two flat one-byte strings and returns result in r0.
+ static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // Compares two flat one-byte strings for equality and returns result in r0.
+ static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+ Register left, Register right,
+ Register scratch1,
+ Register scratch2);
+
+ private:
+ static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
+ Register left, Register right,
+ Register length,
+ Register scratch1,
+ Label* chars_not_equal);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub : public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
+};
+
+
+class RestoreRegistersStateStub : public PlatformCodeStub {
+ public:
+ explicit RestoreRegistersStateStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
+};
+
+
+class RecordWriteStub : public PlatformCodeStub {
+ public:
+ RecordWriteStub(Isolate* isolate, Register object, Register value,
+ Register address, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : PlatformCodeStub(isolate),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ minor_key_ = ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(remembered_set_action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ RecordWriteStub(uint32_t key, Isolate* isolate)
+ : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
+ enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ // Consider adding DCHECK here to catch bad patching
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT);
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ // Consider adding DCHECK here to catch bad patching
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF);
+ }
+
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction =
+ Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ (Assembler::kInstrSize * 2));
+
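+ // Patch() toggles the BO field of these two instruction slots between BT
+ // (the nop form) and BF; whichever slot currently holds a BF-conditioned
+ // branch identifies the active incremental mode, and neither does in
+ // STORE_BUFFER_ONLY mode.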
+ // Consider adding DCHECK here to catch unexpected instruction sequence
+ if (BF == (first_instruction & kBOfieldMask)) {
+ return INCREMENTAL;
+ }
+
+ if (BF == (second_instruction & kBOfieldMask)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL, stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ DCHECK(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
+ break;
+ case INCREMENTAL:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL_COMPACTION:
+ DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
+ break;
+ }
+ DCHECK(GetMode(stub) == mode);
+ CpuFeatures::FlushICache(stub->instruction_start() + Assembler::kInstrSize,
+ 2 * Assembler::kInstrSize);
+ }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object, Register address, Register scratch0)
+ : object_(object), address_(address), scratch0_(scratch0) {
+ DCHECK(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->mflr(r0);
+ masm->push(r0);
+ masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ // Save all volatile FP registers except d0.
+ masm->SaveFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ // Restore all volatile FP registers except d0.
+ masm->RestoreFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+ }
+ masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
+ masm->pop(r0);
+ masm->mtlr(r0);
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ inline Major MajorKey() const FINAL { return RecordWrite; }
+
+ void Generate(MacroAssembler* masm) OVERRIDE;
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ Register object() const {
+ return Register::from_code(ObjectBits::decode(minor_key_));
+ }
+
+ Register value() const {
+ return Register::from_code(ValueBits::decode(minor_key_));
+ }
+
+ Register address() const {
+ return Register::from_code(AddressBits::decode(minor_key_));
+ }
+
+ RememberedSetAction remembered_set_action() const {
+ return RememberedSetActionBits::decode(minor_key_);
+ }
+
+ SaveFPRegsMode save_fp_regs_mode() const {
+ return SaveFPRegsModeBits::decode(minor_key_);
+ }
+
+ class ObjectBits : public BitField<int, 0, 5> {};
+ class ValueBits : public BitField<int, 5, 5> {};
+ class AddressBits : public BitField<int, 10, 5> {};
+ class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
+ };
+ class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Label slow_;
+ RegisterAllocation regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
+};
+
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects), we need to
+// keep the code that called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by GC.
+class DirectCEntryStub : public PlatformCodeStub {
+ public:
+ explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ bool NeedsImmovableCode() { return true; }
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
+};
+
+
+class NameDictionaryLookupStub : public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = LookupModeBits::encode(mode);
+ }
+
+ static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
+ Label* done, Register receiver,
+ Register properties, Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
+ Label* done, Register elements,
+ Register name, Register r0, Register r1);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
+ class LookupModeBits : public BitField<LookupMode, 0, 1> {};
+
+ DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+ DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
new file mode 100644
index 0000000000..1074e872bf
--- /dev/null
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -0,0 +1,700 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/simulator-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_ppc_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())
+ ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+ ExternalReference::InitializeMathExpData();
+
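+ // Assemble a small native stub around EmitMathExp and hand it back as a
+ // C-callable UnaryMathFunction (or through the simulator trampoline below).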
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ DoubleRegister input = d1;
+ DoubleRegister result = d2;
+ DoubleRegister double_scratch1 = d3;
+ DoubleRegister double_scratch2 = d4;
+ Register temp1 = r7;
+ Register temp2 = r8;
+ Register temp3 = r9;
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
+ double_scratch2, temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ __ fmr(d1, result);
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+#if !ABI_USES_FUNCTION_DESCRIPTORS
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+#endif
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_ppc_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+ return &std::sqrt;
+#else
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::sqrt;
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+
+ __ MovFromFloatParameter(d1);
+ __ fsqrt(d1, d1);
+ __ MovToFloatResult(d1);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+#if !ABI_USES_FUNCTION_DESCRIPTORS
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+#endif
+
+ CpuFeatures::FlushICache(buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
+}
+
+#undef __
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ DCHECK(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ DCHECK(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ Register scratch_elements = r7;
+ DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ DCHECK(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode, Label* fail) {
+ // lr contains the return address
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
+ Register elements = r7;
+ Register length = r8;
+ Register array = r9;
+ Register array_end = array;
+
+ // target_map parameter can be clobbered.
+ Register scratch1 = target_map;
+ Register scratch2 = r11;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
+ scratch2));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ beq(&only_change_map);
+
+ // Preserve lr and use r17 as a temporary register.
+ __ mflr(r0);
+ __ Push(r0);
+
+ __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // length: number of elements (smi-tagged)
+
+ // Allocate new FixedDoubleArray.
+ __ SmiToDoubleArrayOffset(r17, length);
+ __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+ __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ // Update receiver's map.
+ __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ addi(scratch1, array, Operand(kHeapObjectTag));
+ __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ __ addi(target_map, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToDoubleArrayOffset(array, length);
+ __ add(array_end, r10, array);
+// Repurpose registers no longer in use.
+// Materialize the canonical hole NaN pattern for the hole stores below.
+#if V8_TARGET_ARCH_PPC64
+ Register hole_int64 = elements;
+ __ mov(hole_int64, Operand(kHoleNanInt64));
+#else
+ Register hole_lower = elements;
+ Register hole_upper = length;
+ __ mov(hole_lower, Operand(kHoleNanLower32));
+ __ mov(hole_upper, Operand(kHoleNanUpper32));
+#endif
+ // scratch1: begin of source FixedArray element fields, not tagged
+ // hole_lower: kHoleNanLower32 (on PPC64, hole_int64 holds the full pattern)
+ // hole_upper: kHoleNanUpper32
+ // array_end: end of destination FixedDoubleArray, not tagged
+ // r10: begin of destination FixedDoubleArray element fields, not tagged
+
+ __ b(&entry);
+
+ __ bind(&only_change_map);
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&done);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ Pop(r0);
+ __ mtlr(r0);
+ __ b(fail);
+
+ // Convert and copy elements.
+ __ bind(&loop);
+ __ LoadP(r11, MemOperand(scratch1));
+ __ addi(scratch1, scratch1, Operand(kPointerSize));
+ // r11: current element
+ __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
+
+ // Normal smi, convert to double and store.
+ __ ConvertIntToDouble(r11, d0);
+ __ stfd(d0, MemOperand(r10, 0));
+ __ addi(r10, r10, Operand(8));
+
+ __ b(&entry);
+
+ // Hole found, store the-hole NaN.
+ __ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ LoadP(r11, MemOperand(r6, -kPointerSize));
+ __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, kObjectFoundInSmiOnlyArray);
+ }
+#if V8_TARGET_ARCH_PPC64
+ __ std(hole_int64, MemOperand(r10, 0));
+#else
+ __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
+ __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
+#endif
+ __ addi(r10, r10, Operand(8));
+
+ __ bind(&entry);
+ __ cmp(r10, array_end);
+ __ blt(&loop);
+
+ __ Pop(r0);
+ __ mtlr(r0);
+ __ bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, Register receiver, Register key, Register value,
+ Register target_map, AllocationSiteMode mode, Label* fail) {
+ // Register lr contains the return address.
+ Label entry, loop, convert_hole, gc_required, only_change_map;
+ Register elements = r7;
+ Register array = r9;
+ Register length = r8;
+ Register scratch = r11;
+
+ // Verify input registers don't conflict with locals.
+ DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
+ scratch));
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+ __ beq(&only_change_map);
+
+ __ Push(target_map, receiver, key, value);
+ __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ // elements: source FixedDoubleArray
+ // length: number of elements (smi-tagged)
+
+ // Allocate new FixedArray.
+ // Re-use value and target_map registers, as they have been saved on the
+ // stack.
+ Register array_size = value;
+ Register allocate_scratch = target_map;
+ __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToPtrArrayOffset(r0, length);
+ __ add(array_size, array_size, r0);
+ __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+ NO_ALLOCATION_FLAGS);
+ // array: destination FixedArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
+ __ addi(array, array, Operand(kHeapObjectTag));
+
+ // Prepare for conversion loop.
+ Register src_elements = elements;
+ Register dst_elements = target_map;
+ Register dst_end = length;
+ Register heap_number_map = scratch;
+ __ addi(src_elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(length, length);
+ __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+
+ Label initialization_loop, loop_done;
+ __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
+ __ beq(&loop_done, cr0);
+
+ // Allocating heap numbers in the loop below can fail and cause a jump to
+ // gc_required. We can't leave a partly initialized FixedArray behind,
+ // so pessimistically fill it with holes now.
+ __ mtctr(r0);
+ __ addi(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ bind(&initialization_loop);
+ __ StorePU(r10, MemOperand(dst_elements, kPointerSize));
+ __ bdnz(&initialization_loop);
+
+ __ addi(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(dst_end, dst_elements, length);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ // Using offsetted addresses in src_elements to fully take advantage of
+ // post-indexing.
+ // dst_elements: begin of destination FixedArray element fields, not tagged
+ // src_elements: begin of source FixedDoubleArray element fields,
+ // not tagged, +4
+ // dst_end: end of destination FixedArray, not tagged
+ // array: destination FixedArray
+ // r10: the-hole pointer
+ // heap_number_map: heap number map
+ __ b(&loop);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ Pop(target_map, receiver, key, value);
+ __ b(fail);
+
+ __ bind(&loop);
+ Register upper_bits = key;
+ __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
+ __ addi(src_elements, src_elements, Operand(kDoubleSize));
+ // upper_bits: current element's upper 32 bit
+ // src_elements: address of next element's upper 32 bit
+ __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
+ __ beq(&convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_number = receiver;
+ Register scratch2 = value;
+ __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
+ &gc_required);
+ // heap_number: new heap number
+#if V8_TARGET_ARCH_PPC64
+ __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
+ // subtract tag for std
+ __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
+ __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
+#else
+ __ lwz(scratch2,
+ MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
+ __ lwz(upper_bits,
+ MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
+ __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+ __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+#endif
+ __ mr(scratch2, dst_elements);
+ __ StoreP(heap_number, MemOperand(dst_elements));
+ __ addi(dst_elements, dst_elements, Operand(kPointerSize));
+ __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ b(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ bind(&convert_hole);
+ __ StoreP(r10, MemOperand(dst_elements));
+ __ addi(dst_elements, dst_elements, Operand(kPointerSize));
+
+ __ bind(&entry);
+ __ cmpl(dst_elements, dst_end);
+ __ blt(&loop);
+ __ bind(&loop_done);
+
+ __ Pop(target_map, receiver, key, value);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+// assume ip can be used as a scratch register below
+void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
+ Register index, Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ andi(r0, result, Operand(kIsIndirectStringMask));
+ __ beq(&check_sequential, cr0);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ mov(ip, Operand(kSlicedNotConsMask));
+ __ and_(r0, result, ip, SetRC);
+ __ beq(&cons_string, cr0);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ SmiUntag(ip, result);
+ __ add(index, index, ip);
+ __ b(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ CompareRoot(result, Heap::kempty_stringRootIndex);
+ __ bne(call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ andi(r0, result, Operand(kStringRepresentationMask));
+ __ bne(&external_string, cr0);
+
+ // Prepare sequential strings
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ addi(string, string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ b(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ andi(r0, result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+ }
+ // Rule out short external strings.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ andi(r0, result, Operand(kShortExternalStringMask));
+ __ bne(call_runtime, cr0);
+ __ LoadP(string,
+ FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label one_byte, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ andi(r0, result, Operand(kStringEncodingMask));
+ __ bne(&one_byte, cr0);
+ // Two-byte string.
+ __ ShiftLeftImm(result, index, Operand(1));
+ __ lhzx(result, MemOperand(string, result));
+ __ b(&done);
+ __ bind(&one_byte);
+ // One-byte string.
+ __ lbzx(result, MemOperand(string, index));
+ __ bind(&done);
+}
+
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1, Register temp2,
+ Register temp3) {
+ DCHECK(!input.is(result));
+ DCHECK(!input.is(double_scratch1));
+ DCHECK(!input.is(double_scratch2));
+ DCHECK(!result.is(double_scratch1));
+ DCHECK(!result.is(double_scratch2));
+ DCHECK(!double_scratch1.is(double_scratch2));
+ DCHECK(!temp1.is(temp2));
+ DCHECK(!temp1.is(temp3));
+ DCHECK(!temp2.is(temp3));
+ DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+ DCHECK(!masm->serializer_enabled()); // External references not serializable.
+
+ Label zero, infinity, done;
+
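+ // The constants table supplies the clamping bounds and polynomial
+ // coefficients for the table-driven exp approximation: inputs at or below
+ // constant 0 yield zero, inputs at or above constant 1 yield infinity, and
+ // NaN is returned unchanged.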
+ __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ lfd(double_scratch1, ExpConstant(0, temp3));
+ __ fcmpu(double_scratch1, input);
+ __ fmr(result, input);
+ __ bunordered(&done);
+ __ bge(&zero);
+
+ __ lfd(double_scratch2, ExpConstant(1, temp3));
+ __ fcmpu(input, double_scratch2);
+ __ bge(&infinity);
+
+ __ lfd(double_scratch1, ExpConstant(3, temp3));
+ __ lfd(result, ExpConstant(4, temp3));
+ __ fmul(double_scratch1, double_scratch1, input);
+ __ fadd(double_scratch1, double_scratch1, result);
+ __ MovDoubleLowToInt(temp2, double_scratch1);
+ __ fsub(double_scratch1, double_scratch1, result);
+ __ lfd(result, ExpConstant(6, temp3));
+ __ lfd(double_scratch2, ExpConstant(5, temp3));
+ __ fmul(double_scratch1, double_scratch1, double_scratch2);
+ __ fsub(double_scratch1, double_scratch1, input);
+ __ fsub(result, result, double_scratch1);
+ __ fmul(double_scratch2, double_scratch1, double_scratch1);
+ __ fmul(result, result, double_scratch2);
+ __ lfd(double_scratch2, ExpConstant(7, temp3));
+ __ fmul(result, result, double_scratch2);
+ __ fsub(result, result, double_scratch1);
+ __ lfd(double_scratch2, ExpConstant(8, temp3));
+ __ fadd(result, result, double_scratch2);
+ __ srwi(temp1, temp2, Operand(11));
+ __ andi(temp2, temp2, Operand(0x7ff));
+ __ addi(temp1, temp1, Operand(0x3ff));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ slwi(temp2, temp2, Operand(3));
+#if V8_TARGET_ARCH_PPC64
+ __ ldx(temp2, MemOperand(temp3, temp2));
+ __ sldi(temp1, temp1, Operand(52));
+ __ orx(temp2, temp1, temp2);
+ __ MovInt64ToDouble(double_scratch1, temp2);
+#else
+ __ add(ip, temp3, temp2);
+ __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
+ __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
+ __ slwi(temp1, temp1, Operand(20));
+ __ orx(temp3, temp1, temp3);
+ __ MovInt64ToDouble(double_scratch1, temp3, temp2);
+#endif
+
+ __ fmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ fmr(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ lfd(result, ExpConstant(2, temp3));
+
+ __ bind(&done);
+}
+
+#undef __
+
+CodeAgingHelper::CodeAgingHelper() {
+ DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // Since patcher is a large object, allocate it dynamically when needed,
+ // to avoid overloading the stack in stress conditions.
+ // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+ // the process, before the PPC simulator ICache is set up.
+ SmartPointer<CodePatcher> patcher(new CodePatcher(
+ young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
+ PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+ patcher->masm()->PushFixedFrame(r4);
+ patcher->masm()->addi(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
+ patcher->masm()->nop();
+ }
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return Assembler::IsNop(Assembler::instr_at(candidate));
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(isolate, sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ ConstantPoolArray* constant_pool = NULL;
+ Address target_address = Assembler::target_address_at(
+ sequence + kCodeAgingTargetDelta, constant_pool);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+ if (age == kNoAgeCodeAge) {
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+ CpuFeatures::FlushICache(sequence, young_length);
+ } else {
+ // FIXED_SEQUENCE
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+ intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+ // Don't use Call -- we need to preserve ip and lr.
+ // GenerateMakeCodeYoungAgainCommon for the stub code.
+ patcher.masm()->nop(); // marker to detect sequence (see IsOld)
+ patcher.masm()->mov(r3, Operand(target));
+ patcher.masm()->Jump(r3);
+ for (int i = 0; i < kCodeAgingSequenceNops; i++) {
+ patcher.masm()->nop();
+ }
+ }
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
new file mode 100644
index 0000000000..500bf600f9
--- /dev/null
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CODEGEN_PPC_H_
+#define V8_PPC_CODEGEN_PPC_H_
+
+#include "src/ast.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm, Register string, Register index,
+ Register result, Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ // Register input isn't modified. All other registers are clobbered.
+ static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+ DoubleRegister result, DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2, Register temp1,
+ Register temp2, Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/ppc/constants-ppc.cc
new file mode 100644
index 0000000000..f32f25a258
--- /dev/null
+++ b/deps/v8/src/ppc/constants-ppc.cc
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ppc/constants-ppc.h"
+
+
+namespace v8 {
+namespace internal {
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+ "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+ "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
+
+
+// List of alias names which can be used when referring to PPC registers.
+const Registers::RegisterAlias Registers::aliases_[] = {{10, "sl"},
+ {11, "r11"},
+ {12, "r12"},
+ {13, "r13"},
+ {14, "r14"},
+ {15, "r15"},
+ {kNoRegister, NULL}};
+
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+const char* FPRegisters::names_[kNumFPRegisters] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
+ "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
+ "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+
+const char* FPRegisters::Name(int reg) {
+ DCHECK((0 <= reg) && (reg < kNumFPRegisters));
+ return names_[reg];
+}
+
+
+int FPRegisters::Number(const char* name) {
+ for (int i = 0; i < kNumFPRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kNoRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
new file mode 100644
index 0000000000..9434b8f92f
--- /dev/null
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -0,0 +1,600 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CONSTANTS_PPC_H_
+#define V8_PPC_CONSTANTS_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+// Number of registers
+const int kNumRegisters = 32;
+
+// FP support.
+const int kNumFPDoubleRegisters = 32;
+const int kNumFPRegisters = kNumFPDoubleRegisters;
+
+const int kNoRegister = -1;
+
+// sign-extend the least significant 16-bits of value <imm>
+#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+
+// sign-extend the least significant 26-bits of value <imm>
+#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
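+// For illustration (example values only): SIGN_EXT_IMM16(0xFFFF) evaluates to
+// -1 and SIGN_EXT_IMM16(0x7FFF) to 32767; SIGN_EXT_IMM26(0x3FFFFFF) evaluates
+// to -1 for a maximal 26-bit branch displacement.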
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate PPC instructions.
+//
+// Section references in the code refer to the "PowerPC Microprocessor
+// Family: The Programmer's Reference Guide" from 10/95
+// https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
+//
+
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum below.
+enum Condition {
+ kNoCondition = -1,
+ eq = 0, // Equal.
+ ne = 1, // Not equal.
+ ge = 2, // Greater or equal.
+ lt = 3, // Less than.
+ gt = 4, // Greater than.
+  le = 5,         // Less than or equal
+ unordered = 6, // Floating-point unordered
+ ordered = 7,
+ overflow = 8, // Summary overflow
+ nooverflow = 9,
+ al = 10 // Always.
+};
+
+
+inline Condition NegateCondition(Condition cond) {
+ DCHECK(cond != al);
+ return static_cast<Condition>(cond ^ ne);
+}
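+// For example, NegateCondition(eq) == ne and NegateCondition(ge) == lt: the
+// paired conditions above differ only in their least significant bit, so
+// XOR-ing with ne (== 1) flips between the members of each pair.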
+
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
+ switch (cond) {
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cond;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32bit integers
+// representing instructions from usual 32 bit values.
+// Instruction objects are pointers to 32bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+// Opcodes as defined in section 4.2 table 34 (32bit PowerPC)
+enum Opcode {
+ TWI = 3 << 26, // Trap Word Immediate
+ MULLI = 7 << 26, // Multiply Low Immediate
+ SUBFIC = 8 << 26, // Subtract from Immediate Carrying
+ CMPLI = 10 << 26, // Compare Logical Immediate
+ CMPI = 11 << 26, // Compare Immediate
+ ADDIC = 12 << 26, // Add Immediate Carrying
+ ADDICx = 13 << 26, // Add Immediate Carrying and Record
+ ADDI = 14 << 26, // Add Immediate
+ ADDIS = 15 << 26, // Add Immediate Shifted
+ BCX = 16 << 26, // Branch Conditional
+ SC = 17 << 26, // System Call
+ BX = 18 << 26, // Branch
+ EXT1 = 19 << 26, // Extended code set 1
+ RLWIMIX = 20 << 26, // Rotate Left Word Immediate then Mask Insert
+ RLWINMX = 21 << 26, // Rotate Left Word Immediate then AND with Mask
+ RLWNMX = 23 << 26, // Rotate Left Word then AND with Mask
+ ORI = 24 << 26, // OR Immediate
+ ORIS = 25 << 26, // OR Immediate Shifted
+ XORI = 26 << 26, // XOR Immediate
+ XORIS = 27 << 26, // XOR Immediate Shifted
+ ANDIx = 28 << 26, // AND Immediate
+ ANDISx = 29 << 26, // AND Immediate Shifted
+ EXT5 = 30 << 26, // Extended code set 5 - 64bit only
+ EXT2 = 31 << 26, // Extended code set 2
+ LWZ = 32 << 26, // Load Word and Zero
+ LWZU = 33 << 26, // Load Word with Zero Update
+ LBZ = 34 << 26, // Load Byte and Zero
+ LBZU = 35 << 26, // Load Byte and Zero with Update
+  STW = 36 << 26,     // Store Word
+ STWU = 37 << 26, // Store Word with Update
+ STB = 38 << 26, // Store Byte
+ STBU = 39 << 26, // Store Byte with Update
+ LHZ = 40 << 26, // Load Half and Zero
+ LHZU = 41 << 26, // Load Half and Zero with Update
+ LHA = 42 << 26, // Load Half Algebraic
+ LHAU = 43 << 26, // Load Half Algebraic with Update
+ STH = 44 << 26, // Store Half
+ STHU = 45 << 26, // Store Half with Update
+ LMW = 46 << 26, // Load Multiple Word
+ STMW = 47 << 26, // Store Multiple Word
+ LFS = 48 << 26, // Load Floating-Point Single
+ LFSU = 49 << 26, // Load Floating-Point Single with Update
+ LFD = 50 << 26, // Load Floating-Point Double
+ LFDU = 51 << 26, // Load Floating-Point Double with Update
+ STFS = 52 << 26, // Store Floating-Point Single
+ STFSU = 53 << 26, // Store Floating-Point Single with Update
+ STFD = 54 << 26, // Store Floating-Point Double
+ STFDU = 55 << 26, // Store Floating-Point Double with Update
+ LD = 58 << 26, // Load Double Word
+ EXT3 = 59 << 26, // Extended code set 3
+ STD = 62 << 26, // Store Double Word (optionally with Update)
+ EXT4 = 63 << 26 // Extended code set 4
+};
+
+// Bits 10-1
+enum OpcodeExt1 {
+ MCRF = 0 << 1, // Move Condition Register Field
+ BCLRX = 16 << 1, // Branch Conditional Link Register
+  CRNOR = 33 << 1,    // Condition Register NOR
+ RFI = 50 << 1, // Return from Interrupt
+ CRANDC = 129 << 1, // Condition Register AND with Complement
+ ISYNC = 150 << 1, // Instruction Synchronize
+ CRXOR = 193 << 1, // Condition Register XOR
+ CRNAND = 225 << 1, // Condition Register NAND
+ CRAND = 257 << 1, // Condition Register AND
+ CREQV = 289 << 1, // Condition Register Equivalent
+ CRORC = 417 << 1, // Condition Register OR with Complement
+ CROR = 449 << 1, // Condition Register OR
+ BCCTRX = 528 << 1 // Branch Conditional to Count Register
+};
+
+// Bits 9-1 or 10-1
+enum OpcodeExt2 {
+ CMP = 0 << 1,
+ TW = 4 << 1,
+ SUBFCX = 8 << 1,
+ ADDCX = 10 << 1,
+ MULHWUX = 11 << 1,
+ MFCR = 19 << 1,
+ LWARX = 20 << 1,
+ LDX = 21 << 1,
+ LWZX = 23 << 1, // load word zero w/ x-form
+ SLWX = 24 << 1,
+ CNTLZWX = 26 << 1,
+ SLDX = 27 << 1,
+ ANDX = 28 << 1,
+ CMPL = 32 << 1,
+ SUBFX = 40 << 1,
+ MFVSRD = 51 << 1, // Move From VSR Doubleword
+ LDUX = 53 << 1,
+ DCBST = 54 << 1,
+ LWZUX = 55 << 1, // load word zero w/ update x-form
+ CNTLZDX = 58 << 1,
+ ANDCX = 60 << 1,
+ MULHWX = 75 << 1,
+ DCBF = 86 << 1,
+ LBZX = 87 << 1, // load byte zero w/ x-form
+ NEGX = 104 << 1,
+ MFVSRWZ = 115 << 1, // Move From VSR Word And Zero
+ LBZUX = 119 << 1, // load byte zero w/ update x-form
+ NORX = 124 << 1,
+ SUBFEX = 136 << 1,
+ ADDEX = 138 << 1,
+ STDX = 149 << 1,
+ STWX = 151 << 1, // store word w/ x-form
+ MTVSRD = 179 << 1, // Move To VSR Doubleword
+ STDUX = 181 << 1,
+ STWUX = 183 << 1, // store word w/ update x-form
+ /*
+ MTCRF
+ MTMSR
+ STWCXx
+ SUBFZEX
+ */
+ ADDZEX = 202 << 1, // Add to Zero Extended
+ /*
+ MTSR
+ */
+ MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
+ STBX = 215 << 1, // store byte w/ x-form
+ MULLD = 233 << 1, // Multiply Low Double Word
+ MULLW = 235 << 1, // Multiply Low Word
+ MTVSRWZ = 243 << 1, // Move To VSR Word And Zero
+ STBUX = 247 << 1, // store byte w/ update x-form
+ ADDX = 266 << 1, // Add
+ LHZX = 279 << 1, // load half-word zero w/ x-form
+ LHZUX = 311 << 1, // load half-word zero w/ update x-form
+ LHAX = 343 << 1, // load half-word algebraic w/ x-form
+ LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
+ XORX = 316 << 1, // Exclusive OR
+ MFSPR = 339 << 1, // Move from Special-Purpose-Register
+ STHX = 407 << 1, // store half-word w/ x-form
+ STHUX = 439 << 1, // store half-word w/ update x-form
+ ORX = 444 << 1, // Or
+ MTSPR = 467 << 1, // Move to Special-Purpose-Register
+ DIVD = 489 << 1, // Divide Double Word
+ DIVW = 491 << 1, // Divide Word
+
+ // Below represent bits 10-1 (any value >= 512)
+ LFSX = 535 << 1, // load float-single w/ x-form
+ SRWX = 536 << 1, // Shift Right Word
+ SRDX = 539 << 1, // Shift Right Double Word
+ LFSUX = 567 << 1, // load float-single w/ update x-form
+ SYNC = 598 << 1, // Synchronize
+ LFDX = 599 << 1, // load float-double w/ x-form
+ LFDUX = 631 << 1, // load float-double w/ update X-form
+ STFSX = 663 << 1, // store float-single w/ x-form
+ STFSUX = 695 << 1, // store float-single w/ update x-form
+ STFDX = 727 << 1, // store float-double w/ x-form
+ STFDUX = 759 << 1, // store float-double w/ update x-form
+ SRAW = 792 << 1, // Shift Right Algebraic Word
+ SRAD = 794 << 1, // Shift Right Algebraic Double Word
+ SRAWIX = 824 << 1, // Shift Right Algebraic Word Immediate
+ SRADIX = 413 << 2, // Shift Right Algebraic Double Word Immediate
+ EXTSH = 922 << 1, // Extend Sign Halfword
+ EXTSB = 954 << 1, // Extend Sign Byte
+ ICBI = 982 << 1, // Instruction Cache Block Invalidate
+ EXTSW = 986 << 1 // Extend Sign Word
+};
+
+// Some use Bits 10-1 and others only 5-1 for the opcode
+enum OpcodeExt4 {
+ // Bits 5-1
+ FDIV = 18 << 1, // Floating Divide
+ FSUB = 20 << 1, // Floating Subtract
+ FADD = 21 << 1, // Floating Add
+ FSQRT = 22 << 1, // Floating Square Root
+ FSEL = 23 << 1, // Floating Select
+ FMUL = 25 << 1, // Floating Multiply
+ FMSUB = 28 << 1, // Floating Multiply-Subtract
+ FMADD = 29 << 1, // Floating Multiply-Add
+
+ // Bits 10-1
+ FCMPU = 0 << 1, // Floating Compare Unordered
+  FRSP = 12 << 1,    // Floating Round to Single-Precision
+ FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
+ FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
+ FNEG = 40 << 1, // Floating Negate
+ MCRFS = 64 << 1, // Move to Condition Register from FPSCR
+ FMR = 72 << 1, // Floating Move Register
+ MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
+ FABS = 264 << 1, // Floating Absolute Value
+ FRIM = 488 << 1, // Floating Round to Integer Minus
+ MFFS = 583 << 1, // move from FPSCR x-form
+ MTFSF = 711 << 1, // move to FPSCR fields XFL-form
+ FCFID = 846 << 1, // Floating convert from integer doubleword
+  FCTID = 814 << 1,   // Floating Convert to Integer Doubleword
+  FCTIDZ = 815 << 1   // Floating Convert to Integer Doubleword Round to Zero
+};
+
+enum OpcodeExt5 {
+ // Bits 4-2
+ RLDICL = 0 << 1, // Rotate Left Double Word Immediate then Clear Left
+ RLDICR = 2 << 1, // Rotate Left Double Word Immediate then Clear Right
+ RLDIC = 4 << 1, // Rotate Left Double Word Immediate then Clear
+ RLDIMI = 6 << 1, // Rotate Left Double Word Immediate then Mask Insert
+ // Bits 4-1
+ RLDCL = 8 << 1, // Rotate Left Double Word then Clear Left
+ RLDCR = 9 << 1 // Rotate Left Double Word then Clear Right
+};
+
+// Instruction encoding bits and masks.
+enum {
+ // Instruction encoding bit
+ B1 = 1 << 1,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+ B6 = 1 << 6,
+ B10 = 1 << 10,
+ B11 = 1 << 11,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B21 = 1 << 21,
+
+ // Instruction bit masks
+ kCondMask = 0x1F << 21,
+ kOff12Mask = (1 << 12) - 1,
+ kImm24Mask = (1 << 24) - 1,
+ kOff16Mask = (1 << 16) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm26Mask = (1 << 26) - 1,
+ kBOfieldMask = 0x1f << 21,
+ kOpcodeMask = 0x3f << 26,
+ kExt1OpcodeMask = 0x3ff << 1,
+ kExt2OpcodeMask = 0x1f << 1,
+ kExt5OpcodeMask = 0x3 << 2,
+ kBOMask = 0x1f << 21,
+ kBIMask = 0x1F << 16,
+ kBDMask = 0x14 << 2,
+ kAAMask = 0x01 << 1,
+ kLKMask = 0x01,
+ kRCMask = 0x01,
+ kTOMask = 0x1f << 21
+};
+
+// the following is to differentiate different faked opcodes for
+// the BOGUS PPC instruction we invented (when bit 25 is 0) or to mark
+// different stub code (when bit 25 is 1)
+// - use primary opcode 1 for undefined instruction
+// - use bit 25 to indicate whether the opcode is for fake-ppc
+// instr or stub-marker
+// - use the least significant 6-bit to indicate FAKE_OPCODE_T or
+// MARKER_T
+#define FAKE_OPCODE 1 << 26
+#define MARKER_SUBOPCODE_BIT 25
+#define MARKER_SUBOPCODE 1 << MARKER_SUBOPCODE_BIT
+#define FAKER_SUBOPCODE 0 << MARKER_SUBOPCODE_BIT
+
+enum FAKE_OPCODE_T {
+ fBKPT = 14,
+ fLastFaker // can't be more than 128 (2^^7)
+};
+#define FAKE_OPCODE_HIGH_BIT 7 // fake opcode has to fall into bit 0~7
+#define F_NEXT_AVAILABLE_STUB_MARKER 369 // must be less than 2^^9 (512)
+#define STUB_MARKER_HIGH_BIT 9 // stub marker has to fall into bit 0~9
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Overflow Exception
+enum OEBit {
+ SetOE = 1 << 10, // Set overflow exception
+ LeaveOE = 0 << 10 // No overflow exception
+};
+
+// Record bit
+enum RCBit { // Bit 0
+ SetRC = 1, // LT,GT,EQ,SO
+ LeaveRC = 0 // None
+};
+
+// Link bit
+enum LKBit { // Bit 0
+ SetLK = 1, // Load effective address of next instruction
+ LeaveLK = 0 // No action
+};
+
+enum BOfield { // Bits 25-21
+ DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
+ DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
+ BF = 4 << 21, // Branch if condition false
+ DCBNZT = 8 << 21, // Decrement CTR; branch if CTR != 0 and condition true
+ DCBEZT = 10 << 21, // Decrement CTR; branch if CTR == 0 and condition true
+ BT = 12 << 21, // Branch if condition true
+ DCBNZ = 16 << 21, // Decrement CTR; branch if CTR != 0
+ DCBEZ = 18 << 21, // Decrement CTR; branch if CTR == 0
+ BA = 20 << 21 // Branch always
+};
+
+#if V8_OS_AIX
+#undef CR_LT
+#undef CR_GT
+#undef CR_EQ
+#undef CR_SO
+#endif
+
+enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
+
+#define CRWIDTH 4
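+
+// Illustrative ISA-level note (not a V8 helper): a "branch if equal" is a
+// BCX instruction whose BO field is BT and whose BI field selects the CR_EQ
+// bit of the chosen condition register field, e.g. BI = 7 * CRWIDTH + CR_EQ
+// for cr7.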
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the PPC
+// simulator.
+// svc (formerly swi) provides a 24bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+ // transition to C code
+ kCallRtRedirected = 0x10,
+ // break point
+ kBreakpoint = 0x821008, // bits23-0 of 0x7d821008 = twge r2, r2
+ // stop
+ kStopCode = 1 << 23,
+ // info
+ kInfo = 0x9ff808 // bits23-0 of 0x7d9ff808 = twge r31, r31
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
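+// For example, a simulator stop with user code 42 is encoded as the svc
+// value (kStopCode | 42); a decoder recovers the code as svc & kStopCodeMask.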
+
+// FP rounding modes.
+enum FPRoundingMode {
+ RN = 0, // Round to Nearest.
+ RZ = 1, // Round towards zero.
+ RP = 2, // Round towards Plus Infinity.
+ RM = 3, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM
+};
+
+const uint32_t kFPRoundingModeMask = 3;
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-ppc.cc, as they use named registers
+// and other constants.
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+extern const Instr kPopInstruction;
+
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+extern const Instr kPushRegPattern;
+
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+extern const Instr kPopRegPattern;
+
+// use TWI to indicate redirection call for simulation mode
+const Instr rtCallRedirInstr = TWI;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the PPC
+// architecture instruction set encoding.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr does set the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
+// return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instruction {
+ public:
+ enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPCReadOffset = 8 };
+
+// Helper macro to define static accessors.
+// We use the cast to char* trick to bypass the strict anti-aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field's value out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
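+  // For example (illustrative only): for an instruction word whose primary
+  // opcode is ADDI, Bits(instr, 31, 26) yields 14, while
+  // BitField(instr, 31, 26) yields the field left in place, i.e. 14 << 26.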
+
+ inline int RSValue() const { return Bits(25, 21); }
+ inline int RTValue() const { return Bits(25, 21); }
+ inline int RAValue() const { return Bits(20, 16); }
+ DECLARE_STATIC_ACCESSOR(RAValue);
+ inline int RBValue() const { return Bits(15, 11); }
+ DECLARE_STATIC_ACCESSOR(RBValue);
+ inline int RCValue() const { return Bits(10, 6); }
+ DECLARE_STATIC_ACCESSOR(RCValue);
+
+ inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
+ inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(BitField(31, 26));
+ }
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SvcValue() const {
+ return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+ }
+
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between FP register numbers and names.
+class FPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ private:
+ static const char* names_[kNumFPRegisters];
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
new file mode 100644
index 0000000000..d42420cde1
--- /dev/null
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for ppc independent of OS goes here.
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h" // for cache flushing.
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* buffer, size_t size) {
+  // Nothing to do when flushing zero instructions.
+ if (size == 0) {
+ return;
+ }
+
+#if defined(USE_SIMULATOR)
+ // Not generating PPC instructions for C-code. This means that we are
+  // building a PPC emulator-based target. We should notify the simulator
+ // that the Icache was flushed.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), buffer, size);
+#else
+
+ if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
+ __asm__ __volatile__(
+ "sync \n"
+ "icbi 0, %0 \n"
+ "isync \n"
+ : /* no output */
+ : "r"(buffer)
+ : "memory");
+ return;
+ }
+
+ const int kCacheLineSize = CpuFeatures::cache_line_size();
+ intptr_t mask = kCacheLineSize - 1;
+ byte *start =
+ reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
+ byte *end = static_cast<byte *>(buffer) + size;
+ for (byte *pointer = start; pointer < end; pointer += kCacheLineSize) {
+ __asm__(
+ "dcbf 0, %0 \n"
+ "sync \n"
+ "icbi 0, %0 \n"
+ "isync \n"
+ : /* no output */
+ : "r"(pointer));
+ }
+
+#endif // USE_SIMULATOR
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc
new file mode 100644
index 0000000000..8106853134
--- /dev/null
+++ b/deps/v8/src/ppc/debug-ppc.cc
@@ -0,0 +1,343 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code changing the return from JS function sequence from
+ //
+ // LeaveFrame
+ // blr
+ //
+ // to a call to the debug break return code.
+ // this uses a FIXED_SEQUENCE to load an address constant
+ //
+ // mov r0, <address>
+  //   mtctr r0
+  //   bctrl
+ // bkpt
+ //
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+ patcher.masm()->mov(
+ v8::internal::r0,
+ Operand(reinterpret_cast<intptr_t>(debug_info_->GetIsolate()
+ ->builtins()
+ ->Return_DebugBreak()
+ ->entry())));
+ patcher.masm()->mtctr(v8::internal::r0);
+ patcher.masm()->bctrl();
+ patcher.masm()->bkpt(0);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceInstructions);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ DCHECK(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ DCHECK(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from
+ //
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ //
+ // to a call to the debug break code, using a FIXED_SEQUENCE.
+ //
+ // mov r0, <address>
+  //   mtctr r0
+  //   bctrl
+ //
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+ patcher.masm()->mov(
+ v8::internal::r0,
+ Operand(reinterpret_cast<intptr_t>(
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
+ patcher.masm()->mtctr(v8::internal::r0);
+ patcher.masm()->bctrl();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ DCHECK(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs) {
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(ip);
+ }
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+ __ push(ip);
+
+ // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, causing them to be untouched by GC.
+ DCHECK((object_regs & ~kJSCallerSaved) == 0);
+ DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+ DCHECK((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = {r};
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ TestUnsignedSmiCandidate(reg, r0);
+ __ Assert(eq, kUnableToEncodeValueAsSmi, cr0);
+ }
+ __ SmiTag(reg);
+ }
+ }
+ __ MultiPush(object_regs | non_object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ mov(r3, Operand::Zero()); // no arguments
+ __ mov(r4, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = {r};
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ SmiUntag(reg);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+ }
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ mov(ip, Operand(after_break_target));
+ __ LoadP(ip, MemOperand(ip));
+ __ JumpToJSEntry(ip);
+}
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- r6 : slot in feedback array (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r4.bit() | r6.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-ppc.cc).
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register name = LoadDescriptor::NameRegister();
+ RegList regs = receiver.bit() | name.bit();
+ if (FLAG_vector_ics) {
+ regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
+ }
+ Generate_DebugBreakCallHelper(masm, regs, 0);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-ppc.cc).
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
+ 0);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for keyed IC load (from ic-ppc.cc).
+ GenerateLoadICDebugBreak(masm);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC keyed store call (from ic-ppc.cc).
+ Register receiver = StoreDescriptor::ReceiverRegister();
+ Register name = StoreDescriptor::NameRegister();
+ Register value = StoreDescriptor::ValueRegister();
+ Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
+ 0);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- r3 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r3 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-ppc.cc).
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r4.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments (not smi)
+ // -- r4 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r4.bit(), r3.bit());
+}
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments (not smi)
+ // -- r4 : constructor function
+ // -- r5 : feedback array
+ // -- r6 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r4.bit() | r5.bit() | r6.bit(), r3.bit());
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted no registers can contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(ip, Operand(restarter_frame_function_slot));
+ __ li(r4, Operand::Zero());
+ __ StoreP(r4, MemOperand(ip, 0));
+
+ // Load the function pointer off of our current stack frame.
+ __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
+ kPointerSize));
+
+ // Pop return address, frame and constant pool pointer (if
+ // FLAG_enable_ool_constant_pool).
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Load context from the function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, r4 is function, cp is context.
+ __ Jump(ip);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
new file mode 100644
index 0000000000..58e9e939f5
--- /dev/null
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -0,0 +1,359 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+const int Deoptimizer::table_entry_size_ = 8;
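+// (Each entry generated by TableEntryGenerator::GeneratePrologue below is
+// two instructions, a "li ip, i" followed by a branch to the common code,
+// i.e. 2 * Assembler::kInstrSize = 8 bytes.)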
+
+
+int Deoptimizer::patch_size() {
+#if V8_TARGET_ARCH_PPC64
+ const int kCallInstructionSizeInWords = 7;
+#else
+ const int kCallInstructionSizeInWords = 4;
+#endif
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
+
+ // Invalidate the relocation information, as it will become invalid by the
+ // code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ // We need calls to have a predictable size in the unoptimized code, but
+ // this is optimized code, so we don't have to have a predictable size.
+ int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
+ deopt_entry, kRelocInfo_NONEPTR);
+ int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+ DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+ DCHECK(call_size_in_bytes <= patch_size());
+ CodePatcher patcher(call_address, call_size_in_words);
+ patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
+ DCHECK(prev_call_address == NULL ||
+ call_address >= prev_call_address + patch_size());
+ DCHECK(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(
+ i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
+ }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler());
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(r3.code(), params);
+ output_frame->SetRegister(r4.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on PPC in the input frame.
+ return false;
+}
+
+
+#define __ masm()->
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit();
+
+ const int kDoubleRegsSize =
+ kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ subi(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+ DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ stfd(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ // Get the bailout id from the stack.
+ __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object (r6) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r7.
+ __ mflr(r6);
+ // Correct one word for bailout id.
+ __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+ __ sub(r7, fp, r7);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r3 to r8.
+ __ PrepareCallCFunction(6, r8);
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(r4, Operand(type())); // bailout type,
+ // r5: bailout id already loaded.
+ // r6: code address or 0 already loaded.
+ // r7: Fp-to-sp delta.
+ __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r3 and get the input
+ // frame descriptor pointer to r4 (deoptimizer->input_);
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r5, MemOperand(sp, i * kPointerSize));
+ __ StoreP(r5, MemOperand(r4, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+  // Copy double registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ lfd(d0, MemOperand(sp, src_offset));
+ __ stfd(d0, MemOperand(r4, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+ // Compute a pointer to the unwinding limit in register r5; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ add(r5, r5, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(r7);
+ __ StoreP(r7, MemOperand(r6, 0));
+ __ addi(r6, r6, Operand(kPointerSize));
+ __ bind(&pop_loop_header);
+ __ cmp(r5, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r3); // Preserve deoptimizer object across call.
+ // r3: deoptimizer object; r4: scratch.
+ __ PrepareCallCFunction(1, r4);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r7 = current "FrameDescription** output_",
+ // r4 = one past the last FrameDescription**.
+ __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
+ __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
+ __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
+ __ add(r4, r7, r4);
+ __ b(&outer_loop_header);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
+ __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
+ __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header);
+
+ __ bind(&inner_push_loop);
+ __ addi(r6, r6, Operand(-sizeof(intptr_t)));
+ __ add(r9, r5, r6);
+ __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+ __ push(r9);
+
+ __ bind(&inner_loop_header);
+ __ cmpi(r6, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r7, r4);
+ __ blt(&outer_push_loop);
+
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+ const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ lfd(dreg, MemOperand(r4, src_offset));
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
+ __ push(r9);
+ __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+ __ push(r9);
+ __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+ __ push(r9);
+
+ // Restore the registers from the last output frame.
+ DCHECK(!(ip.bit() & restored_regs));
+ __ mr(ip, r5);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(ip, offset));
+ }
+ }
+
+ __ InitializeRootRegister();
+
+ __ pop(ip); // get continuation, leave pc on stack
+ __ pop(r0);
+ __ mtlr(r0);
+ __ Jump(ip);
+ __ stop("Unreachable.");
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ li(ip, Operand(i));
+ __ b(&done);
+ DCHECK(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
+ __ push(ip);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+#if V8_OOL_CONSTANT_POOL
+ DCHECK(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+#else
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+#endif
+}
+
+
+#undef __
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
new file mode 100644
index 0000000000..63cec8cd85
--- /dev/null
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -0,0 +1,1353 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/constants-ppc.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+  // Writes one disassembled instruction to the output buffer (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatFPRegister(Instruction* instr, const char* format);
+ void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ void UnknownFormat(Instruction* instr, const char* opcname);
+ void MarkerFormat(Instruction* instr, const char* opcname, int id);
+
+ void DecodeExt1(Instruction* instr);
+ void DecodeExt2(Instruction* instr);
+ void DecodeExt4(Instruction* instr);
+ void DecodeExt5(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// Print the double FP register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) { Print(FPRegisters::Name(reg)); }
+
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
+ switch (svc) {
+ case kCallRtRedirected:
+ Print("call rt redirected");
+ return;
+ case kBreakpoint:
+ Print("breakpoint");
+ return;
+ default:
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+ svc & kStopCodeMask, svc & kStopCodeMask);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
+ }
+ return;
+ }
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'r');
+
+ if ((format[1] == 't') || (format[1] == 's')) { // 'rt & 'rs register
+ int reg = instr->RTValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'a') { // 'ra: RA register
+ int reg = instr->RAValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'b') { // 'rb: RB register
+ int reg = instr->RBValue();
+ PrintRegister(reg);
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Handle all FP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
+ DCHECK(format[0] == 'D');
+
+ int retval = 2;
+ int reg = -1;
+ if (format[1] == 't') {
+ reg = instr->RTValue();
+ } else if (format[1] == 'a') {
+ reg = instr->RAValue();
+ } else if (format[1] == 'b') {
+ reg = instr->RBValue();
+ } else if (format[1] == 'c') {
+ reg = instr->RCValue();
+ } else {
+ UNREACHABLE();
+ }
+
+ PrintDRegister(reg);
+
+ return retval;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'o': {
+ if (instr->Bit(10) == 1) {
+ Print("o");
+ }
+ return 1;
+ }
+ case '.': {
+ if (instr->Bit(0) == 1) {
+ Print(".");
+ } else {
+ Print(" "); // ensure consistent spacing
+ }
+ return 1;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 'D': {
+ return FormatFPRegister(instr, format);
+ }
+ case 'i': { // int16
+ int32_t value = (instr->Bits(15, 0) << 16) >> 16;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 5;
+ }
+ case 'u': { // uint16
+ int32_t value = instr->Bits(15, 0);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 6;
+ }
+ case 'l': {
+ // Link (LK) Bit 0
+ if (instr->Bit(0) == 1) {
+ Print("l");
+ }
+ return 1;
+ }
+ case 'a': {
+ // Absolute Address Bit 1
+ if (instr->Bit(1) == 1) {
+ Print("a");
+ }
+ return 1;
+ }
+ case 't': { // 'target: target of branch instructions
+ // target26 or target16
+ DCHECK(STRING_STARTS_WITH(format, "target"));
+ if ((format[6] == '2') && (format[7] == '6')) {
+ int off = ((instr->Bits(25, 2)) << 8) >> 6;
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ return 8;
+ } else if ((format[6] == '1') && (format[7] == '6')) {
+ int off = ((instr->Bits(15, 2)) << 18) >> 16;
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ return 8;
+ }
+ case 's': {
+ DCHECK(format[1] == 'h');
+ int32_t value = 0;
+ int32_t opcode = instr->OpcodeValue() << 26;
+ int32_t sh = instr->Bits(15, 11);
+ if (opcode == EXT5 ||
+ (opcode == EXT2 && instr->Bits(10, 2) << 2 == SRADIX)) {
+ // SH Bits 1 and 15-11 (split field)
+ value = (sh | (instr->Bit(1) << 5));
+ } else {
+ // SH Bits 15-11
+ value = (sh << 26) >> 26;
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ }
+ case 'm': {
+ int32_t value = 0;
+ if (format[1] == 'e') {
+ if (instr->OpcodeValue() << 26 != EXT5) {
+ // ME Bits 10-6
+ value = (instr->Bits(10, 6) << 26) >> 26;
+ } else {
+ // ME Bits 5 and 10-6 (split field)
+ value = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ }
+ } else if (format[1] == 'b') {
+ if (instr->OpcodeValue() << 26 != EXT5) {
+ // MB Bits 5-1
+ value = (instr->Bits(5, 1) << 26) >> 26;
+ } else {
+ // MB Bits 5 and 10-6 (split field)
+ value = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ }
+ } else {
+ UNREACHABLE(); // bad format
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 2;
+ }
+ }
+#if V8_TARGET_ARCH_PPC64
+ case 'd': { // ds value for offset
+ int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+ return 1;
+ }
+#endif
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" in place of the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+
+// For currently unimplemented decodings the disassembler calls
+// UnknownFormat(instr, name), which will just print the opcode name of the
+// instruction.
+void Decoder::UnknownFormat(Instruction* instr, const char* name) {
+ char buffer[100];
+ snprintf(buffer, sizeof(buffer), "%s (unknown-format)", name);
+ Format(instr, buffer);
+}
+
+
+void Decoder::MarkerFormat(Instruction* instr, const char* name, int id) {
+ char buffer[100];
+ snprintf(buffer, sizeof(buffer), "%s %d", name, id);
+ Format(instr, buffer);
+}
+
+
+void Decoder::DecodeExt1(Instruction* instr) {
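+ // EXT1 (opcode 19) covers bclr/bcctr and the condition-register logical
+ // ops; dispatch on the extended opcode in bits 10-1.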
+ switch (instr->Bits(10, 1) << 1) {
+ case MCRF: {
+ UnknownFormat(instr, "mcrf"); // not used by V8
+ break;
+ }
+ case BCLRX: {
+ switch (instr->Bits(25, 21) << 21) {
+ case DCBNZF: {
+ UnknownFormat(instr, "bclrx-dcbnzf");
+ break;
+ }
+ case DCBEZF: {
+ UnknownFormat(instr, "bclrx-dcbezf");
+ break;
+ }
+ case BF: {
+ UnknownFormat(instr, "bclrx-bf");
+ break;
+ }
+ case DCBNZT: {
+ UnknownFormat(instr, "bclrx-dcbbzt");
+ break;
+ }
+ case DCBEZT: {
+ UnknownFormat(instr, "bclrx-dcbnezt");
+ break;
+ }
+ case BT: {
+ UnknownFormat(instr, "bclrx-bt");
+ break;
+ }
+ case DCBNZ: {
+ UnknownFormat(instr, "bclrx-dcbnz");
+ break;
+ }
+ case DCBEZ: {
+ UnknownFormat(instr, "bclrx-dcbez"); // not used by V8
+ break;
+ }
+ case BA: {
+ if (instr->Bit(0) == 1) {
+ Format(instr, "blrl");
+ } else {
+ Format(instr, "blr");
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case BCCTRX: {
+ switch (instr->Bits(25, 21) << 21) {
+ case DCBNZF: {
+ UnknownFormat(instr, "bcctrx-dcbnzf");
+ break;
+ }
+ case DCBEZF: {
+ UnknownFormat(instr, "bcctrx-dcbezf");
+ break;
+ }
+ case BF: {
+ UnknownFormat(instr, "bcctrx-bf");
+ break;
+ }
+ case DCBNZT: {
+ UnknownFormat(instr, "bcctrx-dcbnzt");
+ break;
+ }
+ case DCBEZT: {
+ UnknownFormat(instr, "bcctrx-dcbezf");
+ break;
+ }
+ case BT: {
+ UnknownFormat(instr, "bcctrx-bt");
+ break;
+ }
+ case DCBNZ: {
+ UnknownFormat(instr, "bcctrx-dcbnz");
+ break;
+ }
+ case DCBEZ: {
+ UnknownFormat(instr, "bcctrx-dcbez");
+ break;
+ }
+ case BA: {
+ if (instr->Bit(0) == 1) {
+ Format(instr, "bctrl");
+ } else {
+ Format(instr, "bctr");
+ }
+ break;
+ }
+ default: { UNREACHABLE(); }
+ }
+ break;
+ }
+ case CRNOR: {
+ Format(instr, "crnor (stuff)");
+ break;
+ }
+ case RFI: {
+ Format(instr, "rfi (stuff)");
+ break;
+ }
+ case CRANDC: {
+ Format(instr, "crandc (stuff)");
+ break;
+ }
+ case ISYNC: {
+ Format(instr, "isync (stuff)");
+ break;
+ }
+ case CRXOR: {
+ Format(instr, "crxor (stuff)");
+ break;
+ }
+ case CRNAND: {
+ UnknownFormat(instr, "crnand");
+ break;
+ }
+ case CRAND: {
+ UnknownFormat(instr, "crand");
+ break;
+ }
+ case CREQV: {
+ UnknownFormat(instr, "creqv");
+ break;
+ }
+ case CRORC: {
+ UnknownFormat(instr, "crorc");
+ break;
+ }
+ case CROR: {
+ UnknownFormat(instr, "cror");
+ break;
+ }
+ default: {
+ Unknown(instr); // not used by V8
+ }
+ }
+}
+
+
+void Decoder::DecodeExt2(Instruction* instr) {
+ // Some encodings are 10-1 bits, handle those first
+ switch (instr->Bits(10, 1) << 1) {
+ case SRWX: {
+ Format(instr, "srw'. 'ra, 'rs, 'rb");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SRDX: {
+ Format(instr, "srd'. 'ra, 'rs, 'rb");
+ return;
+ }
+#endif
+ case SRAW: {
+ Format(instr, "sraw'. 'ra, 'rs, 'rb");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SRAD: {
+ Format(instr, "srad'. 'ra, 'rs, 'rb");
+ return;
+ }
+#endif
+ case SRAWIX: {
+ Format(instr, "srawi'. 'ra,'rs,'sh");
+ return;
+ }
+ case EXTSH: {
+ Format(instr, "extsh'. 'ra, 'rs");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case EXTSW: {
+ Format(instr, "extsw'. 'ra, 'rs");
+ return;
+ }
+#endif
+ case EXTSB: {
+ Format(instr, "extsb'. 'ra, 'rs");
+ return;
+ }
+ case LFSX: {
+ Format(instr, "lfsx 'rt, 'ra, 'rb");
+ return;
+ }
+ case LFSUX: {
+ Format(instr, "lfsux 'rt, 'ra, 'rb");
+ return;
+ }
+ case LFDX: {
+ Format(instr, "lfdx 'rt, 'ra, 'rb");
+ return;
+ }
+ case LFDUX: {
+ Format(instr, "lfdux 'rt, 'ra, 'rb");
+ return;
+ }
+ case STFSX: {
+ Format(instr, "stfsx 'rs, 'ra, 'rb");
+ return;
+ }
+ case STFSUX: {
+ Format(instr, "stfsux 'rs, 'ra, 'rb");
+ return;
+ }
+ case STFDX: {
+ Format(instr, "stfdx 'rs, 'ra, 'rb");
+ return;
+ }
+ case STFDUX: {
+ Format(instr, "stfdux 'rs, 'ra, 'rb");
+ return;
+ }
+ }
+
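+ // sradi (XS-form) uses a 9-bit extended opcode in bits 10-2; bit 1 holds
+ // the high bit of the shift amount.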
+ switch (instr->Bits(10, 2) << 2) {
+ case SRADIX: {
+ Format(instr, "sradi'. 'ra,'rs,'sh");
+ return;
+ }
+ }
+
+ // The remaining encodings are matched on bits 9-1; they are not all
+ // XO-form (X-form compares/logicals and the SPR moves are matched here too).
+ switch (instr->Bits(9, 1) << 1) {
+ case CMP: {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->Bit(21)) {
+#endif
+ Format(instr, "cmp 'ra, 'rb");
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ Format(instr, "cmpw 'ra, 'rb");
+ }
+#endif
+ break;
+ }
+ case SLWX: {
+ Format(instr, "slw'. 'ra, 'rs, 'rb");
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SLDX: {
+ Format(instr, "sld'. 'ra, 'rs, 'rb");
+ break;
+ }
+#endif
+ case SUBFCX: {
+ Format(instr, "subfc'. 'rt, 'ra, 'rb");
+ break;
+ }
+ case ADDCX: {
+ Format(instr, "addc'. 'rt, 'ra, 'rb");
+ break;
+ }
+ case CNTLZWX: {
+ Format(instr, "cntlzw'. 'ra, 'rs");
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case CNTLZDX: {
+ Format(instr, "cntlzd'. 'ra, 'rs");
+ break;
+ }
+#endif
+ case ANDX: {
+ Format(instr, "and'. 'ra, 'rs, 'rb");
+ break;
+ }
+ case ANDCX: {
+ Format(instr, "andc'. 'ra, 'rs, 'rb");
+ break;
+ }
+ case CMPL: {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->Bit(21)) {
+#endif
+ Format(instr, "cmpl 'ra, 'rb");
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ Format(instr, "cmplw 'ra, 'rb");
+ }
+#endif
+ break;
+ }
+ case NEGX: {
+ Format(instr, "neg'. 'rt, 'ra");
+ break;
+ }
+ case NORX: {
+ Format(instr, "nor'. 'rt, 'ra, 'rb");
+ break;
+ }
+ case SUBFX: {
+ Format(instr, "subf'. 'rt, 'ra, 'rb");
+ break;
+ }
+ case MULHWX: {
+ Format(instr, "mulhw'o'. 'rt, 'ra, 'rb");
+ break;
+ }
+ case ADDZEX: {
+ Format(instr, "addze'. 'rt, 'ra");
+ break;
+ }
+ case MULLW: {
+ Format(instr, "mullw'o'. 'rt, 'ra, 'rb");
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case MULLD: {
+ Format(instr, "mulld'o'. 'rt, 'ra, 'rb");
+ break;
+ }
+#endif
+ case DIVW: {
+ Format(instr, "divw'o'. 'rt, 'ra, 'rb");
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case DIVD: {
+ Format(instr, "divd'o'. 'rt, 'ra, 'rb");
+ break;
+ }
+#endif
+ case ADDX: {
+ Format(instr, "add'o 'rt, 'ra, 'rb");
+ break;
+ }
+ case XORX: {
+ Format(instr, "xor'. 'ra, 'rs, 'rb");
+ break;
+ }
+ case ORX: {
+ if (instr->RTValue() == instr->RBValue()) {
+ Format(instr, "mr 'ra, 'rb");
+ } else {
+ Format(instr, "or 'ra, 'rs, 'rb");
+ }
+ break;
+ }
+ case MFSPR: {
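+ // The SPR field encodes its two 5-bit halves swapped, so LR (SPR 8)
+ // reads back as 256 from Bits(20, 11) and CTR (SPR 9) as 288.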
+ int spr = instr->Bits(20, 11);
+ if (256 == spr) {
+ Format(instr, "mflr 'rt");
+ } else {
+ Format(instr, "mfspr 'rt ??");
+ }
+ break;
+ }
+ case MTSPR: {
+ int spr = instr->Bits(20, 11);
+ if (256 == spr) {
+ Format(instr, "mtlr 'rt");
+ } else if (288 == spr) {
+ Format(instr, "mtctr 'rt");
+ } else {
+ Format(instr, "mtspr 'rt ??");
+ }
+ break;
+ }
+ case MFCR: {
+ Format(instr, "mfcr 'rt");
+ break;
+ }
+ case STWX: {
+ Format(instr, "stwx 'rs, 'ra, 'rb");
+ break;
+ }
+ case STWUX: {
+ Format(instr, "stwux 'rs, 'ra, 'rb");
+ break;
+ }
+ case STBX: {
+ Format(instr, "stbx 'rs, 'ra, 'rb");
+ break;
+ }
+ case STBUX: {
+ Format(instr, "stbux 'rs, 'ra, 'rb");
+ break;
+ }
+ case STHX: {
+ Format(instr, "sthx 'rs, 'ra, 'rb");
+ break;
+ }
+ case STHUX: {
+ Format(instr, "sthux 'rs, 'ra, 'rb");
+ break;
+ }
+ case LWZX: {
+ Format(instr, "lwzx 'rt, 'ra, 'rb");
+ break;
+ }
+ case LWZUX: {
+ Format(instr, "lwzux 'rt, 'ra, 'rb");
+ break;
+ }
+ case LBZX: {
+ Format(instr, "lbzx 'rt, 'ra, 'rb");
+ break;
+ }
+ case LBZUX: {
+ Format(instr, "lbzux 'rt, 'ra, 'rb");
+ break;
+ }
+ case LHZX: {
+ Format(instr, "lhzx 'rt, 'ra, 'rb");
+ break;
+ }
+ case LHZUX: {
+ Format(instr, "lhzux 'rt, 'ra, 'rb");
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case LDX: {
+ Format(instr, "ldx 'rt, 'ra, 'rb");
+ break;
+ }
+ case LDUX: {
+ Format(instr, "ldux 'rt, 'ra, 'rb");
+ break;
+ }
+ case STDX: {
+ Format(instr, "stdx 'rt, 'ra, 'rb");
+ break;
+ }
+ case STDUX: {
+ Format(instr, "stdux 'rt, 'ra, 'rb");
+ break;
+ }
+ case MFVSRD: {
+ Format(instr, "mffprd 'ra, 'Dt");
+ break;
+ }
+ case MFVSRWZ: {
+ Format(instr, "mffprwz 'ra, 'Dt");
+ break;
+ }
+ case MTVSRD: {
+ Format(instr, "mtfprd 'Dt, 'ra");
+ break;
+ }
+ case MTVSRWA: {
+ Format(instr, "mtfprwa 'Dt, 'ra");
+ break;
+ }
+ case MTVSRWZ: {
+ Format(instr, "mtfprwz 'Dt, 'ra");
+ break;
+ }
+#endif
+ default: {
+ Unknown(instr); // not used by V8
+ }
+ }
+}
+
+
+void Decoder::DecodeExt4(Instruction* instr) {
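+ // EXT4 (opcode 63): A-form arithmetic uses a 5-bit extended opcode in
+ // bits 5-1; the remaining X-form encodings use bits 10-1.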
+ switch (instr->Bits(5, 1) << 1) {
+ case FDIV: {
+ Format(instr, "fdiv'. 'Dt, 'Da, 'Db");
+ return;
+ }
+ case FSUB: {
+ Format(instr, "fsub'. 'Dt, 'Da, 'Db");
+ return;
+ }
+ case FADD: {
+ Format(instr, "fadd'. 'Dt, 'Da, 'Db");
+ return;
+ }
+ case FSQRT: {
+ Format(instr, "fsqrt'. 'Dt, 'Db");
+ return;
+ }
+ case FSEL: {
+ Format(instr, "fsel'. 'Dt, 'Da, 'Dc, 'Db");
+ return;
+ }
+ case FMUL: {
+ Format(instr, "fmul'. 'Dt, 'Da, 'Dc");
+ return;
+ }
+ case FMSUB: {
+ Format(instr, "fmsub'. 'Dt, 'Da, 'Dc, 'Db");
+ return;
+ }
+ case FMADD: {
+ Format(instr, "fmadd'. 'Dt, 'Da, 'Dc, 'Db");
+ return;
+ }
+ }
+
+ switch (instr->Bits(10, 1) << 1) {
+ case FCMPU: {
+ Format(instr, "fcmpu 'Da, 'Db");
+ break;
+ }
+ case FRSP: {
+ Format(instr, "frsp'. 'Dt, 'Db");
+ break;
+ }
+ case FCFID: {
+ Format(instr, "fcfid'. 'Dt, 'Db");
+ break;
+ }
+ case FCTID: {
+ Format(instr, "fctid 'Dt, 'Db");
+ break;
+ }
+ case FCTIDZ: {
+ Format(instr, "fctidz 'Dt, 'Db");
+ break;
+ }
+ case FCTIW: {
+ Format(instr, "fctiw'. 'Dt, 'Db");
+ break;
+ }
+ case FCTIWZ: {
+ Format(instr, "fctiwz'. 'Dt, 'Db");
+ break;
+ }
+ case FMR: {
+ Format(instr, "fmr'. 'Dt, 'Db");
+ break;
+ }
+ case MTFSFI: {
+ Format(instr, "mtfsfi'. ?,?");
+ break;
+ }
+ case MFFS: {
+ Format(instr, "mffs'. 'Dt");
+ break;
+ }
+ case MTFSF: {
+ Format(instr, "mtfsf'. 'Db ?,?,?");
+ break;
+ }
+ case FABS: {
+ Format(instr, "fabs'. 'Dt, 'Db");
+ break;
+ }
+ case FRIM: {
+ Format(instr, "frim 'Dt, 'Db");
+ break;
+ }
+ case FNEG: {
+ Format(instr, "fneg'. 'Dt, 'Db");
+ break;
+ }
+ default: {
+ Unknown(instr); // not used by V8
+ }
+ }
+}
+
+
+void Decoder::DecodeExt5(Instruction* instr) {
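+ // EXT5 (opcode 30) is the rotate-left-doubleword group: MD-form uses a
+ // 3-bit extended opcode in bits 4-2, MDS-form a 4-bit one in bits 4-1.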
+ switch (instr->Bits(4, 2) << 2) {
+ case RLDICL: {
+ Format(instr, "rldicl'. 'ra, 'rs, 'sh, 'mb");
+ return;
+ }
+ case RLDICR: {
+ Format(instr, "rldicr'. 'ra, 'rs, 'sh, 'me");
+ return;
+ }
+ case RLDIC: {
+ Format(instr, "rldic'. 'ra, 'rs, 'sh, 'mb");
+ return;
+ }
+ case RLDIMI: {
+ Format(instr, "rldimi'. 'ra, 'rs, 'sh, 'mb");
+ return;
+ }
+ }
+ switch (instr->Bits(4, 1) << 1) {
+ case RLDCL: {
+ Format(instr, "rldcl'. 'ra, 'rs, 'sb, 'mb");
+ return;
+ }
+ }
+ Unknown(instr); // not used by V8
+}
+
+#undef VERIFY
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits());
+
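+ // Dispatch on the 6-bit primary opcode (bits 31-26).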
+ switch (instr->OpcodeValue() << 26) {
+ case TWI: {
+ PrintSoftwareInterrupt(instr->SvcValue());
+ break;
+ }
+ case MULLI: {
+ UnknownFormat(instr, "mulli");
+ break;
+ }
+ case SUBFIC: {
+ Format(instr, "subfic 'rt, 'ra, 'int16");
+ break;
+ }
+ case CMPLI: {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->Bit(21)) {
+#endif
+ Format(instr, "cmpli 'ra, 'uint16");
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ Format(instr, "cmplwi 'ra, 'uint16");
+ }
+#endif
+ break;
+ }
+ case CMPI: {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->Bit(21)) {
+#endif
+ Format(instr, "cmpi 'ra, 'int16");
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ Format(instr, "cmpwi 'ra, 'int16");
+ }
+#endif
+ break;
+ }
+ case ADDIC: {
+ Format(instr, "addic 'rt, 'ra, 'int16");
+ break;
+ }
+ case ADDICx: {
+ UnknownFormat(instr, "addicx");
+ break;
+ }
+ case ADDI: {
+ if (instr->RAValue() == 0) {
+ // this is load immediate
+ Format(instr, "li 'rt, 'int16");
+ } else {
+ Format(instr, "addi 'rt, 'ra, 'int16");
+ }
+ break;
+ }
+ case ADDIS: {
+ if (instr->RAValue() == 0) {
+ Format(instr, "lis 'rt, 'int16");
+ } else {
+ Format(instr, "addis 'rt, 'ra, 'int16");
+ }
+ break;
+ }
+ case BCX: {
+ int bo = instr->Bits(25, 21) << 21;
+ int bi = instr->Bits(20, 16);
+ switch (bi) {
+ case 2:
+ case 30:
+ if (BT == bo) {
+ Format(instr, "beq'l'a 'target16");
+ break;
+ }
+ if (BF == bo) {
+ Format(instr, "bne'l'a 'target16");
+ break;
+ }
+ Format(instr, "bc'l'a 'target16");
+ break;
+ case 29:
+ if (BT == bo) {
+ Format(instr, "bgt'l'a 'target16");
+ break;
+ }
+ if (BF == bo) {
+ Format(instr, "ble'l'a 'target16");
+ break;
+ }
+ Format(instr, "bc'l'a 'target16");
+ break;
+ case 28:
+ if (BT == bo) {
+ Format(instr, "blt'l'a 'target16");
+ break;
+ }
+ if (BF == bo) {
+ Format(instr, "bge'l'a 'target16");
+ break;
+ }
+ Format(instr, "bc'l'a 'target16");
+ break;
+ default:
+ Format(instr, "bc'l'a 'target16");
+ break;
+ }
+ break;
+ }
+ case SC: {
+ UnknownFormat(instr, "sc");
+ break;
+ }
+ case BX: {
+ Format(instr, "b'l'a 'target26");
+ break;
+ }
+ case EXT1: {
+ DecodeExt1(instr);
+ break;
+ }
+ case RLWIMIX: {
+ Format(instr, "rlwimi'. 'ra, 'rs, 'sh, 'me, 'mb");
+ break;
+ }
+ case RLWINMX: {
+ Format(instr, "rlwinm'. 'ra, 'rs, 'sh, 'me, 'mb");
+ break;
+ }
+ case RLWNMX: {
+ Format(instr, "rlwnm'. 'ra, 'rs, 'rb, 'me, 'mb");
+ break;
+ }
+ case ORI: {
+ Format(instr, "ori 'ra, 'rs, 'uint16");
+ break;
+ }
+ case ORIS: {
+ Format(instr, "oris 'ra, 'rs, 'uint16");
+ break;
+ }
+ case XORI: {
+ Format(instr, "xori 'ra, 'rs, 'uint16");
+ break;
+ }
+ case XORIS: {
+ Format(instr, "xoris 'ra, 'rs, 'uint16");
+ break;
+ }
+ case ANDIx: {
+ Format(instr, "andi. 'ra, 'rs, 'uint16");
+ break;
+ }
+ case ANDISx: {
+ Format(instr, "andis. 'ra, 'rs, 'uint16");
+ break;
+ }
+ case EXT2: {
+ DecodeExt2(instr);
+ break;
+ }
+ case LWZ: {
+ Format(instr, "lwz 'rt, 'int16('ra)");
+ break;
+ }
+ case LWZU: {
+ Format(instr, "lwzu 'rt, 'int16('ra)");
+ break;
+ }
+ case LBZ: {
+ Format(instr, "lbz 'rt, 'int16('ra)");
+ break;
+ }
+ case LBZU: {
+ Format(instr, "lbzu 'rt, 'int16('ra)");
+ break;
+ }
+ case STW: {
+ Format(instr, "stw 'rs, 'int16('ra)");
+ break;
+ }
+ case STWU: {
+ Format(instr, "stwu 'rs, 'int16('ra)");
+ break;
+ }
+ case STB: {
+ Format(instr, "stb 'rs, 'int16('ra)");
+ break;
+ }
+ case STBU: {
+ Format(instr, "stbu 'rs, 'int16('ra)");
+ break;
+ }
+ case LHZ: {
+ Format(instr, "lhz 'rt, 'int16('ra)");
+ break;
+ }
+ case LHZU: {
+ Format(instr, "lhzu 'rt, 'int16('ra)");
+ break;
+ }
+ case LHA: {
+ Format(instr, "lha 'rt, 'int16('ra)");
+ break;
+ }
+ case LHAU: {
+ Format(instr, "lhau 'rt, 'int16('ra)");
+ break;
+ }
+ case STH: {
+ Format(instr, "sth 'rs, 'int16('ra)");
+ break;
+ }
+ case STHU: {
+ Format(instr, "sthu 'rs, 'int16('ra)");
+ break;
+ }
+ case LMW: {
+ UnknownFormat(instr, "lmw");
+ break;
+ }
+ case STMW: {
+ UnknownFormat(instr, "stmw");
+ break;
+ }
+ case LFS: {
+ Format(instr, "lfs 'Dt, 'int16('ra)");
+ break;
+ }
+ case LFSU: {
+ Format(instr, "lfsu 'Dt, 'int16('ra)");
+ break;
+ }
+ case LFD: {
+ Format(instr, "lfd 'Dt, 'int16('ra)");
+ break;
+ }
+ case LFDU: {
+ Format(instr, "lfdu 'Dt, 'int16('ra)");
+ break;
+ }
+ case STFS: {
+ Format(instr, "stfs 'Dt, 'int16('ra)");
+ break;
+ }
+ case STFSU: {
+ Format(instr, "stfsu 'Dt, 'int16('ra)");
+ break;
+ }
+ case STFD: {
+ Format(instr, "stfd 'Dt, 'int16('ra)");
+ break;
+ }
+ case STFDU: {
+ Format(instr, "stfdu 'Dt, 'int16('ra)");
+ break;
+ }
+ case EXT3:
+ case EXT4: {
+ DecodeExt4(instr);
+ break;
+ }
+ case EXT5: {
+ DecodeExt5(instr);
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case LD: {
+ switch (instr->Bits(1, 0)) {
+ case 0:
+ Format(instr, "ld 'rt, 'd('ra)");
+ break;
+ case 1:
+ Format(instr, "ldu 'rt, 'd('ra)");
+ break;
+ case 2:
+ Format(instr, "lwa 'rt, 'd('ra)");
+ break;
+ }
+ break;
+ }
+ case STD: { // could be STD or STDU
+ if (instr->Bit(0) == 0) {
+ Format(instr, "std 'rs, 'd('ra)");
+ } else {
+ Format(instr, "stdu 'rs, 'd('ra)");
+ }
+ break;
+ }
+#endif
+
+ case FAKE_OPCODE: {
+ if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
+ int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
+ DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
+ MarkerFormat(instr, "stub-marker ", marker_code);
+ } else {
+ int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
+ MarkerFormat(instr, "faker-opcode ", fake_opcode);
+ }
+ break;
+ }
+ default: {
+ Unknown(instr);
+ break;
+ }
+ }
+
+ return Instruction::kInstrSize;
+}
+}
+} // namespace v8::internal
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // PPC does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ UNREACHABLE(); // PPC does not have any XMM registers
+ return "noxmmreg";
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code. So we will not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+
+// The PPC assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", prev_pc,
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
new file mode 100644
index 0000000000..4b52882b0c
--- /dev/null
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/macro-assembler.h"
+
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+#if V8_OOL_CONSTANT_POOL
+ DCHECK(FLAG_enable_ool_constant_pool);
+ return kConstantPoolRegister;
+#else
+ UNREACHABLE();
+ return no_reg;
+#endif
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+#if V8_OOL_CONSTANT_POOL
+ DCHECK(FLAG_enable_ool_constant_pool);
+ return kConstantPoolRegister;
+#else
+ UNREACHABLE();
+ return no_reg;
+#endif
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+#if V8_OOL_CONSTANT_POOL
+ DCHECK(FLAG_enable_ool_constant_pool);
+ const int offset = ExitFrameConstants::kConstantPoolOffset;
+ return Memory::Object_at(fp() + offset);
+#else
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+#endif
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
new file mode 100644
index 0000000000..f00fa668a8
--- /dev/null
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -0,0 +1,202 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_FRAMES_PPC_H_
+#define V8_PPC_FRAMES_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 32;
+
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 3 | // r3 a1
+ 1 << 4 | // r4 a2
+ 1 << 5 | // r5 a3
+ 1 << 6 | // r6 a4
+ 1 << 7 | // r7 a5
+ 1 << 8 | // r8 a6
+ 1 << 9 | // r9 a7
+ 1 << 10 | // r10 a8
+ 1 << 11;  // r11
+
+const int kNumJSCallerSaved = 9;
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedCode(0) returns r3.code() == 3
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 14 | // r14
+ 1 << 15 | // r15
+ 1 << 16 | // r16
+ 1 << 17 | // r17
+ 1 << 18 | // r18
+ 1 << 19 | // r19
+ 1 << 20 | // r20
+ 1 << 21 | // r21
+ 1 << 22 | // r22
+ 1 << 23 | // r23
+ 1 << 24 | // r24
+ 1 << 25 | // r25
+ 1 << 26 | // r26
+ 1 << 27 | // r27
+ 1 << 28 | // r28
+ 1 << 29 | // r29
+ 1 << 30 |  // r30
+ 1 << 31; // r31
+
+
+const int kNumCalleeSaved = 18;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI. Note that kNumRequiredStackFrameSlots must
+// satisfy alignment requirements (rounding up if required).
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] TOC save area
+// [4] Parameter1 save area
+// ...
+// [11] Parameter8 save area
+// [12] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 12;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 12;
+#elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] reserved for compiler
+// [4] reserved by binder
+// [5] TOC save area
+// [6] Parameter1 save area
+// ...
+// [13] Parameter8 save area
+// [14] Parameter9 slot (if necessary)
+// ...
+#if V8_TARGET_ARCH_PPC64
+const int kNumRequiredStackFrameSlots = 14;
+#else
+const int kNumRequiredStackFrameSlots = 16;
+#endif
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 14;
+#else
+// [0] back chain
+// [1] link register save area
+// [2] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 4;
+const int kStackFrameLRSlot = 1;
+const int kStackFrameExtraParamSlot = 2;
+#endif
+
+// ----------------------------------------------------
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+#if V8_OOL_CONSTANT_POOL
+ static const int kFrameSize = 3 * kPointerSize;
+ static const int kConstantPoolOffset = -3 * kPointerSize;
+#else
+ static const int kFrameSize = 2 * kPointerSize;
+ static const int kConstantPoolOffset = 0; // Not used.
+#endif
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // Caller SP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_FRAMES_PPC_H_
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc
new file mode 100644
index 0000000000..1bb4f54f4a
--- /dev/null
+++ b/deps/v8/src/ppc/full-codegen-ppc.cc
@@ -0,0 +1,5290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+#include "src/ppc/code-stubs-ppc.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// A patch site is a location in the code which it is possible to patch. This
+// class has a number of methods to emit the code which is patchable and the
+// method EmitPatchInfo to record a marker back to the patchable code. This
+// marker is a cmpi rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16 bit
+// immediate value is used) is the delta from the pc to the first instruction of
+// the patchable code.
+// See PatchInlinedSmiCode in ic-ppc.cc for the code that patches it
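+// For example, a delta of 0x1234 is recorded as "cmpi r0, #0x1234"
+// (register code = delta / kOff16Mask, immediate = delta % kOff16Mask).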
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
+
+ // When initially emitting this ensure that a jump is always generated to skip
+ // the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(&patch_site_);
+ __ cmp(reg, reg, cr0);
+ __ beq(target, cr0); // Always taken before patched.
+ }
+
+ // When initially emitting this ensure that a jump is never generated to skip
+ // the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, reg, cr0);
+ __ bne(target, cr0); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ // reg carries the high-order part of the offset (delta / kOff16Mask);
+ // the 16-bit immediate carries the remainder.
+ reg.set_code(delta_to_patch_site / kOff16Mask);
+ __ cmpi(reg, Operand(delta_to_patch_site % kOff16Mask));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o r4: the JS function object being called (i.e., ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer (aka r31)
+// o sp: stack pointer
+// o lr: return address
+// o ip: our own function entry (required by the prologue)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ppc.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop-at");
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ LoadP(r5, MemOperand(sp, receiver_offset), r0);
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bne(&ok);
+
+ __ LoadP(r5, GlobalObjectOperand());
+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+ __ StoreP(r5, MemOperand(sp, receiver_offset), r0);
+
+ __ bind(&ok);
+ }
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ int prologue_offset = masm_->pc_offset();
+
+ if (prologue_offset) {
+ // Prologue logic requires its starting address in ip and the
+ // corresponding offset from the function entry.
+ prologue_offset += Instruction::kInstrSize;
+ __ addi(ip, ip, Operand(prologue_offset));
+ }
+ info->set_prologue_offset(prologue_offset);
+ __ Prologue(info->IsCodePreAgingActive(), prologue_offset);
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ {
+ Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ DCHECK(!info->function()->is_generator() || locals_count == 0);
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ Add(ip, sp, -(locals_count * kPointerSize), r0);
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ __ cmpl(ip, r5);
+ __ bc_short(ge, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r5, Operand(loop_iterations));
+ __ mtctr(r5);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(ip);
+ }
+ // Continue loop if not done.
+ __ bdnz(&loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(ip);
+ }
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Argument to NewContext is the function, which is still in r4.
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ __ push(r4);
+ __ Push(info->scope()->GetScopeInfo());
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r4);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in r3. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ mr(cp, r3);
+ __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r3, MemOperand(fp, parameter_offset), r0);
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ StoreP(r3, target, r0);
+
+ // Update the write barrier.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r3, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mr(r6, r4);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(num_parameters));
+ __ Push(r6, r5, r4);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite the receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(isolate(), type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, r3, r4, r5);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
+ Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ DCHECK(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bc_short(ge, &ok);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
+
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(function()->body());
+ DCHECK(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ {
+ Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r5, Operand(profiling_counter_));
+ __ LoadP(r6, FieldMemOperand(r5, Cell::kValueOffset));
+ __ SubSmiLiteral(r6, r6, Smi::FromInt(delta), r0);
+ __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->is_debug()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ mov(r5, Operand(profiling_counter_));
+ __ LoadSmiLiteral(r6, Smi::FromInt(reset_value));
+ __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+
+ DCHECK(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+ kCodeSizeMultiplier / 2;
+ int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ // BackEdgeTable::PatchAt manipulates this sequence.
+ __ cmpi(r6, Operand::Zero());
+ __ bc_short(ge, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ }
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r3
+ __ push(r3);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ cmpi(r6, Operand::Zero());
+ __ bge(&ok);
+ __ push(r3);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ __ pop(r3);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ __ bind(&check_exit_codesize);
+#endif
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+#if V8_TARGET_ARCH_PPC64
+ // With 64bit we may need nop() instructions to ensure we have
+ // enough space to SetDebugBreakAtReturn()
+ if (is_int16(sp_delta)) {
+#if !V8_OOL_CONSTANT_POOL
+ masm_->nop();
+#endif
+ masm_->nop();
+ }
+#endif
+ __ blr();
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ DCHECK(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ mov(result_register(), Operand(lit));
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count, Register reg) const {
+ DCHECK(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ StoreP(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ b(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ b(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == true_label_);
+ DCHECK(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(ip, value_root_index);
+ __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+ Label* if_false, Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ cmpi(result_register(), Operand::Zero());
+ Split(ne, if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+ Label* fall_through, CRegister cr) {
+ if (if_false == fall_through) {
+ __ b(cond, if_true, cr);
+ } else if (if_true == fall_through) {
+ __ b(NegateCondition(cond), if_false, cr);
+ } else {
+ __ b(cond, if_true, cr);
+ __ b(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ DCHECK(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ LoadP(dest, location, r0);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
+ Register scratch1) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(!scratch0.is(src));
+ DCHECK(!scratch0.is(scratch1));
+ DCHECK(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ StoreP(src, location, r0);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ b(&skip);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r3, ip);
+ Split(eq, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(r4, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ mov(r5, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ DCHECK(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+ __ LoadSmiLiteral(r4, Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, r5, r4, r3);
+ } else {
+ __ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
+ __ Push(cp, r5, r4, r3);
+ }
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp, offset, result_register(), r5,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ mov(r5, Operand(variable->name()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(NONE));
+ __ Push(cp, r5, r4);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ DCHECK(variable->location() == Variable::CONTEXT);
+ DCHECK(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(r4, scope_->ContextChainLength(scope_->ScriptScope()));
+ __ LoadP(r4, ContextOperand(r4, variable->interface()->Index()));
+ __ LoadP(r4, ContextOperand(r4, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ StoreP(r4, ContextOperand(cp, variable->index()), r0);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp, Context::SlotOffset(variable->index()), r4, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ mov(r4, Operand(pairs));
+ __ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
+ __ Push(cp, r4, r3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ LoadP(r4, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ orx(r5, r4, r3);
+ patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+
+ __ cmp(r4, r3);
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ b(&skip);
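+    // The block below is only reached through the deopt bailout for this
+    // clause; the comparison result is then a boolean in r3. The normal
+    // path branches straight to &skip.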
+ PrepareForBailout(clause, TOS_REG);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r3, ip);
+ __ bne(&next_test);
+ __ Drop(1);
+ __ b(clause->body_target());
+ __ bind(&skip);
+
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ b(nested_statement.break_label());
+ } else {
+ __ b(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
+ __ beq(&exit);
+ Register null_value = r7;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r3, null_value);
+ __ beq(&exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(r3, &convert);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&done_convert);
+ __ bind(&convert);
+ __ push(r3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ __ push(r3);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+ __ ble(&call_runtime);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r3); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+ __ cmp(r5, ip);
+ __ bne(&fixed_array);
+
+ // We got a map in register r3. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(r4, r3);
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ beq(&no_descriptors);
+
+ __ LoadInstanceDescriptors(r3, r5);
+ __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r5,
+ FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(r3); // Map.
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(r5, r4, r3);
+ __ b(&loop);
+
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ b(&exit);
+
+ // We got a fixed array in register r3. Iterate through that.
+ Label non_proxy;
+ __ bind(&fixed_array);
+
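+  // Mark this for-in's feedback vector slot with the megamorphic sentinel;
+  // the generic (fixed array) path is being taken.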
+ __ Move(r4, FeedbackVector());
+ __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ int vector_index = FeedbackVector()->GetIndex(slot);
+ __ StoreP(
+ r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
+ __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi indicates slow check
+ __ LoadP(r5, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r5, r6, r6, LAST_JS_PROXY_TYPE);
+ __ bgt(&non_proxy);
+ __ LoadSmiLiteral(r4, Smi::FromInt(0)); // Zero indicates proxy
+ __ bind(&non_proxy);
+ __ Push(r4, r3); // Smi and array
+ __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ Push(r4, r3); // Fixed array length (as smi) and initial index.
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ bind(&loop);
+ // Load the current count to r3, load the length to r4.
+ __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ cmpl(r3, r4); // Compare to the array length.
+ __ bge(loop_statement.break_label());
+
+ // Get the current entry of the array into register r6.
+ __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+ __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(r6, r3);
+ __ LoadPX(r6, MemOperand(r6, r5));
+
+  // Get the expected map from the stack into register r5 (in the permanent
+  // slow case the slot holds a smi instead of a map).
+ __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ LoadP(r4, MemOperand(sp, 4 * kPointerSize));
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ cmp(r7, r5);
+ __ beq(&update_each);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ __ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+ __ beq(&update_each);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(r4, r6); // Enumerable and current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ mr(r6, r3);
+ __ cmpi(r6, Operand::Zero());
+ __ beq(loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register r6.
+ __ bind(&update_each);
+ __ mr(result_register(), r6);
+ // Perform the assignment as if via '='.
+ {
+ EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ pop(r3);
+ __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+ __ push(r3);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ b(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
+
+ // Loop entry.
+ __ bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(), loop_statement.break_label(),
+ &result_not_done, &result_not_done);
+ __ bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ b(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+ scope()->is_function_scope() && info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+ __ mov(r5, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ mov(r3, Operand(info));
+ __ LoadRoot(
+ r4, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ Push(cp, r3, r4);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+ Comment cnmt(masm_, "[ SuperReference ");
+
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+ __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+ }
+
+ __ Cmpi(r3, Operand(isolate()->factory()->undefined_value()), r0);
+ Label done;
+ __ bne(&done);
+ __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+ __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = r4;
+ Register temp = r5;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
+ }
+ // Load next context in chain.
+ __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at native context.
+ __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+ __ cmp(temp, ip);
+ __ beq(&fast);
+ // Check that extension is NULL.
+ __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
+ // Load next context in chain.
+ __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ b(&loop);
+ __ bind(&fast);
+ }
+
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ }
+
+ ContextualMode mode =
+ (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ DCHECK(var->IsContextSlot());
+ Register context = cp;
+ Register next = r6;
+ Register temp = r7;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
+ }
+ __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+ TypeofState typeof_state,
+ Label* slow, Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ Variable* var = proxy->var();
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ __ b(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+ }
+ __ b(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ }
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(r3);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        // var->location() == LOOKUP always holds.
+ DCHECK(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+ DCHECK(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ Label done;
+ // Let and const need a read barrier.
+ GetVar(r3, var);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(&done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+            // Uninitialized const bindings outside of harmony mode are never holed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(r3);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ bind(&slow);
+ __ mov(r4, Operand(var->name()));
+ __ Push(cp, r4); // Context and name.
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ bind(&done);
+ context()->Plug(r3);
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // r8 = materialized value (RegExp literal)
+ // r7 = JS function, literals array
+ // r6 = literal index
+ // r5 = RegExp pattern
+ // r4 = RegExp flags
+ // r3 = RegExp literal clone
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r8, ip);
+ __ bne(&materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in r3.
+ __ LoadSmiLiteral(r6, Smi::FromInt(expr->literal_index()));
+ __ mov(r5, Operand(expr->pattern()));
+ __ mov(r4, Operand(expr->flags()));
+ __ Push(r7, r6, r5, r4);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mr(r8, r3);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
+ __ b(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ LoadSmiLiteral(r3, Smi::FromInt(size));
+ __ Push(r8, r3);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(r8);
+
+ __ bind(&allocated);
+ // After this, registers are used as follows:
+ // r3: Newly allocated regexp.
+ // r8: Materialized regexp.
+ // r5: temp.
+ __ CopyFields(r3, r8, r5.bit(), size / kPointerSize);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(r4, Heap::kNullValueRootIndex);
+ __ push(r4);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(constant_properties));
+ int flags = expr->fast_elements() ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function() ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(r6, r5, r4, r3);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
+ __ CallStub(&stub);
+ }
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in r3.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r3); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ DCHECK(StoreDescriptor::ValueRegister().is(r3));
+ __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
+ __ push(r3);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ } else {
+ __ Drop(2);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
+ __ push(r3);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
+ __ push(r3);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ }
+
+ if (expr->has_function()) {
+ DCHECK(result_saved);
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r3);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1 ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ DCHECK_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(constant_elements));
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+ __ Push(r6, r5, r4, r3);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ } else {
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ push(r3);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
+ __ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
+ __ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
+ // Update the write barrier for the array store.
+ __ RecordWriteField(r4, offset, result_register(), r5, kLRHasBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ } else {
+ __ LoadSmiLiteral(r6, Smi::FromInt(i));
+ StoreArrayLiteralElementStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ pop(); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r3);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ Property* property = expr->target()->AsProperty();
+ LhsKind assign_type = GetAssignType(property);
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = r4;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
+ }
+ break;
+ case KEYED_SUPER_PROPERTY: {
+ const Register scratch = r4;
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Move(scratch, result_register());
+ VisitForAccumulatorValue(property->key());
+ __ Push(scratch, result_register());
+ if (expr->is_compound()) {
+ const Register scratch1 = r5;
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ __ Push(scratch1, scratch, result_register());
+ }
+ break;
+ }
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ {
+ AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(r3); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, mode, expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r3);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyStore(property);
+ context()->Plug(r3);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyStore(property);
+ context()->Plug(r3);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ switch (expr->yield_kind()) {
+ case Yield::kSuspend:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ push(result_register());
+ // Fall through.
+ case Yield::kInitial: {
+ Label suspend, continuation, post_runtime, resume;
+
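+      // Fall through to &suspend. On resumption the generator re-enters at
+      // &continuation (its position is stored in the generator object below)
+      // and immediately branches to &resume.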
+ __ b(&suspend);
+
+ __ bind(&continuation);
+ __ b(&resume);
+
+ __ bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+ __ mr(r4, cp);
+ __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r4);
+ __ beq(&post_runtime);
+ __ push(r3); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ __ pop(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::kFinal: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ StoreP(r4, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ r0);
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::kDelegating: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call;
+ Register load_receiver = LoadDescriptor::ReceiverRegister();
+ Register load_name = LoadDescriptor::NameRegister();
+
+ // Initial send value is undefined.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
+ __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r6, r3); // "throw", iter, except
+ __ b(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ bind(&l_try);
+ __ pop(r3); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ push(r3); // result
+ __ b(&l_suspend);
+ __ bind(&l_continuation);
+ __ b(&l_resume);
+ __ bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ LoadP(r3, MemOperand(sp, generator_object_depth));
+ __ push(r3); // g
+ DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+ __ mr(r4, cp);
+ __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(r3); // result
+ EmitReturnSequence();
+ __ bind(&l_resume); // received in r3
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ bind(&l_next);
+
+ __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
+ __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r6, r3); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ bind(&l_call);
+ __ LoadP(load_receiver, MemOperand(sp, kPointerSize));
+ __ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+ }
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ CallIC(ic, TypeFeedbackId::None());
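+      // The load leaves the iterator method in r3; move it into r4, which
+      // CallFunctionStub expects to hold the function, and overwrite the name
+      // slot on the stack so it can be dropped after the call.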
+ __ mr(r4, r3);
+ __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Move(load_receiver, r3);
+
+ __ push(load_receiver); // save result
+ __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
+ }
+      CallLoadIC(NOT_CONTEXTUAL);  // r3=result.done
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&l_try);
+
+ // result.value
+ __ pop(load_receiver); // result
+ __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
+ }
+ CallLoadIC(NOT_CONTEXTUAL); // r3=result.value
+ context()->DropAndPlug(2, r3); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(
+ Expression* generator, Expression* value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ // The value stays in r3, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // r4 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ pop(r4);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+ __ beq(&closed_state);
+ __ blt(&wrong_state);
+
+ // Load suspended function and context.
+ __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+ __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ push(r5);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_PPC64
+ __ cmpi(r6, Operand::Zero());
+ __ beq(&push_frame);
+#else
+ __ SmiUntag(r6, SetRC);
+ __ beq(&push_frame, cr0);
+#endif
+ __ mtctr(r6);
+ __ bind(&argument_loop);
+ __ push(r5);
+ __ bdnz(&argument_loop);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
+ __ bind(&push_frame);
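+  // Branch with link: lr now points at the 'b &done' that follows, so when
+  // the resumed generator eventually returns, control lands at &done.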
+ __ b(&resume_frame, SetLK);
+ __ b(&done);
+ __ bind(&resume_frame);
+ // lr = return address.
+ // fp = caller's frame pointer.
+ // cp = callee's context,
+ // r7 = callee's JS function.
+ __ PushFixedFrame(r7);
+ // Adjust FP to point to saved FP.
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Load the operand stack size.
+ __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
+ __ SmiUntag(r6, SetRC);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ Label call_resume;
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ bne(&slow_resume, cr0);
+ __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
+#if V8_OOL_CONSTANT_POOL
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ // Load the new code object's constant pool pointer.
+ __ LoadP(kConstantPoolRegister,
+ MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize));
+#endif
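+    // Resume in place: add the saved continuation offset to the code entry,
+    // mark the generator as executing, and jump into the suspended frame.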
+ __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r5);
+ __ add(ip, ip, r5);
+ __ LoadSmiLiteral(r5,
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ Jump(ip);
+ __ bind(&slow_resume);
+#if V8_OOL_CONSTANT_POOL
+ }
+#endif
+ } else {
+ __ beq(&call_resume, cr0);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label operand_loop;
+ __ mtctr(r6);
+ __ bind(&operand_loop);
+ __ push(r5);
+ __ bdnz(&operand_loop);
+
+ __ bind(&call_resume);
+ DCHECK(!result_register().is(r4));
+ __ Push(r4, result_register());
+ __ Push(Smi::FromInt(resume_mode));
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrow, 1);
+ }
+ __ b(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ bind(&wrong_state);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ const int instance_size = 5 * kPointerSize;
+ DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+ instance_size);
+
+ __ Allocate(instance_size, r3, r5, r6, &gc_required, TAG_OBJECT);
+ __ b(&allocated);
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(instance_size));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ LoadP(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&allocated);
+ __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ pop(r5);
+ __ mov(r6, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r7, Operand(isolate()->factory()->empty_fixed_array()));
+ __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ StoreP(r5,
+ FieldMemOperand(r3, JSGeneratorObject::kResultValuePropertyOffset),
+ r0);
+ __ StoreP(r6,
+ FieldMemOperand(r3, JSGeneratorObject::kResultDonePropertyOffset),
+ r0);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(r3, JSGeneratorObject::kResultValuePropertyOffset, r5, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!prop->IsSuperAccess());
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object.
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ __ Push(key->value());
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+ CallIC(ic);
+ } else {
+ CallIC(ic, prop->PropertyFeedbackId());
+ }
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object, key.
+ SetSourcePosition(prop->position());
+
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = r5;
+ Register scratch2 = r6;
+
+ // Get the arguments.
+ Register left = r4;
+ Register right = r3;
+ __ pop(left);
+
+ // Perform combined smi check on both operands.
+ __ orx(scratch1, left, right);
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ b(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the type
+ // recording binary operation stub.
+ switch (op) {
+ case Token::SAR:
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ ShiftRightArith(right, left, scratch1);
+ __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
+ break;
+ case Token::SHL: {
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+#if V8_TARGET_ARCH_PPC64
+ __ ShiftLeft_(right, left, scratch2);
+#else
+ __ SmiUntag(scratch1, left);
+ __ ShiftLeft_(scratch1, scratch1, scratch2);
+ // Check that the *signed* result fits in a smi
+ __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+ __ SmiTag(right, scratch1);
+#endif
+ break;
+ }
+ case Token::SHR: {
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srw(scratch1, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number.
+ __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::ADD: {
+ __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
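+      // AddAndCheckForOverflow sets cr0; fall back to the BinaryOpIC stub if
+      // the addition overflowed.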
+ __ bne(&stub_call, cr0);
+ __ mr(right, scratch1);
+ break;
+ }
+ case Token::SUB: {
+ __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ bne(&stub_call, cr0);
+ __ mr(right, scratch1);
+ break;
+ }
+ case Token::MUL: {
+ Label mul_zero;
+#if V8_TARGET_ARCH_PPC64
+ // Remove tag from both operands.
+ __ SmiUntag(ip, right);
+ __ SmiUntag(r0, left);
+ __ Mul(scratch1, r0, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch1, scratch2, ip);
+ __ bne(&stub_call);
+#else
+ __ SmiUntag(ip, right);
+ __ mullw(scratch1, left, ip);
+ __ mulhw(scratch2, left, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch2, scratch1, ip);
+ __ bne(&stub_call);
+#endif
+ // Go slow on zero result to handle -0.
+ __ cmpi(scratch1, Operand::Zero());
+ __ beq(&mul_zero);
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(right, scratch1);
+#else
+ __ mr(right, scratch1);
+#endif
+ __ b(&done);
+      // We need -0 if we were multiplying a negative number by 0 to get 0.
+      // We know one of them was zero.
+ __ bind(&mul_zero);
+ __ add(scratch2, right, left);
+ __ cmpi(scratch2, Operand::Zero());
+ __ blt(&stub_call);
+ __ LoadSmiLiteral(right, Smi::FromInt(0));
+ break;
+ }
+ case Token::BIT_OR:
+ __ orx(right, left, right);
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ xor_(right, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in r3.
+ DCHECK(lit != NULL);
+ __ push(r3);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = r4;
+ __ LoadP(scratch,
+ FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ LoadP(scratch, MemOperand(sp, 0)); // prototype
+ }
+ __ push(scratch);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
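+  // The prototype and the constructor pushed above are still on the stack;
+  // each runtime call below consumes one of them.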
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op,
+ OverwriteMode mode) {
+ __ pop(r4);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ DCHECK(expr->IsValidReferenceExpression());
+
+ Property* prop = expr->AsProperty();
+ LhsKind assign_type = GetAssignType(prop);
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(r3); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ __ Push(r3);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; r3: home_object
+ Register scratch = r5;
+ Register scratch2 = r6;
+ __ mr(scratch, result_register()); // home_object
+ __ LoadP(r3, MemOperand(sp, kPointerSize)); // value
+ __ LoadP(scratch2, MemOperand(sp, 0)); // this
+ __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 0)); // home_object
+ // stack: this, home_object; r3: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ Push(r3);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = r5;
+ Register scratch2 = r6;
+ __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; r3: key, r6: value
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(scratch, MemOperand(sp, 0)); // home_object
+ __ StoreP(scratch, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 0));
+ __ Move(r3, scratch2);
+ // stack: this, home_object, key; r3: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(r3); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Move(StoreDescriptor::NameRegister(), r3);
+ __ Pop(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ StoreP(result_register(), location, r0);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mr(r6, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(r4, offset, r6, r5, kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(r3);
+ __ mov(r3, Operand(var->name()));
+ __ Push(cp, r3); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r6, location);
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r6, Operand(var->name()));
+ __ push(r6);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ // Perform the assignment.
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ if (var->IsLookupSlot()) {
+ // Assignment to var.
+ __ push(r3); // Value.
+ __ mov(r4, Operand(var->name()));
+ __ mov(r3, Operand(Smi::FromInt(strict_mode())));
+ __ Push(cp, r4, r3); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ } else {
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ MemOperand location = VarOperand(var, r4);
+ if (generate_debug_code_ && op == Token::INIT_LET) {
+ // Check for an uninitialized let binding.
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ DCHECK(prop->key()->IsLiteral());
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+ // Assignment to named property of super.
+ // r3 : value
+ // stack : receiver ('this'), home_object
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ __ Push(key->value());
+ __ Push(r3);
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy),
+ 4);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+  // Assignment to keyed property of super.
+ // r3 : value
+ // stack : receiver ('this'), home_object, key
+ DCHECK(prop != NULL);
+
+ __ Push(r3);
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(r3));
+
+ Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), r3);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(expr);
+ }
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(r3);
+ } else {
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), r3);
+ __ pop(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
+ context()->Plug(r3);
+ }
+}
+
+
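+// Common tail for every IC invocation: keeps ic_total_count_ up to date and
+// attaches the optional TypeFeedbackId to the generated call.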
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallICState::CallType call_type =
+ callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
+
+ // Get the target function.
+ if (call_type == CallICState::FUNCTION) {
+ {
+ StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ LoadP(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+ }
+
+ EmitCall(expr, call_type);
+}
+
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = r4;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ mr(scratch, r3);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(scratch, r3, r3, scratch);
+ __ Push(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r3);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ LoadP(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ // Load the function from the receiver.
+ const Register scratch = r4;
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(r3);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(r3);
+ __ Push(r3);
+ __ LoadP(scratch, MemOperand(sp, kPointerSize * 2));
+ __ Push(scratch);
+ VisitForStackValue(prop->key());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
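+// Shared call tail: pushes the arguments left-to-right, places the feedback
+// vector slot in r6 and loads the callee (sitting below the receiver and
+// arguments on the stack) into r4 before invoking the CallIC stub.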
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(isolate(), arg_count, call_type);
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
+}
+
+
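+// Pushes the extra arguments for the ResolvePossiblyDirectEval runtime
+// call: the first call argument (or undefined), the enclosing function,
+// the receiver, the strict-mode flag and the scope's start position.
+// Together with the callee copy already pushed by VisitCall this makes up
+// the six runtime arguments.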
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // r8: copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ LoadP(r8, MemOperand(sp, arg_count * kPointerSize), r0);
+ } else {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ }
+
+  // r7: the enclosing function.
+ __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ // r6: the receiver of the enclosing function.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
+
+ // r5: strict mode.
+ __ LoadSmiLiteral(r5, Smi::FromInt(strict_mode()));
+
+  // r4: the start position of the scope the call resides in.
+ __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
+
+ // Do the runtime call.
+ __ Push(r8, r7, r6, r5, r4);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+}
+
+
+void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
+ DCHECK(super_ref != NULL);
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(r3);
+ __ CallRuntime(Runtime::kGetPrototype, 1);
+}
+
+
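+// Dispatches on the syntactic form of the call: possible direct eval,
+// global variable, lookup slot, property (named or keyed, plain or super),
+// 'super' call, or an arbitrary expression.  Every path records the JS
+// return site, which the DEBUG-only bookkeeping below double-checks.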
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r4);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in r3 (function) and
+ // r4 (receiver). Touch up the stack with the right values.
+ __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ StoreP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in r3)
+    // and the object holding it (returned in r4).
+ DCHECK(!context_register().is(r5));
+ __ mov(r5, Operand(proxy->name()));
+ __ Push(context_register(), r5);
+ __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ Push(r3, r4); // Function, receiver.
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ b(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(r3);
+ // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ push(r4);
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ bool is_named_call = property->key()->IsPropertyName();
+ if (property->IsSuperAccess()) {
+ if (is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
+ } else {
+ EmitKeyedSuperCallWithLoadIC(expr);
+ }
+ } else {
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (is_named_call) {
+ EmitCallWithLoadIC(expr);
+ } else {
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ }
+ }
+ } else if (call_type == Call::SUPER_CALL) {
+ SuperReference* super_ref = callee->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ Push(result_register());
+ VisitForStackValue(super_ref->this_var());
+ EmitCall(expr, CallICState::METHOD);
+ } else {
+ DCHECK(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ push(r4);
+ // Emit function call.
+ EmitCall(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ DCHECK(expr->return_is_recorded_);
+#endif
+}
+
+
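+// 'new' expressions: the constructor (for 'super' this is the prototype of
+// the active function) and the arguments are pushed, the argument count
+// goes in r3 and the constructor in r4, and CallConstructStub performs the
+// allocation and invocation while recording the target in the feedback
+// vector passed in r5/r6.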
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ if (expr->expression()->IsSuperReference()) {
+ EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(expr->expression());
+ }
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into r4 and r3.
+ __ mov(r3, Operand(arg_count));
+ __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+
+ // Record call targets in unoptimized code.
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
+ }
+
+ __ Move(r5, FeedbackVector());
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestIfSmi(r3, r0);
+ Split(eq, if_true, if_false, fall_through, cr0);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestIfPositiveSmi(r3, r0);
+ Split(eq, if_true, if_false, fall_through, cr0);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r3, ip);
+ __ beq(if_true);
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ lbz(r4, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ __ bne(if_false, cr0);
+ __ lbz(r4, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ __ cmpi(r4, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ blt(if_false);
+ __ cmpi(r4, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ne, if_true, if_false, fall_through, cr0);
+
+ context()->Plug(if_true, if_false);
+}
+
+
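+// Decides whether a String wrapper can use the fast default valueOf.  If
+// the map is not yet marked as checked, the object must have fast
+// properties and none of its own descriptors may be named "valueOf"; the
+// map is then marked so this scan happens at most once per map.  In all
+// cases the wrapper's prototype must still be the unmodified String
+// prototype.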
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ AssertNotSmi(r3);
+
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(ip, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ andi(r0, ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ bne(&skip_lookup, cr0);
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ LoadP(r5, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+ __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r5, ip);
+ __ beq(if_false);
+
+ // Look for valueOf name in the descriptor array, and indicate false if
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(r6, r4);
+ __ cmpi(r6, Operand::Zero());
+ __ beq(&done);
+
+ __ LoadInstanceDescriptors(r4, r7);
+ // r7: descriptor array.
+ // r6: valid entries in the descriptor array.
+ __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
+ __ Mul(r6, r6, ip);
+ // Calculate location of the first key name.
+ __ addi(r7, r7, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ // Calculate the end of the descriptor array.
+ __ mr(r5, r7);
+ __ ShiftLeftImm(ip, r6, Operand(kPointerSizeLog2));
+ __ add(r5, r5, ip);
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ // The use of ip to store the valueOf string assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(isolate()->factory()->value_of_string()));
+ __ b(&entry);
+ __ bind(&loop);
+ __ LoadP(r6, MemOperand(r7, 0));
+ __ cmp(r6, ip);
+ __ beq(if_false);
+ __ addi(r7, r7, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ bind(&entry);
+ __ cmp(r7, r5);
+ __ bne(&loop);
+
+ __ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ lbz(r5, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ ori(r5, r5, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ stb(r5, FieldMemOperand(r4, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+ __ LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
+ __ JumpIfSmi(r5, if_false);
+ __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadP(r6, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kNativeContextOffset));
+ __ LoadP(r6,
+ ContextOperand(r6, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(r5, r6);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
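+// Recognizes the heap number -0.0 by its bit pattern: on 64-bit targets the
+// whole value must be 0x8000000000000000, on 32-bit targets the word at
+// kExponentOffset must be 0x80000000 and the word at kMantissaOffset zero.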
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+ __ LoadP(r4, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ li(r5, Operand(1));
+ __ rotrdi(r5, r5, 1); // r5 = 0x80000000_00000000
+ __ cmp(r4, r5);
+#else
+ __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+ __ lwz(r4, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+ Label skip;
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmp(r5, r0);
+ __ bne(&skip);
+ __ cmpi(r4, Operand::Zero());
+ __ bind(&skip);
+#endif
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r3, if_false);
+ Register map = r4;
+ Register type_reg = r5;
+ __ LoadP(map, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ subi(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
+ __ cmpli(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&check_frame_marker);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ STATIC_ASSERT(StackFrame::CONSTRUCT < 0x4000);
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ pop(r4);
+ __ cmp(r3, r4);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in r4 and the formal
+ // parameter count in r3.
+ VisitForAccumulatorValue(args->at(0));
+ __ mr(r4, r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ context()->Plug(r3);
+}
+
+
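+// Computes the class name of the argument: smis and other non-spec-objects
+// yield null, JS functions yield "Function", and other spec objects yield
+// the instance class name stored on their constructor's SharedFunctionInfo
+// ("Object" when the constructor is not a JS function).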
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(r3, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Map is now in r3.
+ __ blt(&null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ beq(&function);
+
+ __ cmpi(r4, Operand(LAST_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1);
+ __ beq(&function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset));
+ __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+ __ bne(&non_function_constructor);
+
+ // r3 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ b(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(r3, Heap::kFunction_stringRootIndex);
+ __ b(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(r3, Heap::kObject_stringRootIndex);
+ __ b(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(r3, &done);
+ // If the object is not a value type, return the object.
+ __ CompareObjectType(r3, r4, r4, JS_VALUE_TYPE);
+ __ bne(&done);
+ __ LoadP(r3, FieldMemOperand(r3, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
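+// Reads a field of a JSDate.  Field 0 is the date value itself; other
+// cached fields are read straight from the object as long as the date
+// cache stamp is still current, otherwise the C date-field function is
+// called to recompute them.  Non-date arguments throw via
+// Runtime::kThrowNotDateError.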
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ DCHECK_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = r3;
+ Register result = r3;
+ Register scratch0 = r11;
+ Register scratch1 = r4;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
+ __ bne(&not_date_object);
+
+ if (index->value() == 0) {
+ __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ b(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch1, Operand(stamp));
+ __ LoadP(scratch1, MemOperand(scratch1));
+ __ LoadP(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ cmp(scratch1, scratch0);
+ __ bne(&runtime);
+ __ LoadP(result,
+ FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()),
+ scratch0);
+ __ b(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch1);
+ __ LoadSmiLiteral(r4, index);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ b(&done);
+ }
+
+ __ bind(&not_date_object);
+ __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r3;
+ Register index = r4;
+ Register value = r5;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value, r0);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index, r0);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index, index);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value);
+ __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToByteArrayOffset(r0, index);
+ __ stbx(value, MemOperand(ip, r0));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r3;
+ Register index = r4;
+ Register value = r5;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ __ Pop(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value, r0);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index, r0);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index, index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value);
+ __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToShortArrayOffset(r0, index);
+ __ sthx(value, MemOperand(ip, r0));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(r4); // r3 = value. r4 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(r4, &done);
+
+ // If the object is not a value type, return the value.
+ __ CompareObjectType(r4, r5, r5, JS_VALUE_TYPE);
+ __ bne(&done);
+
+ // Store the value.
+ __ StoreP(r3, FieldMemOperand(r4, JSValue::kValueOffset), r0);
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ mr(r5, r3);
+ __ RecordWriteField(r4, JSValue::kValueOffset, r5, r6, kLRHasBeenSaved,
+ kDontSaveFPRegs);
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(args->length(), 1);
+ // Load the argument into r3 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(r3, r4);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(r4);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r4;
+ Register index = r3;
+ Register result = r6;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r4;
+ Register index = r3;
+ Register scratch = r6;
+ Register result = r3;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object, index, scratch, result,
+ &need_conversion, &need_conversion,
+ &index_out_of_range, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ pop(r4);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(r3, &runtime);
+ __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+ __ bne(&runtime);
+
+ // InvokeFunction requires the function in r4. Move it in there.
+ __ mr(r4, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(r4, count, CALL_FUNCTION, NullCallWrapper());
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ b(&done);
+
+ __ bind(&runtime);
+ __ push(r3);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(r5, r4);
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
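+// Fast path for the JSFunctionResultCache lookup: the cache finger points
+// at the most recently used key, so that key is compared against the
+// requested one and the value stored next to it is returned on a hit;
+// misses take the Runtime::kGetFromCache slow path.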
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ DCHECK_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ context()->Plug(r3);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = r3;
+ Register cache = r4;
+ __ LoadP(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ LoadP(cache,
+ ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ LoadP(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)), r0);
+
+ Label done, not_found;
+ __ LoadP(r5, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // r5 now holds finger offset as a smi.
+ __ addi(r6, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // r6 now points to the start of fixed array elements.
+ __ SmiToPtrArrayOffset(r5, r5);
+ __ LoadPUX(r5, MemOperand(r6, r5));
+ // r6 now points to the key of the pair.
+ __ cmp(key, r5);
+ __ bne(&not_found);
+
+ __ LoadP(r3, MemOperand(r6, kPointerSize));
+ __ b(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
+ // PPC - assume ip is free
+ __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
+ __ and_(r0, r3, ip);
+ __ cmpi(r0, Operand::Zero());
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(r3);
+
+ __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
+ __ IndexFromHash(r3, r3);
+
+ context()->Plug(r3);
+}
+
+
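+// Fast join for arrays of sequential one-byte strings.  The first loop
+// verifies that every element is a flat one-byte string while summing the
+// lengths, the result string is then allocated in one piece, and dedicated
+// copy loops handle the empty, single-character and longer separator
+// cases.  Any failed check bails out and returns undefined instead of a
+// string (the caller is expected to fall back to a generic join).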
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = r3;
+ Register elements = no_reg; // Will be r3.
+ Register result = no_reg; // Will be r3.
+ Register separator = r4;
+ Register array_length = r5;
+ Register result_pos = no_reg; // Will be r5
+ Register string_length = r6;
+ Register string = r7;
+ Register element = r8;
+ Register elements_end = r9;
+ Register scratch1 = r10;
+ Register scratch2 = r11;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ bne(&bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ LoadP(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ cmpi(array_length, Operand::Zero());
+ __ bne(&non_trivial_array);
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ b(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ LoadP(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential one-byte strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ li(string_length, Operand::Zero());
+ __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+ __ add(elements_end, element, elements_end);
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (generate_debug_code_) {
+ __ cmpi(array_length, Operand::Zero());
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
+ }
+ __ bind(&loop);
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ JumpIfSmi(string, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+
+ __ AddAndCheckForOverflow(string_length, string_length, scratch1, scratch2,
+ r0);
+ __ BranchOnOverflow(&bailout);
+
+ __ cmp(element, elements_end);
+ __ blt(&loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmpi(array_length, Operand(1));
+ __ bne(&not_size_one_array);
+ __ LoadP(r3, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ b(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat one-byte string.
+ __ JumpIfSmi(separator, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string.
+ __ LoadP(scratch1,
+ FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, scratch1);
+#if V8_TARGET_ARCH_PPC64
+ __ SmiUntag(scratch1, scratch1);
+ __ Mul(scratch2, array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ ShiftRightImm(ip, scratch2, Operand(31), SetRC);
+ __ bne(&bailout, cr0);
+ __ SmiTag(scratch2, scratch2);
+#else
+ // array_length is not smi but the other values are, so the result is a smi
+ __ mullw(scratch2, array_length, scratch1);
+ __ mulhw(ip, array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ cmpi(ip, Operand::Zero());
+ __ bne(&bailout);
+ __ cmpwi(scratch2, Operand::Zero());
+ __ blt(&bailout);
+#endif
+
+ __ AddAndCheckForOverflow(string_length, string_length, scratch2, scratch1,
+ r0);
+ __ BranchOnOverflow(&bailout);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+ elements_end, &bailout);
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+ __ add(elements_end, element, elements_end);
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ addi(result_pos, result,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ LoadP(scratch1,
+ FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(1), r0);
+ __ beq(&one_char_separator);
+ __ bgt(&long_separator);
+
+ // Empty separator case
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ blt(&empty_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
+ __ b(&done);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its one-byte character value.
+ __ lbz(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ b(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator one-byte char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ stb(separator, MemOperand(result_pos));
+ __ addi(result_pos, result_pos, Operand(1));
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmpl(element, elements_end);
+ __ blt(&one_char_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
+ __ b(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ LoadP(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ addi(string, separator,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmpl(element, elements_end);
+ __ blt(&long_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
+ __ b(&done);
+
+ __ bind(&bailout);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ mov(ip, Operand(debug_is_active));
+ __ lbz(r3, MemOperand(ip));
+ __ SmiTag(r3);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ __ LoadP(receiver, GlobalObjectOperand());
+ __ LoadP(receiver,
+ FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+ __ push(receiver);
+
+ // Load the function from the receiver.
+ __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+ }
+
+ // Push the target function under the receiver.
+ __ LoadP(ip, MemOperand(sp, 0));
+ __ push(ip);
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
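+    // Stack is now: receiver on top, with the loaded function directly below it.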
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, r3);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r3);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ LoadSmiLiteral(r4, Smi::FromInt(strict_mode()));
+ __ push(r4);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(r3);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ DCHECK(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ LoadP(r5, GlobalObjectOperand());
+ __ mov(r4, Operand(var->name()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));
+ __ Push(r5, r4, r3);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(r3);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ DCHECK(!context_register().is(r5));
+ __ mov(r5, Operand(var->name()));
+ __ Push(context_register(), r5);
+ __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ context()->Plug(r3);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(), test->false_label(),
+ test->true_label(), test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(), &materialize_false,
+ &materialize_true, &materialize_true);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r3);
+ __ b(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r3);
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(r3);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ DCHECK(expr->expression()->IsValidReferenceExpression());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ Property* prop = expr->expression()->AsProperty();
+ LhsKind assign_type = GetAssignType(prop);
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ __ push(ip);
+ }
+ switch (assign_type) {
+ case NAMED_PROPERTY: {
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(prop);
+ break;
+ }
+
+ case NAMED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ const Register scratch = r4;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
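+        // Stack: this, home_object, this, home_object (top). The lower pair
+        // stays behind for the store after the count operation.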
+ EmitNamedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ const Register scratch = r4;
+ const Register scratch1 = r5;
+ __ Move(scratch, result_register());
+ VisitForAccumulatorValue(prop->key());
+ __ Push(scratch, result_register());
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ __ Push(scratch1, scratch, result_register());
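+        // Stack: this, home_object, key, this, home_object, key (top). The
+        // lower triple stays behind for the keyed store afterwards.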
+ EmitKeyedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(prop);
+ break;
+ }
+
+ case VARIABLE:
+ UNREACHABLE();
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r3, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r3);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+ __ AddAndCheckForOverflow(r3, r3, scratch1, scratch2, r0);
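+    // scratch2 comes back negative exactly when the signed addition overflowed.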
+ __ BranchOnNoOverflow(&done);
+ // Call stub. Undo operation first.
+ __ sub(r3, r3, scratch1);
+ __ b(&stub_call);
+ __ bind(&slow);
+ }
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r3);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ bind(&stub_call);
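+  // The BinaryOpIC called below takes its left operand in r4 and its right
+  // operand in r3, so the sequence computes old value + count_value.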
+ __ mr(r4, r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(count_value));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in r3.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ {
+ EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(r3);
+ }
+        // For all contexts except EffectContext we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r3);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ __ pop(StoreDescriptor::ReceiverRegister());
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r3);
+ }
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ EmitNamedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r3);
+ }
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ EmitKeyedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r3);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Pop(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r3);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ DCHECK(!context()->IsEffect());
+ DCHECK(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "[ Global variable");
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+ if (FLAG_vector_ics) {
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ }
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(r3);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ mov(r3, Operand(proxy->name()));
+ __ Push(cp, r3);
+ __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(r3);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ {
+ AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(r3, if_true);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r3, ip);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(r3, if_false);
+ // Check for undetectable objects => false.
+ __ CompareObjectType(r3, r3, r4, FIRST_NONSTRING_TYPE);
+ __ bge(if_false);
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ STATIC_ASSERT((1 << Map::kIsUndetectable) < 0x8000);
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through, cr0);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r3, r4, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ __ beq(if_true);
+ __ CompareRoot(r3, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ beq(if_true);
+ __ JumpIfSmi(r3, if_false);
+ // Check for undetectable objects => true.
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through, cr0);
+
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(r3, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(r3, r3, r4, JS_FUNCTION_TYPE);
+ __ beq(if_true);
+ __ cmpi(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(r3, if_false);
+ __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ beq(if_true);
+ // Check for JS objects => true.
+ __ CompareObjectType(r3, r3, r4, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ blt(if_false);
+ __ CompareInstanceType(r3, r4, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ bgt(if_false);
+ // Check for undetectable objects => false.
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through, cr0);
+ } else {
+ if (if_false != fall_through) __ b(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r3, ip);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ __ cmpi(r3, Operand::Zero());
+ Split(eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+ __ pop(r4);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ orx(r5, r3, r4);
+ patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+ __ cmp(r4, r3);
+ Split(cond, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ cmpi(r3, Operand::Zero());
+ Split(cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue
+ ? Heap::kNullValueRootIndex
+ : Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r4, nil_value);
+ __ cmp(r3, r4);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ cmpi(r3, Operand::Zero());
+ Split(ne, if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(r3);
+}
+
+
+Register FullCodeGenerator::result_register() { return r3; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ StoreP(value, MemOperand(fp, frame_offset), r0);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ LoadP(dst, ContextOperand(cp, context_index), r0);
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_script_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ DCHECK(declaration_scope->is_function_scope());
+ __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(ip);
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ DCHECK(!result_register().is(r4));
+ // Store result register while executing finally block.
+ __ push(result_register());
+ // Cook return address in link register to stack (smi encoded Code* delta)
+ __ mflr(r4);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ sub(r4, r4, ip);
+ __ SmiTag(r4);
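+  // r4 now holds the return address as a smi-encoded offset from the start of
+  // the code object, so the value stored on the stack stays valid even if the
+  // GC moves the code.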
+
+  // Store the cooked return address on the stack while executing the finally
+  // block.
+ __ push(r4);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ LoadP(r4, MemOperand(ip));
+ __ push(r4);
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ lbz(r4, MemOperand(ip));
+ __ SmiTag(r4);
+ __ push(r4);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ LoadP(r4, MemOperand(ip));
+ __ push(r4);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ DCHECK(!result_register().is(r4));
+ // Restore pending message from stack.
+ __ pop(r4);
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ mov(ip, Operand(pending_message_script));
+ __ StoreP(r4, MemOperand(ip));
+
+ __ pop(r4);
+ __ SmiUntag(r4);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ mov(ip, Operand(has_pending_message));
+ __ stb(r4, MemOperand(ip));
+
+ __ pop(r4);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r4, MemOperand(ip));
+
+  // Restore the cooked return address (smi-encoded Code* delta) into r4.
+ __ pop(r4);
+
+  // Restore the result register, then uncook the return address and return.
+ __ pop(result_register());
+ __ SmiUntag(r4);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ add(ip, ip, r4);
+ __ mtctr(ip);
+ __ bctr();
+}
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth, int* context_length) {
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ LoadP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+ __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
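+  // Branch to the finally entry with the link register set (SetLK) so the
+  // finally block can return to the code that follows.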
+ __ b(finally_entry_, SetLK);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address mov_address = Assembler::target_address_from_return_address(pc);
+ Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ CodePatcher patcher(cmp_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT: {
+ // <decrement profiling counter>
+ // cmpi r6, 0
+ // bge <ok> ;; not changed
+ // mov r12, <interrupt stub address>
+ // mtlr r12
+ // blrl
+ // <reset profiling counter>
+ // ok-label
+ patcher.masm()->cmpi(r6, Operand::Zero());
+ break;
+ }
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // crset
+ // bge <ok> ;; not changed
+ // mov r12, <on-stack replacement address>
+ // mtlr r12
+ // blrl
+ // <reset profiling counter>
+ // ok-label ----- pc_after points here
+
+ // Set the LT bit such that bge is a NOP
+ patcher.masm()->crset(Assembler::encode_crbit(cr7, CR_LT));
+ break;
+ }
+
+ // Replace the stack check address in the mov sequence with the
+ // entry address of the replacement code.
+ Assembler::set_target_address_at(mov_address, unoptimized_code,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, mov_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate, Code* unoptimized_code, Address pc) {
+ Address mov_address = Assembler::target_address_from_return_address(pc);
+ Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ Address interrupt_address =
+ Assembler::target_address_at(mov_address, unoptimized_code);
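+  // The state is recovered from the (possibly patched) compare slot and from
+  // which builtin the mov sequence currently targets.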
+
+ if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
+ DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
+ return INTERRUPT;
+ }
+
+ DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
+
+ if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ DCHECK(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
+ return OSR_AFTER_STACK_CHECK;
+}
+}
+} // namespace v8::internal
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
new file mode 100644
index 0000000000..693f341e99
--- /dev/null
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -0,0 +1,306 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
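+// Note: r3 doubles as the PPC port's result register (see
+// FullCodeGenerator::result_register in full-codegen-ppc.cc), which is why
+// result and value slots in the descriptors below generally use r3.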
+const Register LoadDescriptor::ReceiverRegister() { return r4; }
+const Register LoadDescriptor::NameRegister() { return r5; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r3; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return r6; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return r4; }
+const Register StoreDescriptor::NameRegister() { return r5; }
+const Register StoreDescriptor::ValueRegister() { return r3; }
+
+
+const Register StoreTransitionDescriptor::MapRegister() { return r6; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r6; }
+
+
+const Register InstanceofDescriptor::left() { return r3; }
+const Register InstanceofDescriptor::right() { return r4; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return r4; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r5; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return r5; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+ return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r6, r5, r4};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r6, r5, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5, r6};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r6, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r6};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the function to call
+ // r5 : feedback vector
+ // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
+ // TODO(turbofan): So far we don't gather type feedback and hence skip the
+ // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+ Register registers[] = {cp, r3, r4, r5};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3, r4};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r3 -- number of arguments
+ // r4 -- function
+ // r5 -- allocation site with elements kind
+ Register registers[] = {cp, r4, r5};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, r4, r5, r3};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(),
+ Representation::Tagged(), Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // cp -- context
+ // r3 -- number of arguments
+ // r4 -- constructor function
+ Register registers[] = {cp, r4};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // stack param count needs (constructor pointer, and single argument)
+ Register registers[] = {cp, r4, r3};
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r5, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r5, // key
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r5, // name
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r3, // receiver
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r4, // JSFunction
+ r3, // actual number of arguments
+ r5, // expected number of arguments
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // JSFunction
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ cp, // context
+ r3, // callee
+ r7, // call_data
+ r5, // holder
+ r4, // api_function_address
+ };
+ Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ };
+ data->Initialize(arraysize(registers), registers, representations);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
new file mode 100644
index 0000000000..7b6052c3e6
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -0,0 +1,6136 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+ virtual ~SafepointGenerator() {}
+
+ void BeforeCall(int call_size) const OVERRIDE {}
+
+ void AfterCall() const OVERRIDE {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ DCHECK(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ DCHECK(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ DCHECK(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
+#endif
+
+ // r4: Callee's JS function.
+ // cp: Callee's context.
+ // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+ // ip: Our own function entry (required by the prologue)
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+ __ LoadP(r5, MemOperand(sp, receiver_offset));
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bne(&ok);
+
+ __ LoadP(r5, GlobalObjectOperand());
+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+ __ StoreP(r5, MemOperand(sp, receiver_offset));
+
+ __ bind(&ok);
+ }
+ }
+
+ int prologue_offset = masm_->pc_offset();
+
+ if (prologue_offset) {
+    // Prologue logic requires its starting address in ip and the
+ // corresponding offset from the function entry.
+ prologue_offset += Instruction::kInstrSize;
+ __ addi(ip, ip, Operand(prologue_offset));
+ }
+ info()->set_prologue_offset(prologue_offset);
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue(prologue_offset);
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
+ }
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ subi(sp, sp, Operand(slots * kPointerSize));
+ if (FLAG_debug_code) {
+ __ Push(r3, r4);
+ __ li(r0, Operand(slots));
+ __ mtctr(r0);
+ __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
+ __ mov(r4, Operand(kSlotsZapValue));
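+      // r3 starts just above the slot area (the +2 skips the r3/r4 saved by
+      // the Push above); StorePU pre-decrements r3 and bdnz counts CTR down.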
+ Label loop;
+ __ bind(&loop);
+ __ StorePU(r4, MemOperand(r3, -kPointerSize));
+ __ bdnz(&loop);
+ __ Pop(r3, r4);
+ }
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Possibly allocate a local context.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is in r4.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), heap_slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r4);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+ // Context is returned in both r3 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ mr(cp, r3);
+ __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r3, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextOperand(cp, var->index());
+ __ StoreP(r3, target, r0);
+ // Update the write barrier. This clobbers r6 and r3.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+ GetLinkRegisterState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r3, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ DCHECK(slots >= 0);
+ __ subi(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ DCHECK(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(
+ ";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(), code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
+ frame_is_built_ = true;
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ PushFixedFrame(scratch0());
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ DCHECK(frame_is_built_);
+ __ PopFixedFrame(ip);
+ frame_is_built_ = false;
+ }
+ __ b(code->exit());
+ }
+ }
+
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+ // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+ // immediate of a branch instruction.
+ // To simplify we consider the code size from the first instruction to the
+ // end of the jump table. We also don't consider the pc load delta.
+ // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+ if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+ jump_table_.length() * 7)) {
+ Abort(kGeneratedCodeIsTooLarge);
+ }
+
+ if (jump_table_.length() > 0) {
+ Label needs_frame, call_deopt_entry;
+
+ Comment(";;; -------------------- Jump table --------------------");
+ Address base = jump_table_[0].address;
+
+ Register entry_offset = scratch0();
+
+ int length = jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->reason);
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load an immediate
+ // offset which will be added to the base address later.
+ __ mov(entry_offset, Operand(entry - base));
+
+ if (table_entry->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
+ } else {
+ __ bind(&needs_frame);
+ Comment(";;; call deopt with frame");
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ PushFixedFrame(ip);
+ __ addi(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ bind(&call_deopt_entry);
+ // Add the base address to the offset previously loaded in
+ // entry_offset.
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+ __ add(ip, entry_offset, ip);
+ __ Call(ip);
+ }
+ } else {
+ // The last entry can fall through into `call_deopt_entry`, avoiding a
+ // branch.
+ bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+ if (need_branch) __ b(&call_deopt_entry);
+ }
+ }
+
+ if (!call_deopt_entry.is_bound()) {
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+ __ add(ip, entry_offset, ip);
+ __ Call(ip);
+ }
+ }
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ DCHECK(is_done());
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ DCHECK(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ DCHECK(literal->IsNumber());
+ __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+ } else if (r.IsDouble()) {
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+ } else {
+ DCHECK(r.IsSmiOrTagged());
+ __ Move(scratch, literal);
+ }
+ return scratch;
+ } else if (op->IsStackSlot()) {
+ __ LoadP(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+ Register dst) {
+ DCHECK(IsInteger32(const_op));
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ int32_t value = constant->Integer32Value();
+ if (IsSmi(const_op)) {
+ __ LoadSmiLiteral(dst, Smi::FromInt(value));
+ } else {
+ __ LoadIntLiteral(dst, value);
+ }
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ DCHECK(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ DCHECK(r.IsSmiOrTagged());
+ return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ DCHECK(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ DCHECK(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ DCHECK(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand::Zero();
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand::Zero();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+ DCHECK(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+    // Retrieve the parameter without an eager stack frame, relative to the
+    // stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ DCHECK(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+    // Retrieve the parameter without an eager stack frame, relative to the
+    // stack pointer.
+ return MemOperand(sp,
+ ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ bool has_closure_id =
+ !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ DCHECK(translation_size == 1);
+ DCHECK(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ DCHECK(translation_size == 2);
+ DCHECK(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation, LOperand* op,
+ bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment, translation, value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer, dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ DCHECK(instr != NULL);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr, SaveFPRegsMode save_doubles) {
+ DCHECK(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ LoadP(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(instr->pointer_map(), argc,
+ Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index, translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ const char* detail,
+ Deoptimizer::BailoutType bailout_type,
+ CRegister cr) {
+ LEnvironment* environment = instr->environment();
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ DCHECK(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ DCHECK(info()->IsOptimizing() || info()->IsStub());
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
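+    // Decrement the stress_deopt_count counter; once it reaches zero, reset
+    // it to FLAG_deopt_every_n_times and force a deoptimization.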
+ CRegister alt_cr = cr6;
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ DCHECK(!alt_cr.is(cr));
+ __ Push(r4, scratch);
+ __ mov(scratch, Operand(count));
+ __ lwz(r4, MemOperand(scratch));
+ __ subi(r4, r4, Operand(1));
+ __ cmpi(r4, Operand::Zero(), alt_cr);
+ __ bne(&no_deopt, alt_cr);
+ __ li(r4, Operand(FLAG_deopt_every_n_times));
+ __ stw(r4, MemOperand(scratch));
+ __ Pop(r4, scratch);
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ stw(r4, MemOperand(scratch));
+ __ Pop(r4, scratch);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+ }
+
+ Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+ instr->Mnemonic(), detail);
+ DCHECK(info()->IsStub() || frame_is_built_);
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+ DeoptComment(reason);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+ !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+ if (jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
+ }
+ __ b(cond, &jump_table_.last().label, cr);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, CRegister cr) {
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ DeoptimizeIf(condition, instr, detail, bailout_type, cr);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ {
+ AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ DCHECK(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode) {
+ DCHECK(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+#if V8_OOL_CONSTANT_POOL
+ if (kind & Safepoint::kWithRegisters) {
+ // Register always contains a pointer to the constant pool.
+ safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
+ }
+#endif
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_, label->hydrogen_value()->id(),
+ label->block_id(), LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r3));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ DCHECK(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
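+  // For example, with divisor = 8 (shift = 3) and dividend = -13, the
+  // branching version below negates to 13, masks with 0x7 to get 5, and
+  // negates back to -5, matching JavaScript's dividend-signed remainder.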
+ HMod* hmod = instr->hydrogen();
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmpwi(dividend, Operand::Zero());
+ __ bge(&dividend_is_not_negative);
+ if (shift) {
+ // Note that this is correct even for kMinInt operands.
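+      // (neg(kMinInt) is still kMinInt, but its low 'shift' bits are all
+      // zero, so the extracted remainder of 0 is still correct.)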
+ __ neg(dividend, dividend);
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ __ neg(dividend, dividend, LeaveOE, SetRC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
+ }
+ } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ li(dividend, Operand::Zero());
+ } else {
+ DeoptimizeIf(al, instr, "minus zero");
+ }
+ __ b(&done);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ if (shift) {
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ } else {
+ __ li(dividend, Operand::Zero());
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, "division by zero");
+ return;
+ }
+
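+  // The remainder is dividend - (dividend / |divisor|) * |divisor|, e.g. for
+  // divisor = 3 and dividend = -7, TruncatingDiv yields -2 and the result is
+  // -7 - (-6) = -1.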
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ mullw(result, result, ip);
+ __ sub(result, dividend, result, LeaveOE, SetRC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ bne(&remainder_not_zero, cr0);
+ __ cmpwi(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Label done;
+
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ }
+
+ __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
+
+ // Check for x % 0.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmpwi(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr, "division by zero");
+ }
+
+  // Check for kMinInt % -1: divw returns an undefined result in that case,
+  // which is not what we want. If we care about -0 we have to deopt, because
+  // we can't return -0 as an integer.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(overflow, instr, "minus zero", cr0);
+ } else {
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ li(result_reg, Operand::Zero());
+ __ b(&done);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
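+  // Compute the remainder as left - (left / right) * right.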
+ __ mullw(scratch, right_reg, scratch);
+ __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ bne(&done, cr0);
+ __ cmpwi(left_reg, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ }
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+ DCHECK(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmpwi(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, "minus zero");
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
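+    // lis places the immediate in the upper halfword, so r0 holds kMinInt
+    // (0x80000000) for the 32-bit compare against the dividend.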
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmpw(dividend, r0);
+ DeoptimizeIf(eq, instr, "overflow");
+ }
+
+ int32_t shift = WhichPowerOf2Abs(divisor);
+
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+ __ TestBitRange(dividend, shift - 1, 0, r0);
+ DeoptimizeIf(ne, instr, "lost precision", cr0);
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ neg(result, dividend);
+ return;
+ }
+ if (shift == 0) {
+ __ mr(result, dividend);
+ } else {
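+    // Add a bias of (2^shift - 1) to a negative dividend before the
+    // arithmetic shift so the shift truncates toward zero, e.g. for
+    // divisor = 4 (shift = 2): -13 + 3 = -10, and -10 >> 2 = -3 = trunc(-13/4).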
+ if (shift == 1) {
+ __ srwi(result, dividend, Operand(31));
+ } else {
+ __ srawi(result, dividend, 31);
+ __ srwi(result, result, Operand(32 - shift));
+ }
+ __ add(result, dividend, result);
+ __ srawi(result, result, shift);
+ }
+ if (divisor < 0) __ neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, "division by zero");
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmpwi(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, "minus zero");
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ neg(result, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register scratch = scratch0();
+ __ mov(ip, Operand(divisor));
+ __ mullw(scratch, result, ip);
+ __ cmpw(scratch, dividend);
+ DeoptimizeIf(ne, instr, "lost precision");
+ }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ }
+
+ __ divw(result, dividend, divisor, SetOE, SetRC);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmpwi(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, "division by zero");
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ cmpwi(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero);
+ __ cmpwi(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
+ } else {
+ // When truncating, we want kMinInt / -1 = kMinInt.
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ Register scratch = scratch0();
+ __ mullw(scratch, divisor, result);
+ __ cmpw(dividend, scratch);
+ DeoptimizeIf(ne, instr, "lost precision");
+ }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 0) {
+ if (shift || !result.is(dividend)) {
+ __ srawi(result, dividend, shift);
+ }
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ OEBit oe = LeaveOE;
+#if V8_TARGET_ARCH_PPC64
+ if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmpw(dividend, r0);
+ DeoptimizeIf(eq, instr, "overflow");
+ }
+#else
+ if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ oe = SetOE;
+ }
+#endif
+
+ __ neg(result, dividend, oe, SetRC);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
+ }
+
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_PPC64
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+#endif
+ if (shift) {
+ __ ShiftRightArithImm(result, result, shift);
+ }
+ return;
+#if !V8_TARGET_ARCH_PPC64
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
+ return;
+ }
+
+ Label overflow, done;
+ __ boverflow(&overflow, cr0);
+ __ srawi(result, result, shift);
+ __ b(&done);
+ __ bind(&overflow);
+ __ mov(result, Operand(kMinInt / divisor));
+ __ bind(&done);
+#endif
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, "division by zero");
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmpwi(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, "minus zero");
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ neg(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
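+  // E.g. for divisor = 4 and dividend = -13: adjust to -12, TruncatingDiv
+  // gives -3, then subtract 1 to obtain -4 = floor(-13 / 4).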
+ Register temp = ToRegister(instr->temp());
+ DCHECK(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmpwi(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ neg(result, result);
+ __ b(&done);
+ __ bind(&needs_adjustment);
+ __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ neg(result, result);
+ __ subi(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ }
+
+ __ divw(result, dividend, divisor, SetOE, SetRC);
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmpwi(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, "division by zero");
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ cmpwi(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero);
+ __ cmpwi(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
+ } else {
+ // When truncating, we want kMinInt / -1 = kMinInt.
+ __ bnooverflow(&no_overflow_possible, cr0);
+ __ mr(result, dividend);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ Label done;
+ Register scratch = scratch0();
+// If both operands have the same sign then we are done.
+#if V8_TARGET_ARCH_PPC64
+ __ xor_(scratch, dividend, divisor);
+ __ cmpwi(scratch, Operand::Zero());
+ __ bge(&done);
+#else
+ __ xor_(scratch, dividend, divisor, SetRC);
+ __ bge(&done, cr0);
+#endif
+
+ // If there is no remainder then we are done.
+ __ mullw(scratch, divisor, result);
+ __ cmpw(dividend, scratch);
+ __ beq(&done);
+
+ // We performed a truncating division. Correct the result.
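+  // E.g. -13 / 4 truncates to -3; the signs differ and the remainder is
+  // nonzero, so subtract 1 to get floor(-13 / 4) = -4.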
+ __ subi(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ __ fmadd(result, multiplier, multiplicand, addend);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ __ fmsub(result, multiplier, multiplicand, minuend);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
+
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result is -0.
+ __ cmpi(left, Operand::Zero());
+ DeoptimizeIf(eq, instr, "minus zero");
+ }
+
+ switch (constant) {
+ case -1:
+ if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ __ neg(result, left, SetOE, SetRC);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ __ neg(result, left);
+ __ TestIfInt32(result, scratch, r0);
+ DeoptimizeIf(ne, instr, "overflow");
+ }
+#endif
+ } else {
+ __ neg(result, left);
+ }
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is zero, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_PPC64
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ cmpi(left, Operand::Zero());
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ __ cmpwi(left, Operand::Zero());
+ }
+#endif
+ DeoptimizeIf(lt, instr, "minus zero");
+ }
+ __ li(result, Operand::Zero());
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
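+        // E.g. x * 5 becomes (x << 2) + x and x * 7 becomes (x << 3) - x,
+        // with a trailing negation to fix the sign for negative constants.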
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
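+        // mask is 0 for non-negative constants and -1 for negative ones, so
+        // (constant + mask) ^ mask == |constant| without a branch.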
+
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ ShiftLeftImm(result, left, Operand(shift));
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ neg(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ ShiftLeftImm(scratch, left, Operand(shift));
+ __ add(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ neg(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ ShiftLeftImm(scratch, left, Operand(shift));
+ __ sub(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ neg(result, result);
+ } else {
+ // Generate standard code.
+ __ mov(ip, Operand(constant));
+ __ Mul(result, left, ip);
+ }
+ }
+
+ } else {
+ DCHECK(right_op->IsRegister());
+ Register right = ToRegister(right_op);
+
+ if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+ // result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ SmiUntag(scratch, right);
+ __ Mul(result, result, scratch);
+ } else {
+ __ Mul(result, left, right);
+ }
+ __ TestIfInt32(result, scratch, r0);
+ DeoptimizeIf(ne, instr, "overflow");
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiTag(result);
+ }
+#else
+ // scratch:result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mulhw(scratch, result, right);
+ __ mullw(result, result, right);
+ } else {
+ __ mulhw(scratch, left, right);
+ __ mullw(result, left, right);
+ }
+ __ TestIfInt32(scratch, result, r0);
+ DeoptimizeIf(ne, instr, "overflow");
+#endif
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
+ }
+
+ if (bailout_on_minus_zero) {
+ Label done;
+#if V8_TARGET_ARCH_PPC64
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ xor_(r0, left, right, SetRC);
+ __ bge(&done, cr0);
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ __ xor_(r0, left, right);
+ __ cmpwi(r0, Operand::Zero());
+ __ bge(&done);
+ }
+#endif
+ // Bail out if the result is minus zero.
+ __ cmpi(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, "minus zero");
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
+ DCHECK(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
+
+ if (right_op->IsStackSlot()) {
+ right = Operand(EmitLoadRegister(right_op, ip));
+ } else {
+ DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
+
+ if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andi(result, left, right);
+ break;
+ case Token::BIT_OR:
+ __ ori(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ xori(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ }
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_OR:
+ __ Or(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ notx(result, left);
+ } else {
+ __ Xor(result, left, right);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ if (right_op->IsRegister()) {
+ // Mask the right_op operand.
+ __ andi(scratch, ToRegister(right_op), Operand(0x1F));
+ switch (instr->op()) {
+ case Token::ROR:
+ // rotate_right(a, b) == rotate_left(a, 32 - b)
+ __ subfic(scratch, scratch, Operand(32));
+ __ rotlw(result, left, scratch);
+ break;
+ case Token::SAR:
+ __ sraw(result, left, scratch);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ __ srw(result, left, scratch, SetRC);
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result, SetRC);
+#endif
+ DeoptimizeIf(lt, instr, "negative value", cr0);
+ } else {
+ __ srw(result, left, scratch);
+ }
+ break;
+ case Token::SHL:
+ __ slw(result, left, scratch);
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result);
+#endif
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rotrwi(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ srawi(result, left, shift_count);
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ srwi(result, left, Operand(shift_count));
+ } else {
+ if (instr->can_deopt()) {
+ __ cmpwi(left, Operand::Zero());
+ DeoptimizeIf(lt, instr, "negative value");
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+#if V8_TARGET_ARCH_PPC64
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ sldi(result, left, Operand(shift_count));
+#else
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ slwi(result, left, Operand(shift_count - 1));
+ __ SmiTagCheckOverflow(result, result, scratch);
+ } else {
+ __ SmiTagCheckOverflow(result, left, scratch);
+ }
+ DeoptimizeIf(lt, instr, "overflow", cr0);
+#endif
+ } else {
+ __ slwi(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result);
+#endif
+ }
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* right = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ if (!can_overflow) {
+ if (right->IsConstantOperand()) {
+ __ Add(result, left, -(ToOperand(right).immediate()), r0);
+ } else {
+ __ sub(result, left, EmitLoadRegister(right, ip));
+ }
+ } else {
+ if (right->IsConstantOperand()) {
+ __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
+ scratch0(), r0);
+ } else {
+ __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+ scratch0(), r0);
+ }
+// Deoptimize on overflow.
+#if V8_TARGET_ARCH_PPC64
+ if (!instr->hydrogen()->representation().IsSmi()) {
+ __ extsw(scratch0(), scratch0(), SetRC);
+ }
+#endif
+ DeoptimizeIf(lt, instr, "overflow", cr0);
+ }
+
+#if V8_TARGET_ARCH_PPC64
+ if (!instr->hydrogen()->representation().IsSmi()) {
+ __ extsw(result, result);
+ }
+#endif
+}
+
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+ right->IsConstantOperand());
+
+ Operand right_operand = ToOperand(right);
+ if (is_int16(right_operand.immediate())) {
+ __ subfic(ToRegister(result), ToRegister(left), right_operand);
+ } else {
+ __ mov(r0, right_operand);
+ __ sub(ToRegister(result), r0, ToRegister(left));
+ }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+
+// TODO(penguin): put the constant into the constant pool instead
+// of storing the double to the stack.
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ DCHECK(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ double v = instr->value();
+ __ LoadDoubleLiteral(result, v, scratch0());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp());
+ Smi* index = instr->index();
+ Label runtime, done;
+ DCHECK(object.is(result));
+ DCHECK(object.is(r3));
+ DCHECK(!scratch.is(scratch0()));
+ DCHECK(!scratch.is(object));
+
+ __ TestIfSmi(object, r0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
+ __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
+ DeoptimizeIf(ne, instr, "not a date object");
+
+ if (index->value() == 0) {
+ __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand(stamp));
+ __ LoadP(scratch, MemOperand(scratch));
+ __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ cmp(scratch, scratch0());
+ __ bne(&runtime);
+ __ LoadP(result,
+ FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ b(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ LoadSmiLiteral(r4, index);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ DCHECK(!scratch.is(string));
+ DCHECK(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
+ __ add(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ andi(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpi(scratch,
+ Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+ : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ lbz(result, operand);
+ } else {
+ __ lhz(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type
+ : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ stb(value, operand);
+ } else {
+ __ sth(value, operand);
+ }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* right = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#if V8_TARGET_ARCH_PPC64
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+#endif
+
+ if (!can_overflow) {
+ if (right->IsConstantOperand()) {
+ __ Add(result, left, ToOperand(right).immediate(), r0);
+ } else {
+ __ add(result, left, EmitLoadRegister(right, ip));
+ }
+ } else {
+ if (right->IsConstantOperand()) {
+ __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
+ scratch0(), r0);
+ } else {
+ __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+ scratch0(), r0);
+ }
+// Deoptimize on overflow.
+#if V8_TARGET_ARCH_PPC64
+ if (isInteger) {
+ __ extsw(scratch0(), scratch0(), SetRC);
+ }
+#endif
+ DeoptimizeIf(lt, instr, "overflow", cr0);
+ }
+
+#if V8_TARGET_ARCH_PPC64
+ if (isInteger) {
+ __ extsw(result, result);
+ }
+#endif
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Register left_reg = ToRegister(left);
+ Register right_reg = EmitLoadRegister(right, ip);
+ Register result_reg = ToRegister(instr->result());
+ Label return_left, done;
+#if V8_TARGET_ARCH_PPC64
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+ __ cmp(left_reg, right_reg);
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ __ cmpw(left_reg, right_reg);
+ }
+#endif
+ __ b(cond, &return_left);
+ __ Move(result_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister left_reg = ToDoubleRegister(left);
+ DoubleRegister right_reg = ToDoubleRegister(right);
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ fcmpu(left_reg, right_reg);
+ __ bunordered(&check_nan_left);
+ __ beq(&check_zero);
+ __ b(cond, &return_left);
+ __ b(&return_right);
+
+ __ bind(&check_zero);
+ __ fcmpu(left_reg, kDoubleRegZero);
+ __ bne(&return_left); // left == right != 0.
+
+ // At this point, both left and right are either 0 or -0.
+ // N.B. The following works because +0 + -0 == +0
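+    // E.g. min(+0, -0): negate both to get -0 and +0, add to get +0, then
+    // negate to get -0; max(+0, -0) is simply +0 + -0 == +0.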
+ if (operation == HMathMinMax::kMathMin) {
+ // For min we want logical-or of sign bit: -(-L + -R)
+ __ fneg(left_reg, left_reg);
+ __ fsub(result_reg, left_reg, right_reg);
+ __ fneg(result_reg, result_reg);
+ } else {
+ // For max we want logical-and of sign bit: (L + R)
+ __ fadd(result_reg, left_reg, right_reg);
+ }
+ __ b(&done);
+
+ __ bind(&check_nan_left);
+ __ fcmpu(left_reg, left_reg);
+ __ bunordered(&return_left); // left == NaN.
+
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ fmr(result_reg, right_reg);
+ }
+ __ b(&done);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ fmr(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ fadd(result, left, right);
+ break;
+ case Token::SUB:
+ __ fsub(result, left, right);
+ break;
+ case Token::MUL:
+ __ fmul(result, left, right);
+ break;
+ case Token::DIV:
+ __ fdiv(result, left, right);
+ break;
+ case Token::MOD: {
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ MovToFloatParameters(left, right);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r4));
+ DCHECK(ToRegister(instr->right()).is(r3));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block || cond == al) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
+ } else if (right_block == next_block) {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+ } else {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+ __ b(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ DoubleRegister dbl_scratch = double_scratch0();
+ const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
+ 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
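+  // crZOrNaNBits masks the EQ (zero) and FU (unordered/NaN) bits of cr7, so
+  // after an fcmpu against zero the value is treated as false exactly when
+  // it is 0 or NaN.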
+
+ if (r.IsInteger32()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ cmpwi(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ cmpi(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsDouble()) {
+ DCHECK(!info()->IsStub());
+ DoubleRegister reg = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ __ fcmpu(reg, kDoubleRegZero, cr7);
+ __ mfcr(r0);
+ __ andi(r0, r0, Operand(crZOrNaNBits));
+ EmitBranch(instr, eq, cr0);
+ } else {
+ DCHECK(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ DCHECK(!info()->IsStub());
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ __ cmpi(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ DCHECK(!info()->IsStub());
+ __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+ __ mfcr(r0);
+ __ andi(r0, r0, Operand(crZOrNaNBits));
+ EmitBranch(instr, eq, cr0);
+ } else if (type.IsString()) {
+ DCHECK(!info()->IsStub());
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ cmpi(ip, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ beq(instr->TrueLabel(chunk_));
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ cmpi(reg, Operand::Zero());
+ __ beq(instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ TestIfSmi(reg, r0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestBit(ip, Map::kIsUndetectable, r0);
+ __ bne(instr->FalseLabel(chunk_), cr0);
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ bge(&not_string);
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ cmpi(ip, Operand::Zero());
+ __ bne(instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ bne(&not_heap_number);
+ __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+ __ mfcr(r0);
+ __ andi(r0, r0, Operand(crZOrNaNBits));
+ __ bne(instr->FalseLabel(chunk_), cr0);
+ __ b(instr->TrueLabel(chunk_));
+ __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr, "unexpected object");
+ }
+ }
+ }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = lt;
+ break;
+ case Token::GT:
+ cond = gt;
+ break;
+ case Token::LTE:
+ cond = le;
+ break;
+ case Token::GTE:
+ cond = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op());
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right operands as doubles and load the
+ // resulting flags into the normal status register.
+ __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to false block label.
+ __ bunordered(instr->FalseLabel(chunk_));
+ } else {
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ Cmplwi(ToRegister(left), Operand(value), r0);
+ } else {
+ __ Cmpwi(ToRegister(left), Operand(value), r0);
+ }
+ }
+ } else if (left->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ Cmplwi(ToRegister(right), Operand(value), r0);
+ } else {
+ __ Cmpwi(ToRegister(right), Operand(value), r0);
+ }
+ }
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
+ } else if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmp(ToRegister(left), ToRegister(right));
+ }
+ } else {
+ if (is_unsigned) {
+ __ cmplw(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpw(ToRegister(left), ToRegister(right));
+ }
+ }
+ }
+ EmitBranch(instr, cond);
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ __ cmp(left, right);
+ EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ mov(ip, Operand(factory()->the_hole_value()));
+ __ cmp(input_reg, ip);
+ EmitBranch(instr, eq);
+ return;
+ }
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ __ fcmpu(input_reg, input_reg);
+ EmitFalseBranch(instr, ordered);
+
+ Register scratch = scratch0();
+ __ MovDoubleHighToInt(scratch, input_reg);
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+ EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ DCHECK(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ fcmpu(value, kDoubleRegZero);
+ EmitFalseBranch(instr, ne);
+#if V8_TARGET_ARCH_PPC64
+ __ MovDoubleToInt64(scratch, value);
+#else
+ __ MovDoubleHighToInt(scratch, value);
+#endif
+ __ cmpi(scratch, Operand::Zero());
+ EmitBranch(instr, lt);
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()), DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+ __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ li(ip, Operand(1));
+ __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
+ __ cmp(scratch, ip);
+#else
+ __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ Label skip;
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmp(scratch, r0);
+ __ bne(&skip);
+ __ cmpi(ip, Operand::Zero());
+ __ bind(&skip);
+#endif
+ EmitBranch(instr, eq);
+ }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input, Register temp1,
+ Label* is_not_object, Label* is_object) {
+ Register temp2 = scratch0();
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ cmp(input, temp2);
+ __ beq(is_object);
+
+ // Load map.
+ __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ TestBit(temp2, Map::kIsUndetectable, r0);
+ __ bne(is_not_object, cr0);
+
+ // Load instance type and check that it is in object type range.
+ __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ blt(is_not_object);
+ __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+
+ Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
+ instr->TrueLabel(chunk_));
+
+ EmitBranch(instr, true_cond);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
+ __ TestIfSmi(input_reg, r0);
+ EmitBranch(instr, eq, cr0);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ TestBit(temp, Map::kIsUndetectable, r0);
+ EmitBranch(instr, ne, cr0);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // This instruction also signals that no smi code was inlined.
+ __ cmpi(r3, Operand::Zero());
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(instr, condition);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ DCHECK(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return ge;
+ if (from == FIRST_TYPE) return le;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+ __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+ __ and_(r0, scratch, r0, SetRC);
+ EmitBranch(instr, eq, cr0);
+}
+
+
+// Branches to a label or falls through with the answer in flags. Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+ Handle<String> class_name, Register input,
+ Register temp, Register temp2) {
+ DCHECK(!input.is(temp));
+ DCHECK(!input.is(temp2));
+ DCHECK(!temp.is(temp2));
+
+ __ JumpIfSmi(input, is_false);
+
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(is_false);
+ __ beq(is_true);
+ __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ beq(is_true);
+ } else {
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ bgt(is_false);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+ if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+ __ bne(is_true);
+ } else {
+ __ bne(is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(temp,
+ FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ __ Cmpi(temp, Operand(class_name), r0);
+ // End with the answer in flags.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->temp());
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Cmpi(temp, Operand(instr->map()), r0);
+ EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
+ DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
+
+ InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+ Label equal, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&equal);
+ __ mov(r3, Operand(factory()->false_value()));
+ __ b(&done);
+
+ __ bind(&equal);
+ __ mov(r3, Operand(factory()->true_value()));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+
+ // A Smi is not instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+ // Block constant pool emission to ensure the positions of instructions are
+ // as expected by the patcher. See InstanceofStub::Generate().
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
+ __ cmp(map, ip);
+ __ bne(&cache_miss);
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ mov(result, Operand(factory()->the_hole_value()));
+ }
+ __ b(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not instance of anything.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(object, ip);
+ __ beq(&false_result);
+
+ // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp);
+ __ b(is_string, &false_result, cr0);
+
+ // Go to the deferred code.
+ __ b(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result contains either true or false. The deferred code also
+ // produces a true or false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(flags |
+ InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(isolate(), flags);
+
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
+
+ __ Move(InstanceofStub::right(), instr->function());
+ // Include instructions below in delta: mov + call = mov + (mov + 2)
+ static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ // r8 is used to communicate the offset to the location of the map check.
+ __ mov(r8, Operand(delta * Instruction::kInstrSize));
+ }
+ CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ // Put the result value (r3) into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // This instruction also signals that no smi code was inlined.
+ __ cmpi(r3, Operand::Zero());
+
+ Condition condition = ComputeCompareCondition(op);
+ Label true_value, done;
+
+ __ b(condition, &true_value);
+
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r3. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
+ __ push(r3);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+ int no_frame_start = -1;
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (NeedsEagerFrame()) {
+ no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ } else if (sp_delta != 0) {
+ __ addi(sp, sp, Operand(sp_delta));
+ }
+ } else {
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi
+ if (NeedsEagerFrame()) {
+ no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ }
+ __ SmiToPtrArrayOffset(r0, reg);
+ __ add(sp, sp, r0);
+ }
+
+ __ blr();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ DeoptimizeIf(eq, instr, "hole");
+ }
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ DCHECK(FLAG_vector_ics);
+ Register vector = ToRegister(instr->temp_vector());
+ DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+ __ Move(vector, instr->hydrogen()->feedback_vector());
+ // No need to allocate this register.
+ DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
+ __ mov(VectorLoadICDescriptor::SlotRegister(),
+ Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+ }
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = scratch0();
+
+ // Load the cell.
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // We use a temp to check the payload (CompareRoot might clobber ip).
+ Register payload = ToRegister(instr->temp());
+ __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr, "hole");
+ }
+
+ // Store the value.
+ __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, "hole");
+ } else {
+ Label skip;
+ __ bne(&skip);
+ __ mov(result, Operand(factory()->undefined_value()));
+ __ bind(&skip);
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ MemOperand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadP(scratch, target);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, "hole");
+ } else {
+ __ bne(&skip_assignment);
+ }
+ }
+
+ __ StoreP(value, target, r0);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ __ LoadRepresentation(result, operand, access.representation(), r0);
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ lfd(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+
+ Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_PPC64
+ // 64-bit Smi optimization
+ if (representation.IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ representation = Representation::Integer32();
+ }
+#endif
+
+ __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+ r0);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ // Name is always in r5.
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ }
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Get the prototype or initial map from the function.
+ __ LoadP(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ DeoptimizeIf(eq, instr, "hole");
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ bne(&done);
+
+ // Get the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ if (instr->length()->IsConstantOperand()) {
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ subfic(result, index, Operand(const_length + 1));
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+ __ LoadPX(result, MemOperand(arguments, result));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ subi(result, length, Operand(loc));
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+ __ LoadPX(result, MemOperand(arguments, result));
+ } else {
+ __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
+ __ LoadPX(result, MemOperand(arguments, result));
+ }
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ __ sub(result, length, index);
+ __ addi(result, result, Operand(1));
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+ __ LoadPX(result, MemOperand(arguments, result));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
+ r0);
+ } else {
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+ __ add(scratch0(), external_pointer, r0);
+ }
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ lfs(result, MemOperand(scratch0(), base_offset));
+ } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
+ __ lfd(result, MemOperand(scratch0(), base_offset));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadByte(result, mem_operand, r0);
+ } else {
+ __ lbzx(result, mem_operand);
+ }
+ __ extsb(result, result);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadByte(result, mem_operand, r0);
+ } else {
+ __ lbzx(result, mem_operand);
+ }
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadHalfWord(result, mem_operand, r0);
+ } else {
+ __ lhzx(result, mem_operand);
+ }
+ __ extsh(result, result);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadHalfWord(result, mem_operand, r0);
+ } else {
+ __ lhzx(result, mem_operand);
+ }
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadWord(result, mem_operand, r0);
+ } else {
+ __ lwzx(result, mem_operand);
+ }
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result);
+#endif
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ if (key_is_constant) {
+ __ LoadWord(result, mem_operand, r0);
+ } else {
+ __ lwzx(result, mem_operand);
+ }
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmplw(result, r0);
+ DeoptimizeIf(ge, instr, "negative value");
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ if (!key_is_constant) {
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+ __ add(scratch, elements, r0);
+ elements = scratch;
+ }
+ if (!is_int16(base_offset)) {
+ __ Add(scratch, elements, base_offset, r0);
+ base_offset = 0;
+ elements = scratch;
+ }
+ __ lfd(result, MemOperand(elements, base_offset));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (is_int16(base_offset + Register::kExponentOffset)) {
+ __ lwz(scratch,
+ MemOperand(elements, base_offset + Register::kExponentOffset));
+ } else {
+ __ addi(scratch, elements, Operand(base_offset));
+ __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
+ }
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+ DeoptimizeIf(eq, instr, "hole");
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(r0, key);
+ } else {
+ __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
+ }
+ __ add(scratch, elements, r0);
+ }
+
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_PPC64
+ // 64-bit Smi optimization
+ if (representation.IsInteger32() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ DCHECK(!requires_hole_check);
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ }
+#endif
+
+ __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
+ r0);
+
+ // Check for the hole value.
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+ __ TestIfSmi(result, r0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr, "hole");
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_typed_elements()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_smi,
+ int constant_key,
+ int element_size_shift,
+ int base_offset) {
+ Register scratch = scratch0();
+
+ if (key_is_constant) {
+ return MemOperand(base, (constant_key << element_size_shift) + base_offset);
+ }
+
+ bool needs_shift =
+ (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
+
+ if (!(base_offset || needs_shift)) {
+ return MemOperand(base, key);
+ }
+
+ if (needs_shift) {
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ key = scratch;
+ }
+
+ if (base_offset) {
+ __ Add(scratch, key, base_offset, r0);
+ }
+
+ return MemOperand(base, scratch);
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+ if (FLAG_vector_ics) {
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ subi(result, sp, Operand(2 * kPointerSize));
+ } else {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(result,
+ MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ beq(&adapted);
+ __ mr(result, fp);
+ __ b(&done);
+
+ __ bind(&adapted);
+ __ mr(result, scratch);
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+ // If no arguments adaptor frame the number of arguments is fixed.
+ __ cmp(fp, elem);
+ __ mov(result, Operand(scope()->num_parameters()));
+ __ beq(&done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, result_in_receiver;
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ LoadP(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kStrictModeFunction,
+#else
+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&result_in_receiver, cr0);
+
+ // Do not transform the receiver to object for builtins.
+ __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kNative,
+#else
+ SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&result_in_receiver, cr0);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ beq(&global_object);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ beq(&global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ TestIfSmi(receiver, r0);
+ DeoptimizeIf(eq, instr, "Smi");
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(lt, instr, "not a JavaScript object");
+
+ __ b(&result_in_receiver);
+ __ bind(&global_object);
+ __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mr(result, receiver);
+ __ bind(&result_ok);
+ }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DCHECK(receiver.is(r3)); // Used for parameter count.
+ DCHECK(function.is(r4)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmpli(length, Operand(kArgumentsLimit));
+ DeoptimizeIf(gt, instr, "too many arguments");
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ mr(receiver, length);
+ // The arguments are located one pointer size past elements.
+ __ addi(elements, elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ cmpi(length, Operand::Zero());
+ __ beq(&invoke);
+ __ mtctr(length);
+ __ bind(&loop);
+ __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch, MemOperand(elements, r0));
+ __ push(scratch);
+ __ addi(length, length, Operand(-1));
+ __ bdnz(&loop);
+
+ __ bind(&invoke);
+ DCHECK(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in receiver which is r3, as expected
+ // by InvokeFunction.
+ ParameterCount actual(receiver);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, ip);
+ __ push(argument_reg);
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ DCHECK(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ __ push(cp); // The context is the first argument.
+ __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ push(scratch0());
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+ __ push(scratch0());
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ LInstruction* instr, R4State r4_state) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ if (can_invoke_directly) {
+ if (r4_state == R4_UNINITIALIZED) {
+ __ Move(r4, function);
+ }
+
+ // Change context.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // Set r3 to arguments count if adaptation is not needed. Assumes that r3
+ // is available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ mov(r3, Operand(arity));
+ }
+
+ bool is_self_call = function.is_identical_to(info()->closure());
+
+ // Invoke function.
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ CallJSEntry(ip);
+ }
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, ip);
+ DeoptimizeIf(ne, instr, "not a heap number");
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ cmpwi(exponent, Operand::Zero());
+ // Move the input to the result if necessary.
+ __ Move(result, input);
+ __ bge(&done);
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r4) ? r3 : r4;
+ Register tmp2 = input.is(r5) ? r3 : r5;
+ Register tmp3 = input.is(r6) ? r3 : r6;
+ Register tmp4 = input.is(r7) ? r3 : r7;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+ // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(r3)) __ mr(tmp1, r3);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
+ __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ cmpi(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done);
+ __ li(r0, Operand::Zero()); // clear xer
+ __ mtxer(r0);
+ __ neg(result, result, SetOE, SetRC);
+ // Deoptimize on overflow.
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
+ __ bind(&done);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ cmpwi(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done);
+
+ // Deoptimize on overflow.
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmpw(input, r0);
+ DeoptimizeIf(eq, instr, "overflow");
+
+ __ neg(result, result);
+ __ bind(&done);
+}
+#endif
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LMathAbs* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ fabs(result, input);
+#if V8_TARGET_ARCH_PPC64
+ } else if (r.IsInteger32()) {
+ EmitInteger32MathAbs(instr);
+ } else if (r.IsSmi()) {
+#else
+ } else if (r.IsSmiOrInteger32()) {
+#endif
+ EmitMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register input_high = scratch0();
+ Register scratch = ip;
+ Label done, exact;
+
+ __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+ &exact);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
+
+ __ bind(&exact);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Test for -0.
+ __ cmpi(result, Operand::Zero());
+ __ bne(&done);
+ __ cmpwi(input_high, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+ DoubleRegister input_plus_dot_five = double_scratch1;
+ Register scratch1 = scratch0();
+ Register scratch2 = ip;
+ DoubleRegister dot_five = double_scratch0();
+ Label convert, done;
+
+ __ LoadDoubleLiteral(dot_five, 0.5, r0);
+ __ fabs(double_scratch1, input);
+ __ fcmpu(double_scratch1, dot_five);
+ DeoptimizeIf(unordered, instr, "lost precision or NaN");
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ bgt(&convert); // Out of [-0.5, +0.5].
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+#if V8_TARGET_ARCH_PPC64
+ __ MovDoubleToInt64(scratch1, input);
+#else
+ __ MovDoubleHighToInt(scratch1, input);
+#endif
+ __ cmpi(scratch1, Operand::Zero());
+ // [-0.5, -0].
+ DeoptimizeIf(lt, instr, "minus zero");
+ }
+ Label return_zero;
+ __ fcmpu(input, dot_five);
+ __ bne(&return_zero);
+ __ li(result, Operand(1)); // +0.5.
+ __ b(&done);
+ // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+ // flag kBailoutOnMinusZero.
+ __ bind(&return_zero);
+ __ li(result, Operand::Zero());
+ __ b(&done);
+
+ __ bind(&convert);
+ __ fadd(input_plus_dot_five, input, dot_five);
+ // Reuse dot_five (double_scratch0) as we no longer need this value.
+ __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+ double_scratch0(), &done, &done);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DoubleRegister output_reg = ToDoubleRegister(instr->result());
+ __ frsp(output_reg, input_reg);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = double_scratch0();
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label skip, done;
+
+ __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+ __ fcmpu(input, temp);
+ __ bne(&skip);
+ __ fneg(result, temp);
+ __ b(&done);
+
+ // Add +0 to convert -0 to +0.
+ __ bind(&skip);
+ __ fadd(result, input, kDoubleRegZero);
+ __ fsqrt(result, result);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+// Having marked this as a call, we can use any registers.
+// Just make sure that the input/output registers are the expected ones.
+#ifdef DEBUG
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+#endif
+ DCHECK(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d2));
+ DCHECK(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(ToDoubleRegister(instr->left()).is(d1));
+ DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(r5, &no_deopt);
+ __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r10, ip);
+ DeoptimizeIf(ne, instr, "not a heap number");
+ __ bind(&no_deopt);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ DCHECK(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+ double_scratch2, temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+ 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ cntlzw_(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r4));
+ DCHECK(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(), instr, R4_CONTAINS_TARGET);
+ }
+}
+
+
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+ LTailCallThroughMegamorphicCache* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register name = ToRegister(instr->name());
+ DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(name.is(LoadDescriptor::NameRegister()));
+ DCHECK(receiver.is(r4));
+ DCHECK(name.is(r5));
+
+ Register scratch = r6;
+ Register extra = r7;
+ Register extra2 = r8;
+ Register extra3 = r9;
+
+ // Important for the tail-call.
+ bool must_teardown_frame = NeedsEagerFrame();
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+ must_teardown_frame, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Tail call to miss if we ended up here.
+ if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+ LoadIC::GenerateMiss(masm());
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(ip);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ DCHECK(ToRegister(instr->function()).is(r4));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ mov(r3, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ Handle<JSFunction> jsfun =
+ Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ CallJSEntry(ip);
+ }
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r4));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r4));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ __ mov(r3, Operand(instr->arity()));
+ // No cell in r5 for construct type feedback in optimized code
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r4));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ __ mov(r3, Operand(instr->arity()));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+ // A single non-zero length argument produces a holey array, so the
+ // holey variant of the stub may be needed; inspect the argument first.
+ __ LoadP(r8, MemOperand(sp, 0));
+ __ cmpi(r8, Operand::Zero());
+ __ beq(&packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ b(&done);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ addi(code_object, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ Add(result, base, ToInteger32(offset), r0);
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ add(result, base, offset);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register scratch = scratch0();
+ HObjectAccess access = hinstr->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ return;
+ }
+
+ __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_PPC64
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+ if (representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DCHECK(!hinstr->has_transition());
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ stfd(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(scratch, Operand(transition));
+ __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+ if (hinstr->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+ kSaveFPRegs);
+ }
+ }
+
+ // Do the store.
+ Register value = ToRegister(instr->value());
+
+#if V8_TARGET_ARCH_PPC64
+ // 64-bit Smi optimization
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ representation = Representation::Integer32();
+ }
+#endif
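+ // On PPC64 the Smi payload lives in the upper 32 bits (see the
+ // STATIC_ASSERTs above), so writing the raw int32 into that half (at
+ // offset + kPointerSize / 2 on little-endian) leaves a valid Smi in the
+ // field.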
+
+ if (access.IsInobject()) {
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ if (hinstr->NeedsWriteBarrier()) {
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWriteField(
+ object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
+ }
+ } else {
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ if (hinstr->NeedsWriteBarrier()) {
+ // Update the write barrier for the properties array.
+ // object is used as a scratch register.
+ __ RecordWriteField(
+ scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Representation representation = instr->hydrogen()->length()->representation();
+ DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+ DCHECK(representation.IsSmiOrInteger32());
+
+ Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
+ } else {
+ __ Cmplwi(index, Operand(length), r0);
+ }
+ cc = CommuteCondition(cc);
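+ // The operands above are swapped (the index is compared against the
+ // constant length), so the condition used for the deopt below must be
+ // commuted.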
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
+ } else {
+ __ Cmplwi(length, Operand(index), r0);
+ }
+ } else {
+ Register index = ToRegister(instr->index());
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ cmpl(length, index);
+ } else {
+ __ cmplw(length, index);
+ }
+ }
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(cc), &done);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr, "out of bounds");
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
+ Register address = scratch0();
+ DoubleRegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ Add(address, external_pointer, constant_key << element_size_shift,
+ r0);
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+ __ add(address, external_pointer, r0);
+ }
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
+ __ frsp(double_scratch0(), value);
+ __ stfs(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ stfd(value, MemOperand(address, base_offset));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreByte(value, mem_operand, r0);
+ } else {
+ __ stbx(value, mem_operand);
+ }
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreHalfWord(value, mem_operand, r0);
+ } else {
+ __ sthx(value, mem_operand);
+ }
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreWord(value, mem_operand, r0);
+ } else {
+ __ stwx(value, mem_operand);
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ if (!key_is_constant) {
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ __ add(scratch, elements, scratch);
+ elements = scratch;
+ }
+ if (!is_int16(base_offset)) {
+ __ Add(scratch, elements, base_offset, r0);
+ base_offset = 0;
+ elements = scratch;
+ }
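+ // stfd only takes a 16-bit signed displacement, so an out-of-range base
+ // offset is folded into the address register above and the displacement
+ // is reset to zero.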
+
+ if (instr->NeedsCanonicalization()) {
+ // Force a canonical NaN.
+ __ CanonicalizeNaN(double_scratch, value);
+ __ stfd(double_scratch, MemOperand(elements, base_offset));
+ } else {
+ __ stfd(value, MemOperand(elements, base_offset));
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ HStoreKeyed* hinstr = instr->hydrogen();
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = instr->base_offset();
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ store_base = elements;
+ } else {
+ // Even though the HStoreKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(scratch, key);
+ } else {
+ __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
+ }
+ __ add(scratch, elements, scratch);
+ }
+
+ Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_PPC64
+ // 64-bit Smi optimization
+ if (representation.IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ }
+#endif
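+ // Same upper-half trick as in DoStoreNamedField: the int32 payload is
+ // written into the Smi's upper word, leaving a valid Smi in the slot.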
+
+ __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
+ r0);
+
+ if (hinstr->NeedsWriteBarrier()) {
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Add(key, store_base, offset, r0);
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed,
+ hinstr->PointersToHereCheckForValue());
+ }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ // By cases: external/typed array, fast double array, or fast tagged array.
+ if (instr->is_typed_elements()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Cmpi(scratch, Operand(from_map), r0);
+ __ bne(&not_applicable);
+
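+ // Simple transitions (such as packed -> holey or smi -> tagged) need no
+ // backing store conversion, so swapping the map plus a map write barrier
+ // is enough; anything else goes through TransitionElementsKindStub.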
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
+ __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
+ r0);
+ // Write barrier.
+ __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(object_reg.is(r3));
+ PushSafepointRegistersScope scope(this);
+ __ Move(r4, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+ __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ DeoptimizeIf(eq, instr, "memento found");
+ __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r4));
+ DCHECK(ToRegister(instr->right()).is(r3));
+ StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new (zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(
+ masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+ ToRegister(instr->result()), deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ li(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+ instr->context());
+ __ AssertSmi(r3);
+ __ SmiUntag(r3);
+ __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode FINAL : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new (zone()) DeferredStringCharFromCode(this, instr);
+
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ DCHECK(!char_code.is(result));
+
+ __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
+ __ bgt(deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
+ __ add(result, result, r0);
+ __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result, ip);
+ __ beq(deferred->entry());
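+ // An undefined cache entry means the single-character string has not
+ // been materialized yet; the deferred path creates it via the runtime.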
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ li(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ DCHECK(output->IsDoubleRegister());
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, ToMemOperand(input));
+ __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+ } else {
+ __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), SIGNED_INT32);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LNumberTagI* instr_;
+ };
+
+ Register src = ToRegister(instr->value());
+ Register dst = ToRegister(instr->result());
+
+ DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(dst, src);
+#else
+ __ SmiTagCheckOverflow(dst, src, r0);
+ __ BranchOnOverflow(deferred->entry());
+#endif
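+ // On PPC64 every int32 fits in a Smi, so tagging cannot overflow and the
+ // deferred heap-number path is never entered from here.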
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
+ __ Cmpli(input, Operand(Smi::kMaxValue), r0);
+ __ bgt(deferred->entry());
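+ // Anything above Smi::kMaxValue cannot be tagged and was sent to the
+ // deferred path, which boxes it in a heap number.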
+ __ SmiTag(result, input);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register src = ToRegister(value);
+ Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
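+ // SmiUntag is an arithmetic shift right by one; since tagging
+ // overflowed, the recovered sign bit is inverted, and the xoris above
+ // flips it back to restore the original int32.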
+ }
+ __ ConvertIntToDouble(src, dbl_scratch);
+ } else {
+ __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+ }
+
+ if (FLAG_inline_new) {
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+ __ b(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ li(dst, Operand::Zero());
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r3, dst);
+ }
+
+ // Done. Put the value in dbl_scratch into the value of the allocated heap
+ // number.
+ __ bind(&done);
+ __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ } else {
+ __ b(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ li(reg, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r3, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ TestUnsignedSmiCandidate(input, r0);
+ DeoptimizeIf(ne, instr, "overflow", cr0);
+ }
+#if !V8_TARGET_ARCH_PPC64
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, r0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
+ } else {
+#endif
+ __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_PPC64
+ }
+#endif
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ if (instr->needs_check()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ // If the input is a HeapObject, the value in scratch will be non-zero.
+ __ andi(scratch, input, Operand(kHeapObjectTag));
+ __ SmiUntag(result, input);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
+ } else {
+ __ SmiUntag(result, input);
+ }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ DoubleRegister result_reg,
+ NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+ Register scratch = scratch0();
+ DCHECK(!result_reg.is(double_scratch0()));
+
+ Label convert, load_smi, done;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+ // Heap number map check.
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, ip);
+ if (can_convert_undefined_to_nan) {
+ __ bne(&convert);
+ } else {
+ DeoptimizeIf(ne, instr, "not a heap number");
+ }
+ // load heap number
+ __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+#if V8_TARGET_ARCH_PPC64
+ __ MovDoubleToInt64(scratch, result_reg);
+ // rotate left by one for simple compare.
+ __ rldicl(scratch, scratch, 1, 0);
+ __ cmpi(scratch, Operand(1));
+#else
+ __ MovDoubleToInt64(scratch, ip, result_reg);
+ __ cmpi(ip, Operand::Zero());
+ __ bne(&done);
+ __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
+#endif
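+ // Either way, eq now means -0.0: on 64-bit, rotating the raw bits left
+ // by one maps the -0.0 pattern (0x8000000000000000), and only that
+ // pattern, to 1; on 32-bit, the low word must be zero and the high word
+ // must equal the sign mask.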
+ DeoptimizeIf(eq, instr, "minus zero");
+ }
+ __ b(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, ip);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ b(&done);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+ // Smi to double register conversion
+ __ bind(&load_smi);
+ // scratch: untagged value of input_reg
+ __ ConvertIntToDouble(scratch, result_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->value());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+ DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // Heap number map check.
+ __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch1, ip);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ Label no_heap_number, check_bools, check_false;
+ __ bne(&no_heap_number);
+ __ mr(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ b(&done);
+
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
+ __ bind(&no_heap_number);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, ip);
+ __ bne(&check_bools);
+ __ li(input_reg, Operand::Zero());
+ __ b(&done);
+
+ __ bind(&check_bools);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(input_reg, ip);
+ __ bne(&check_false);
+ __ li(input_reg, Operand(1));
+ __ b(&done);
+
+ __ bind(&check_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(input_reg, ip);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
+ __ li(input_reg, Operand::Zero());
+ } else {
+ DeoptimizeIf(ne, instr, "not a heap number", cr7);
+
+ __ lfd(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // preserve heap number pointer in scratch2 for minus zero check below
+ __ mr(scratch2, input_reg);
+ }
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+ double_scratch);
+ DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmpi(input_reg, Operand::Zero());
+ __ bne(&done);
+ __ lwz(scratch1,
+ FieldMemOperand(scratch2, HeapNumber::kValueOffset +
+ Register::kExponentOffset));
+ __ cmpwi(scratch1, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero", cr7);
+ }
+ }
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI FINAL : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ DCHECK(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+ // Branch to deferred code if the input is a HeapObject.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ LOperand* result = instr->result();
+ DCHECK(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI
+ : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+ // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmpi(result_reg, Operand::Zero());
+ __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+ __ MovDoubleToInt64(scratch1, double_input);
+#else
+ __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+ __ cmpi(scratch1, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ __ bind(&done);
+ }
+ }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+ // Deoptimize if the input wasn't a int32 (inside a double).
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ cmpi(result_reg, Operand::Zero());
+ __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+ __ MovDoubleToInt64(scratch1, double_input);
+#else
+ __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+ __ cmpi(scratch1, Operand::Zero());
+ DeoptimizeIf(lt, instr, "minus zero");
+ __ bind(&done);
+ }
+ }
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(result_reg);
+#else
+ __ SmiTagCheckOverflow(result_reg, r0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
+#endif
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input), r0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input), r0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
+ }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ cmpli(scratch, Operand(first));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr, "wrong instance type");
+ } else {
+ DeoptimizeIf(lt, instr, "wrong instance type");
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpli(scratch, Operand(last));
+ DeoptimizeIf(gt, instr, "wrong instance type");
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+ __ andi(r0, scratch, Operand(mask));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
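+ // With a single-bit mask the tag can only be 0 or that same bit, so
+ // testing the bit via andi (which sets cr0) is enough: deopt on ne when
+ // the bit must be clear, on eq when it must be set.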
+ } else {
+ __ andi(scratch, scratch, Operand(mask));
+ __ cmpi(scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, "wrong instance type");
+ }
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
+ __ cmp(reg, ip);
+ } else {
+ __ Cmpi(reg, Operand(object), r0);
+ }
+ DeoptimizeIf(ne, instr, "value mismatch");
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ li(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(instr->pointer_map(), 1,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r3, scratch0());
+ }
+ __ TestIfSmi(scratch0(), r0);
+ DeoptimizeIf(eq, instr, "instance migration failed", cr0);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ Register map_reg = scratch0();
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(map_reg, map, &success);
+ __ beq(&success);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(map_reg, map, &success);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ bne(deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr, "wrong map");
+ }
+
+ __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+ // Check for heap number
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
+ __ beq(&heap_number);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ __ li(result_reg, Operand::Zero());
+ __ b(&done);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+ __ b(&done);
+
+ // smi
+ __ bind(&is_smi);
+ __ ClampUint8(result_reg, result_reg);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ MovDoubleHighToInt(result_reg, value_reg);
+ } else {
+ __ MovDoubleLowToInt(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+#if V8_TARGET_ARCH_PPC64
+ __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
+#else
+ __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
+#endif
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate FINAL : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ b(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ LoadIntLiteral(scratch, size - kHeapObjectTag);
+ } else {
+ __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+ }
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ Label loop;
+ __ bind(&loop);
+ __ subi(scratch, scratch, Operand(kPointerSize));
+ __ StorePX(scratch2, MemOperand(result, scratch));
+ __ cmpi(scratch, Operand::Zero());
+ __ bge(&loop);
+ }
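+ // The filler loop walks from the last word of the new object down to the
+ // first, storing the one-pointer filler map so the uninitialized region
+ // always looks like valid heap data.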
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ DCHECK(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+#if !V8_TARGET_ARCH_PPC64
+ if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+ __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_PPC64
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
+#endif
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ DCHECK(ToRegister(instr->value()).is(r3));
+ __ push(r3);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // r10 = literals array.
+ // r4 = regexp literal.
+ // r3 = regexp literal clone.
+ // r5 and r7-r9 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ Move(r10, instr->hydrogen()->literals());
+ __ LoadP(r4, FieldMemOperand(r10, literal_offset));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r4, ip);
+ __ bne(&materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in r3.
+ __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ mov(r8, Operand(instr->hydrogen()->pattern()));
+ __ mov(r7, Operand(instr->hydrogen()->flags()));
+ __ Push(r10, r9, r8, r7);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ mr(r4, r3);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
+ __ b(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ LoadSmiLiteral(r3, Smi::FromInt(size));
+ __ Push(r4, r3);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(r4);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->kind());
+ __ mov(r5, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ mov(r5, Operand(instr->hydrogen()->shared_info()));
+ __ mov(r4, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, r5, r4);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ Condition final_branch_condition =
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+ instr->type_literal());
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(instr, final_branch_condition);
+ }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+ Register input, Handle<String> type_name) {
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
+ __ JumpIfSmi(input, true_label);
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->string_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+ __ bge(false_label);
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ __ cmpi(r0, Operand::Zero());
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ beq(true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->undefined_string())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+ __ beq(true_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ __ cmpi(r0, Operand::Zero());
+ final_branch_condition = ne;
+
+ } else if (String::Equals(type_name, factory->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ Register type_reg = scratch;
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
+ __ beq(true_label);
+ __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->object_string())) {
+ Register map = scratch;
+ __ JumpIfSmi(input, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(true_label);
+ __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
+ // Check for undetectable objects => false.
+ __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ __ cmpi(r0, Operand::Zero());
+ final_branch_condition = eq;
+
+ } else {
+ __ b(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp());
+
+ EmitIsConstructCall(temp1, scratch0());
+ EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+ DCHECK(!temp1.is(temp2));
+ // Get the frame pointer for the calling frame.
+ __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&check_frame_marker);
+ __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+ __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck FINAL : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LStackCheck* instr_;
+ };
+
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bge(&done);
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new (zone()) DeferredStackCheck(this, instr);
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ blt(deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ DCHECK(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
+ DeoptimizeIf(eq, instr, "undefined");
+
+ Register null_value = r8;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r3, null_value);
+ DeoptimizeIf(eq, instr, "null");
+
+ __ TestIfSmi(r3, r0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+ DeoptimizeIf(le, instr, "wrong instance type");
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r3);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+ __ cmp(r4, ip);
+ DeoptimizeIf(ne, instr, "wrong map");
+ __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
+ __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+ __ bne(&load_cache);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ b(&done);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ cmpi(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, "no cache");
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ cmp(map, scratch0());
+ DeoptimizeIf(ne, instr, "wrong map");
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result, Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object, index);
+ __ li(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(instr->pointer_map(), 2,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+ Register result, Register object, Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {}
+ void Generate() OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ LInstruction* instr() OVERRIDE { return instr_; }
+
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new (zone())
+ DeferredLoadMutableDouble(this, instr, result, object, index);
+
+ Label out_of_object, done;
+
+ __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+ __ bne(deferred->entry(), cr0);
+ __ ShiftRightArithImm(index, index, 1);
+
+ __ cmpi(index, Operand::Zero());
+ __ blt(&out_of_object);
+
+ __ SmiToPtrArrayOffset(r0, index);
+ __ add(scratch, object, r0);
+ __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ b(&done);
+
+ __ bind(&out_of_object);
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+ __ SmiToPtrArrayOffset(r0, index);
+ __ sub(scratch, result, r0);
+ __ LoadP(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
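+// A worked example of the index encoding handled above: the incoming index
+// is a Smi whose lowest payload bit selects the deferred mutable-double
+// path.  After the arithmetic shift right by one, a non-negative value n
+// addresses the in-object field at JSObject::kHeaderSize + n * kPointerSize,
+// while a negative value -(p + 1) addresses property p in the properties
+// array, because FixedArray::kHeaderSize - kPointerSize +
+// (p + 1) * kPointerSize == FixedArray::kHeaderSize + p * kPointerSize.
+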
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
new file mode 100644
index 0000000000..8ae3b3c5d3
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.h
@@ -0,0 +1,372 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#define V8_PPC_LITHIUM_CODEGEN_PPC_H_
+
+#include "src/ppc/lithium-ppc.h"
+
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LConstantOperand must be an Integer32 or Smi
+ void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ intptr_t ToRepresentation(LConstantOperand* op,
+ const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+ Register object, Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ MemOperand PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_tagged,
+ int constant_key, int element_size_shift,
+ int base_offset);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ StrictMode strict_mode() const { return info()->strict_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ Register scratch0() { return r11; }
+ DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true, Label* if_false,
+ Handle<String> class_name, Register input,
+ Register temporary, Register temporary2);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr, SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context);
+
+ enum R4State { R4_UNINITIALIZED, R4_CONTAINS_TARGET };
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in r4.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ LInstruction* instr, R4State r4_state);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, Deoptimizer::BailoutType bailout_type,
+ CRegister cr = cr7);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ const char* detail, CRegister cr = cr7);
+
+ void AddToTranslation(LEnvironment* environment, Translation* translation,
+ LOperand* op, bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding);
+
+ void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_PPC64
+ void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) OVERRIDE;
+
+ static Condition TokenToCondition(Token::Value op);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template <class InstrType>
+ void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+ template <class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition,
+ CRegister cr = cr7);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+ Handle<String> type_name);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input, Register temp1, Label* is_not_object,
+ Label* is_object);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp1, Register temp2);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+ int* offset, AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ }
+
+ ~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
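+// Out-of-line code emitted after the main instruction stream.  Code
+// generators subclass this per instruction, overriding Generate() to emit
+// the deferred code and instr() to return the originating instruction (see
+// e.g. DeferredStackCheck in lithium-codegen-ppc.cc).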
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
new file mode 100644
index 0000000000..c261b665e7
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
@@ -0,0 +1,288 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+
+namespace v8 {
+namespace internal {
+
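+// r11; aliases LCodeGen::scratch0().  Holds a spilled general-purpose value
+// while a cycle in the move graph is being broken (see BreakCycle below).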
+static const Register kSavedValueRegister = {11};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ DCHECK(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i; // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ DCHECK(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) require us to spill this spilled value to
+ // the stack, to free up the register.
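+  //
+  // A small example: resolving the swap {r3 -> r4, r4 -> r3} recurses from
+  // the first move into the second, finds that the first (root) move is
+  // still pending and blocks it, and calls BreakCycle, which copies r4 into
+  // kSavedValueRegister and eliminates the second move.  The first move is
+  // then emitted as usual, and RestoreValue finally writes the saved value
+  // into r3, completing the swap.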
+ DCHECK(!moves_[index].IsPending());
+ DCHECK(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ DCHECK(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ DCHECK(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mr(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ DCHECK(in_cycle_);
+ DCHECK(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kScratchDoubleReg.
+ if (saved_destination_->IsRegister()) {
+ __ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mr(cgen_->ToRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ __ StoreP(source_register, cgen_->ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(cgen_->ToRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ __ LoadP(ip, source_operand);
+ __ StoreP(ip, destination_operand);
+ } else {
+ __ LoadP(kSavedValueRegister, source_operand);
+ __ StoreP(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, dst);
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ LoadDoubleLiteral(result, v, ip);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+ } else {
+ __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+ }
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ fmr(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ stfd(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ lfd(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+// kScratchDoubleReg was used to break the cycle,
+// but kSavedValueRegister is free.
+#if V8_TARGET_ARCH_PPC64
+ __ ld(kSavedValueRegister, source_operand);
+ __ std(kSavedValueRegister, destination_operand);
+#else
+ MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ lwz(kSavedValueRegister, source_operand);
+ __ stw(kSavedValueRegister, destination_operand);
+ __ lwz(kSavedValueRegister, source_high_operand);
+ __ stw(kSavedValueRegister, destination_high_operand);
+#endif
+ } else {
+ __ lfd(kScratchDoubleReg, source_operand);
+ __ stfd(kScratchDoubleReg, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h b/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
new file mode 100644
index 0000000000..78bd21355e
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#define V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver FINAL BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
new file mode 100644
index 0000000000..42470c53a0
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -0,0 +1,2626 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "src/v8.h"
+
+#include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/ppc/lithium-codegen-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Inputs operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-d";
+ case Token::SUB:
+ return "sub-d";
+ case Token::MUL:
+ return "mul-d";
+ case Token::DIV:
+ return "div-d";
+ case Token::MOD:
+ return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-t";
+ case Token::SUB:
+ return "sub-t";
+ case Token::MUL:
+ return "mul-t";
+ case Token::MOD:
+ return "mod-t";
+ case Token::DIV:
+ return "div-t";
+ case Token::BIT_AND:
+ return "bit-and-t";
+ case Token::BIT_OR:
+ return "bit-or-t";
+ case Token::BIT_XOR:
+ return "bit-xor-t";
+ case Token::ROR:
+ return "ror-t";
+ case Token::SHL:
+ return "shl-t";
+ case Token::SAR:
+ return "sar-t";
+ case Token::SHR:
+ return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+ true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+ false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ std::ostringstream os;
+ os << hydrogen()->access() << " <- ";
+ stream->Add(os.str().c_str());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ DCHECK(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot for a double-width slot.
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+ return spill_slot_count_++;
+}
+
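+// For example, starting from an empty chunk, successive requests for a
+// general slot, a double slot and another general slot receive indices 0, 2
+// and 3; the extra increment for DOUBLE_REGISTERS reserves the second word
+// needed by the double-width slot.
+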
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ DCHECK(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ DCHECK(is_unused());
+ chunk_ = new (zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new (zone())
+ LUnallocated(LUnallocated::NONE, LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ DCHECK(!instr->HasPointerMap());
+ instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new (zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
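+// For example, a Smi-representation left shift by a non-zero constant is
+// marked as deoptimizing when its uses cannot accept a result truncated to
+// Smi, and a logical shift right by zero (or by a non-constant amount) is
+// marked as deoptimizing when the instruction is not flagged as uint32;
+// both cases take the AssignEnvironment path above.
+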
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ DCHECK(left->representation().IsTagged());
+ DCHECK(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, r4);
+ LOperand* right_operand = UseFixed(right, r3);
+ LArithmeticT* result =
+ new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ DCHECK(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ DCHECK(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ DCHECK(last_environment != NULL);
+    // Only copy the environment if it is later used again.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ DCHECK(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ DCHECK(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new (zone()) LDummy());
+ } else {
+ DCHECK(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new (zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new (zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ DCHECK(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ DCHECK(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
+ bailout->environment());
+ }
+ }
+}
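+
+// A rough sketch of what the block above produces for a call that has
+// observable side effects (illustrative only; the exact stream depends on
+// the flags checked above):
+//
+//   LCallXxx       <- the MarkAsCall'd instruction itself
+//   LLazyBailout   <- environment replayed from the HSimulate that follows
+//                     the call in the hydrogen graph
+//
+// The lazy bailout is what gives a lazy deoptimization at the call's return
+// address an environment to rebuild the unoptimized frame from.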
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new (zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegister(instr->value());
+ return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new (zone()) LInstanceOf(
+ context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
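+
+// Register conventions used throughout this file (assumed from the PPC
+// port's calling convention rather than spelled out here): cp holds the
+// current context, r3 is the first-argument and return-value register, and
+// r4 and up carry further fixed arguments. This is why call-like
+// instructions are built with UseFixed(..., r3/r4/...) and finished with
+// DefineFixed(result, r3).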
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result = new (zone())
+ LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), r3), FixedTemp(r7));
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), r4);
+ LOperand* receiver = UseFixed(instr->receiver(), r3);
+ LOperand* length = UseFixed(instr->length(), r5);
+ LOperand* elements = UseFixed(instr->elements(), r6);
+ LApplyArguments* result =
+ new (zone()) LApplyArguments(function, receiver, length, elements);
+ return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new (zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(new (zone())
+ LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL
+ : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new (zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new (zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r4);
+
+ LCallJSFunction* result = new (zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+ CallInterfaceDescriptor descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op =
+ UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result =
+ new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+ HTailCallThroughMegamorphicCache* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* receiver_register =
+ UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+ LOperand* name_register =
+ UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ // Not marked as call. It can't deoptimize, and it never returns.
+ return new (zone()) LTailCallThroughMegamorphicCache(
+ context, receiver_register, name_register);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), r4);
+ LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor:
+ return DoMathFloor(instr);
+ case kMathRound:
+ return DoMathRound(instr);
+ case kMathFround:
+ return DoMathFround(instr);
+ case kMathAbs:
+ return DoMathAbs(instr);
+ case kMathLog:
+ return DoMathLog(instr);
+ case kMathExp:
+ return DoMathExp(instr);
+ case kMathSqrt:
+ return DoMathSqrt(instr);
+ case kMathPowHalf:
+ return DoMathPowHalf(instr);
+ case kMathClz32:
+ return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new (zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempDoubleRegister();
+ LMathRound* result = new (zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFround* result = new (zone()) LMathFround(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d1);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new (zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = TempDoubleRegister();
+ LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new (zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r4);
+ LCallNew* result = new (zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r4);
+ LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), r4);
+ LCallFunction* call = new (zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineAsRegister(new (zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
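+
+// A minimal sketch of the strength reduction this lowering enables, assuming
+// the usual power-of-two codegen (the real sequence is emitted by the code
+// generator and may differ): for divisor == 1 << shift, truncating division
+// needs no divide instruction:
+//
+//   int32_t quotient =
+//       (dividend + ((dividend >> 31) & (divisor - 1))) >> shift;
+//
+// The environment assigned above is what allows deoptimizing in the cases
+// this cannot express: a -0 result, kMinInt / -1 overflow, or a
+// non-truncating division that leaves a remainder.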
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+ ? NULL
+ : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LFlooringDivI* div = new (zone()) LFlooringDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+ if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+      // The constants -1, 0 and 1 can be used as constant operands even when
+      // the result can overflow; other constants only when overflow is
+      // impossible.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ LMulI* mul = new (zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
+
+ } else if (instr->representation().IsDouble()) {
+ if (instr->HasOneUse() &&
+ (instr->uses().value()->IsAdd() || instr->uses().value()->IsSub())) {
+ HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+
+ if (use->IsAdd() && instr == use->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded into a
+ // multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsAdd() &&
+ !(use->left()->IsMul() && use->left()->HasOneUse())) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->left() && use->IsSub()) {
+ // This mul is the lhs of a sub. The mul and sub will be folded into a
+ // multiply-sub in DoSub.
+ return NULL;
+ }
+ }
+
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+ if (instr->left()->IsConstant() &&
+ !instr->CheckFlag(HValue::kCanOverflow)) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new (zone()) LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+ return DoMultiplySub(instr->right(), HMul::cast(instr->left()));
+ }
+
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new (zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ return result;
+}
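+
+// Rewriting a constant-lhs subtraction (c - x) as a reverse subtraction keeps
+// the constant usable as an immediate: assuming the usual PPC lowering, the
+// code generator can emit something like
+//   subfic  rt, ra, SI     // rt = SI - ra
+// instead of first materializing c in a register.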
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(
+ new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegisterAtStart(minuend);
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+ return DefineSameAsFirst(
+ new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
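+
+// DoMultiplyAdd/DoMultiplySub exist because PPC provides fused
+// multiply-add/subtract. Assuming the usual lowering, the folded operations
+// become single instructions along the lines of
+//   fmadd  frt, fra, frc, frb   // frt = fra * frc + frb
+//   fmsub  frt, fra, frc, frb   // frt = fra * frc - frb
+// rather than a separate fmul followed by fadd/fsub.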
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ DCHECK(instr->left()->representation().IsExternal());
+ DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+ }
+
+ if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+ DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ DCHECK(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ DCHECK(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right =
+ exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d2)
+ : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+ LPower* result = new (zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3), instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r4);
+ LOperand* right = UseFixed(instr->right(), r3);
+ LCmpT* result = new (zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(r));
+ DCHECK(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ DCHECK(r.IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new (zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LIsObjectAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r4);
+ LOperand* right = UseFixed(instr->right(), r3);
+ LStringCompareAndBranch* result =
+ new (zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone())
+ LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new (zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), r3);
+ LDateField* result =
+ new (zone()) LDateField(object, FixedTemp(r4), instr->index());
+ return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new (zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(
+ DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ DCHECK(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
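+
+// Summary of the representation changes handled above (read off the branches
+// in DoChange):
+//   Smi       -> Tagged    : LDummyUse (no code)
+//   Tagged    -> Double    : LNumberUntagD (+env unless the input is a Smi)
+//   Tagged    -> Smi       : LDummyUse if known Smi, else LCheckSmi (+env)
+//   Tagged    -> Integer32 : LSmiUntag if known Smi, else LTaggedToI (+env)
+//   Double    -> Tagged    : LNumberTagD (+pointer map; may allocate)
+//   Double    -> Smi       : LDoubleToSmi (+env)
+//   Double    -> Integer32 : LDoubleToI (+env unless truncation is allowed)
+//   Integer32 -> Tagged    : LSmiTag if it cannot overflow, else
+//                            LNumberTagU/LNumberTagI (+pointer map)
+//   Integer32 -> Smi       : LSmiTag (+env if it can overflow)
+//   Integer32 -> Double    : LUint32ToDouble or LInteger32ToDouble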
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new (zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+ } else {
+ DCHECK(input_rep.IsSmiOrTagged());
+ LClampTToUint8* result =
+ new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ DCHECK(value->representation().IsDouble());
+ return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new (zone())
+ LReturn(UseFixed(instr->value(), r3), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new (zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new (zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new (zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new (zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new (zone()) LLoadGlobalCell;
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
+ LLoadGlobalGeneric* result =
+ new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+  // Use a temp register to check the value in the cell when a hole check
+  // is required.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new (zone())
+ LStoreGlobalCell(value, TempRegister()))
+ : new (zone()) LStoreGlobalCell(value, NULL);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
+
+ LInstruction* result =
+ DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ DCHECK(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_typed_elements()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key));
+ } else {
+ DCHECK((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new (zone()) LLoadKeyed(backing_store, key));
+ }
+
+  bool needs_environment;
+  if (instr->is_external() || instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+                         elements_kind == UINT32_ELEMENTS) &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment = instr->RequiresHoleCheck();
+  }
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+ return result;
+}
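+
+// Note on the environments above: for external/typed UINT32 loads the deopt
+// exists because a loaded value above kMaxInt cannot be represented in the
+// Integer32 result unless the instruction carries the kUint32 flag; for
+// tagged and double backing stores it only covers the hole check.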
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+ }
+
+ LInstruction* result = DefineFixed(
+ new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_typed_elements()) {
+ DCHECK(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+
+ return new (zone()) LStoreKeyed(object, key, val);
+ }
+
+ DCHECK((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ DCHECK((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new (zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+ DCHECK(instr->object()->representation().IsTagged());
+ DCHECK(instr->key()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(new (zone()) LStoreKeyedGeneric(context, obj, key, val),
+ instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), r3);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, context, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new (zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map =
+ instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
+ LOperand* val;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new (zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+ LInstruction* result = new (zone()) LStoreNamedGeneric(context, obj, val);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r4);
+ LOperand* right = UseFixed(instr->right(), r3);
+ return MarkAsCall(
+ DefineFixed(new (zone()) LStringAdd(context, left, right), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new (zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new (zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LRegExpLiteral(context), r3),
+ instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LFunctionLiteral(context), r3),
+ instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ DCHECK(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new (zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ DCHECK(info()->IsStub());
+ CallInterfaceDescriptor descriptor =
+ info()->code_stub()->GetCallInterfaceDescriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor.GetEnvironmentParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Retry(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LCallStub(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), r3);
+ LToFastProperties* result = new (zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new (zone()) LTypeof(context, UseFixed(instr->value(), r3));
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new (zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LStackCheck(context), instr);
+ } else {
+ DCHECK(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new (zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->BindContext(instr->closure_context());
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new (zone()) LDrop(argument_count);
+ DCHECK(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->enumerable(), r3);
+ LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new (zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new (zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, cp), instr);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
new file mode 100644
index 0000000000..2176fa66c5
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -0,0 +1,2746 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_PPC_H_
+#define V8_PPC_LITHIUM_PPC_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(AllocateBlockContext) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
+ V(CallFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckInstanceType) \
+ V(CheckNonSmi) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreFrameContext) \
+ V(StoreGlobalCell) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(RSubI) \
+ V(TaggedToI) \
+ V(TailCallThroughMegamorphicCache) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ Opcode opcode() const FINAL { return LInstruction::k##type; } \
+ void CompileToNative(LCodeGen* generator) FINAL; \
+ const char* Mnemonic() const FINAL { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ DCHECK(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
+
+
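+// For illustration only: inside LAddI (declared below with
+// DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") and
+// DECLARE_HYDROGEN_ACCESSOR(Add)), the two macros above expand roughly to
+//
+//   Opcode opcode() const FINAL { return LInstruction::kAddI; }
+//   void CompileToNative(LCodeGen* generator) FINAL;
+//   const char* Mnemonic() const FINAL { return "add-i"; }
+//   static LAddI* cast(LInstruction* instr) {
+//     DCHECK(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }
+
+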
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {}
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ private:
+ // Iterator support.
+ friend class InputIterator;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits : public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ bool HasResult() const FINAL { return R != 0 && result() != NULL; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ int InputCount() FINAL { return I; }
+ LOperand* InputAt(int i) FINAL { return inputs_[i]; }
+
+ int TempCount() FINAL { return T; }
+ LOperand* TempAt(int i) FINAL { return temps_[i]; }
+};
+
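+// Concrete instructions below pick their shape via these template arguments:
+// e.g. LAddI is LTemplateInstruction<1, 2, 0> (one result, two inputs, no
+// temps), while LGap uses the <0, 0, 0> shape.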
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block) : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ bool IsGap() const OVERRIDE { return true; }
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ DCHECK(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new (zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+ bool IsControl() const OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ LDummy() {}
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ bool IsControl() const OVERRIDE { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LTailCallThroughMegamorphicCache FINAL
+ : public LTemplateInstruction<0, 3, 0> {
+ public:
+ explicit LTailCallThroughMegamorphicCache(LOperand* context,
+ LOperand* receiver,
+ LOperand* name) {
+ inputs_[0] = context;
+ inputs_[1] = receiver;
+ inputs_[2] = name;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* name() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+ "tail-call-through-megamorphic-cache")
+ DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+ bool IsControl() const FINAL { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
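+// On PPC this maps naturally onto a fused multiply-add (fmadd).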
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
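+// On PPC this maps onto the fused multiply-subtract family (fmsub/fnmsub).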
+class LMultiplySubD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
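+    // Make sure the data backing the fast exp code exists before any code
+    // that uses it is generated.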
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
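+  // Filled in lazily via SetDeferredLazyDeoptimizationEnvironment() above.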
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LRSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+ void CompileToNative(LCodeGen* generator) OVERRIDE;
+ const char* Mnemonic() const OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+ void CompileToNative(LCodeGen* generator) OVERRIDE;
+ const char* Mnemonic() const OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ DCHECK(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) {}
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ CallInterfaceDescriptor descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
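+  // The arity depends on the call descriptor, so the inputs are kept in a
+  // ZoneList and the iterator hooks are implemented here rather than via
+  // the fixed-size LTemplateInstruction shape.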
+ int InputCount() FINAL { return inputs_.length(); }
+ LOperand* InputAt(int i) FINAL { return inputs_[i]; }
+
+ int TempCount() FINAL { return 0; }
+ LOperand* TempAt(int i) FINAL { return NULL; }
+};
+
+
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LTransitionElementsKind(LOperand* object, LOperand* context,
+ LOperand* new_map_temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) { temps_[0] = temp; }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(info, graph),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) {}
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathFround(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // An operand created by UseRegister is guaranteed to be live until the end
+ // of the instruction, so the register allocator will not reuse its register
+ // for any other operand inside the instruction.
+ // An operand created by UseRegisterAtStart is guaranteed to be live only at
+ // the instruction start; the register allocator is free to assign the same
+ // register to some other operand used inside the instruction (i.e. a
+ // temporary or the output). See the illustrative sketch below.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
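+
+ // Illustrative sketch (editor's note, not from the original source): a
+ // typical DoXxx lowering combines these helpers roughly as follows, for a
+ // binary HAdd-style node `instr`; LAddI and the Better*Operand accessors
+ // are assumed here purely for illustration:
+ //
+ //   LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ //   LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ //   return DefineAsRegister(new (zone()) LAddI(left, right));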
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly, and we do not attach an environment to the
+ // instruction. See the illustrative sketch below.
+ LInstruction* MarkAsCall(
+ LInstruction* instr, HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
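+
+ // Illustrative sketch (editor's note, not from the original source): a
+ // call-like node such as HCallRuntime would typically be lowered by
+ // pinning its result to the return register and wrapping it with
+ // MarkAsCall, roughly:
+ //
+ //   LOperand* context = UseFixed(instr->context(), cp);
+ //   return MarkAsCall(
+ //       DefineFixed(new (zone()) LCallRuntime(context), r3), instr);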
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_LITHIUM_PPC_H_
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
new file mode 100644
index 0000000000..0b3d72945f
--- /dev/null
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -0,0 +1,4819 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h> // For assert
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
+ generating_stub_(false),
+ has_frame_(false) {
+ if (isolate() != NULL) {
+ code_object_ =
+ Handle<Object>(isolate()->heap()->undefined_value(), isolate());
+ }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+ mtctr(target);
+ bctr();
+}
+
+
+void MacroAssembler::JumpToJSEntry(Register target) {
+ Move(ip, target);
+ Jump(ip);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, CRegister cr) {
+ Label skip;
+
+ if (cond != al) b(NegateCondition(cond), &skip, cr);
+
+ DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
+
+ mov(ip, Operand(target, rmode));
+ mtctr(ip);
+ bctr();
+
+ bind(&skip);
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ CRegister cr) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ // 'code' is always generated PPC code.
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
+
+
+void MacroAssembler::Call(Register target) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Branch via the count register and set the LK bit to record the return point.
+ mtctr(target);
+ bctrl();
+
+ DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::CallJSEntry(Register target) {
+ DCHECK(target.is(ip));
+ Call(target);
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
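+ // The call sequence is the mov that materializes the target address,
+ // followed by mtctr and bctrl (see Call below); hence the "2 +".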
+ return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond) {
+ return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(cond == al);
+
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+ Label start;
+ bind(&start);
+#endif
+
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // This can likely be optimized to use bc() with a 24-bit relative offset:
+ //
+ // RecordRelocInfo(x.rmode_, x.imm_);
+ // bc( BA, .... offset, LKset);
+ //
+
+ mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ mtctr(ip);
+ bctrl();
+
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id, Condition cond) {
+ AllowDeferredHandleDereference using_raw_address;
+ return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallSize(code, rmode, ast_id, cond);
+ Label start;
+ bind(&start);
+#endif
+
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+ AllowDeferredHandleDereference using_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond) {
+ DCHECK(cond == al);
+ blr();
+}
+
+
+void MacroAssembler::Drop(int count, Condition cond) {
+ DCHECK(cond == al);
+ if (count > 0) {
+ Add(sp, sp, count * kPointerSize, r0);
+ }
+}
+
+
+void MacroAssembler::Ret(int drop, Condition cond) {
+ Drop(drop, cond);
+ Ret(cond);
+}
+
+
+void MacroAssembler::Call(Label* target) { b(target, SetLK); }
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ mov(r0, Operand(handle));
+ push(r0);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
+ } else {
+ DCHECK(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+ DCHECK(cond == al);
+ if (!dst.is(src)) {
+ mr(dst, src);
+ }
+}
+
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+ if (!dst.is(src)) {
+ fmr(dst, src);
+ }
+}
+
+
+void MacroAssembler::MultiPush(RegList regs) {
+ int16_t num_to_push = NumberOfBitsSet(regs);
+ int16_t stack_offset = num_to_push * kPointerSize;
+
+ subi(sp, sp, Operand(stack_offset));
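+ // Store from the highest register encoding down, so the lowest-numbered
+ // register in the set ends up closest to sp.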
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ StoreP(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ LoadP(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi(sp, sp, Operand(stack_offset));
+}
+
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond) {
+ DCHECK(cond == al);
+ LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+
+void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
+ Condition cond) {
+ DCHECK(cond == al);
+ StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+
+void MacroAssembler::InNewSpace(Register object, Register scratch,
+ Condition cond, Label* branch) {
+ // N.B. scratch may be the same register as object
+ DCHECK(cond == eq || cond == ne);
+ mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
+ and_(scratch, object, r0);
+ mov(r0, Operand(ExternalReference::new_space_start(isolate())));
+ cmp(scratch, r0);
+ b(cond, branch);
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object, int offset, Register value, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ Add(dst, object, offset - kHeapObjectTag, r0);
+ if (emit_debug_code()) {
+ Label ok;
+ andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ beq(&ok, cr0);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+
+ bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
+ mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object, Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
+ cmp(ip, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, in which case the from object's
+ // page's interesting flag is guaranteed to be set as well. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+
+ addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ beq(&ok, cr0);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ mflr(r0);
+ push(r0);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(r0);
+ mtlr(r0);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+ }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action, SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
+ if (emit_debug_code()) {
+ LoadP(r0, MemOperand(address));
+ cmp(r0, value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ }
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ mflr(r0);
+ push(r0);
+ }
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(r0);
+ mtlr(r0);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+ value);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+ mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address, Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(ip, Operand(store_buffer));
+ LoadP(scratch, MemOperand(ip));
+ // Store pointer to buffer and increment buffer top.
+ StoreP(address, MemOperand(scratch));
+ addi(scratch, scratch, Operand(kPointerSize));
+ // Write back new top of buffer.
+ StoreP(scratch, MemOperand(ip));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ and_(r0, scratch, r0, SetRC);
+
+ if (and_then == kFallThroughAtEnd) {
+ beq(&done, cr0);
+ } else {
+ DCHECK(and_then == kReturnAtEnd);
+ beq(&done, cr0);
+ }
+ mflr(r0);
+ push(r0);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(r0);
+ mtlr(r0);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+void MacroAssembler::PushFixedFrame(Register marker_reg) {
+ mflr(r0);
+#if V8_OOL_CONSTANT_POOL
+ if (marker_reg.is_valid()) {
+ Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ } else {
+ Push(r0, fp, kConstantPoolRegister, cp);
+ }
+#else
+ if (marker_reg.is_valid()) {
+ Push(r0, fp, cp, marker_reg);
+ } else {
+ Push(r0, fp, cp);
+ }
+#endif
+}
+
+
+void MacroAssembler::PopFixedFrame(Register marker_reg) {
+#if V8_OOL_CONSTANT_POOL
+ if (marker_reg.is_valid()) {
+ Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
+ } else {
+ Pop(r0, fp, kConstantPoolRegister, cp);
+ }
+#else
+ if (marker_reg.is_valid()) {
+ Pop(r0, fp, cp, marker_reg);
+ } else {
+ Pop(r0, fp, cp);
+ }
+#endif
+ mtlr(r0);
+}
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the
+ // stack, so adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ DCHECK(num_unsaved >= 0);
+ if (num_unsaved > 0) {
+ subi(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
+ MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ MultiPop(kSafepointSavedRegisters);
+ if (num_unsaved > 0) {
+ addi(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ StoreP(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ LoadP(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ RegList regs = kSafepointSavedRegisters;
+ int index = 0;
+
+ DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+
+ for (int16_t i = 0; i < reg_code; i++) {
+ if ((regs & (1 << i)) != 0) {
+ index++;
+ }
+ }
+
+ return index;
+}
+
+
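+// Illustrative example (editor's note, not from the original source): with a
+// hypothetical saved set {r3, r5, r6}, MultiPush stores r6 farthest from sp
+// and r3 at sp itself, so SafepointRegisterStackIndex above yields 0 for r3,
+// 1 for r5 and 2 for r6, matching MemOperand(sp, index * kPointerSize) below.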
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // General purpose registers are pushed last on the stack.
+ int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ Label done;
+
+ // Test for NaN
+ fcmpu(src, src);
+
+ if (dst.is(src)) {
+ bordered(&done);
+ } else {
+ Label is_nan;
+ bunordered(&is_nan);
+ fmr(dst, src);
+ b(&done);
+ bind(&is_nan);
+ }
+
+ // Replace with canonical NaN.
+ double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+ LoadDoubleLiteral(dst, nan_value, r0);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::ConvertIntToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovIntToDouble(double_dst, src, r0);
+ fcfid(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovUnsignedIntToDouble(double_dst, src, r0);
+ fcfid(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
+ const Register src,
+ const Register int_scratch) {
+ MovIntToDouble(dst, src, int_scratch);
+ fcfid(dst, dst);
+ frsp(dst, dst);
+}
+
+
+void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_PPC64
+ const Register dst_hi,
+#endif
+ const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode) {
+ if (rounding_mode == kRoundToZero) {
+ fctidz(double_dst, double_input);
+ } else {
+ SetRoundingMode(rounding_mode);
+ fctid(double_dst, double_input);
+ ResetRoundingMode();
+ }
+
+ MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+ dst_hi,
+#endif
+ dst, double_dst);
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+void MacroAssembler::LoadConstantPoolPointerRegister(
+ CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
+ Register base;
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
+ if (access_method == CAN_USE_IP) {
+ base = ip;
+ constant_pool_offset += ip_code_entry_delta;
+ } else {
+ DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
+ base = kConstantPoolRegister;
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
+ // CheckBuffer() is called too frequently. This will pre-grow
+ // the buffer if needed to avoid splitting the relocation and instructions.
+ EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
+
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
+ mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
+ }
+ LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
+}
+#endif
+
+
+void MacroAssembler::StubPrologue(int prologue_offset) {
+ LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
+ PushFixedFrame(r11);
+ // Adjust FP to point to saved FP.
+ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+#if V8_OOL_CONSTANT_POOL
+ // ip contains prologue address
+ LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
+ set_ool_constant_pool_available(true);
+#endif
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
+ {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ // The following instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ // This matches the code found in PatchPlatformCodeAge()
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+ // Don't use Call -- we need to preserve ip and lr
+ nop(); // marker to detect sequence (see IsOld)
+ mov(r3, Operand(target));
+ Jump(r3);
+ for (int i = 0; i < kCodeAgingSequenceNops; i++) {
+ nop();
+ }
+ } else {
+ // This matches the code found in GetNoCodeAgeSequence()
+ PushFixedFrame(r4);
+ // Adjust fp to point to saved fp.
+ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
+ nop();
+ }
+ }
+ }
+#if V8_OOL_CONSTANT_POOL
+ // ip contains prologue address
+ LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
+ set_ool_constant_pool_available(true);
+#endif
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
+ PushFixedFrame();
+#if V8_OOL_CONSTANT_POOL
+ // This path should not rely on ip containing code entry.
+ LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
+#endif
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ push(ip);
+ } else {
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ PushFixedFrame(ip);
+ }
+ // Adjust FP to point to saved FP.
+ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ mov(r0, Operand(CodeObject()));
+ push(r0);
+}
+
+
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+#if V8_OOL_CONSTANT_POOL
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+#endif
+ // r3: preserved
+ // r4: preserved
+ // r5: preserved
+
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer, return address and constant pool pointer.
+ int frame_ends;
+ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+#if V8_OOL_CONSTANT_POOL
+ const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
+ const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
+ const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
+ LoadP(kConstantPoolRegister, MemOperand(fp, offset));
+#endif
+ mtlr(r0);
+ frame_ends = pc_offset();
+ Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
+ mr(fp, ip);
+ return frame_ends;
+}
+
+
+// ExitFrame layout (approximate; needs updating)
+//
+// SP -> previousSP
+// LK reserved
+// code
+// sp_on_exit (for debug?)
+// oldSP->prev SP
+// LK
+// <parameters on stack>
+
+// Prior to calling EnterExitFrame, we've got a bunch of parameters
+// on the stack that we need to wrap a real frame around. So first
+// we reserve a slot for LK and push the previous SP, which is captured
+// in the fp register (r31).
+// Then we allocate a new frame.
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+ // Set up the frame structure on the stack.
+ DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ DCHECK(stack_space > 0);
+
+ // This is an opportunity to build a frame to wrap
+ // all of the pushes that have happened inside of V8
+ // since we were called from C code
+
+ // Replicate the ARM frame layout. TODO: make this more closely follow the PPC ABI.
+ mflr(r0);
+ Push(r0, fp);
+ mr(fp, sp);
+ // Reserve room for saved entry sp and code object.
+ subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+
+ if (emit_debug_code()) {
+ li(r8, Operand::Zero());
+ StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+#if V8_OOL_CONSTANT_POOL
+ StoreP(kConstantPoolRegister,
+ MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+#endif
+ mov(r8, Operand(CodeObject()));
+ StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+ // Save the frame pointer and the context in top.
+ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ StoreP(fp, MemOperand(r8));
+ mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ StoreP(cp, MemOperand(r8));
+
+ // Optionally save all volatile double registers.
+ if (save_doubles) {
+ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ // Note that d0 will be accessible at
+ // fp - ExitFrameConstants::kFrameSize -
+ // kNumVolatileRegisters * kDoubleSize,
+ // since the sp slot and code slot were pushed after the fp.
+ }
+
+ addi(sp, sp, Operand(-stack_space * kPointerSize));
+
+ // Allocate and align the frame preparing for calling the runtime
+ // function.
+ const int frame_alignment = ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ }
+ li(r0, Operand::Zero());
+ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+ StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+void MacroAssembler::InitializeNewString(Register string, Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1, Register scratch2) {
+ SmiTag(scratch1, length);
+ LoadRoot(scratch2, map_index);
+ StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
+ li(scratch1, Operand(String::kEmptyHashField));
+ StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
+ StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if !defined(USE_SIMULATOR)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one PPC
+ // platform for another PPC platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // Simulated
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif
+}
+
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context) {
+#if V8_OOL_CONSTANT_POOL
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+#endif
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Calculate the stack location of the saved doubles and restore them.
+ const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
+ const int offset =
+ (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
+ addi(r6, fp, Operand(-offset));
+ RestoreFPRegs(r6, 0, kNumRegs);
+ }
+
+ // Clear top frame.
+ li(r6, Operand::Zero());
+ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ StoreP(r6, MemOperand(ip));
+
+ // Restore current context from top and clear it in debug mode.
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ LoadP(cp, MemOperand(ip));
+ }
+#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ StoreP(r6, MemOperand(ip));
+#endif
+
+ // Tear down the exit frame, pop the arguments, and return.
+ LeaveFrame(StackFrame::EXIT);
+
+ if (argument_count.is_valid()) {
+ ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+ add(sp, sp, argument_count);
+ }
+}
+
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+ Move(dst, d1);
+}
+
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+ Move(dst, d1);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg, Label* done,
+ bool* definitely_mismatches,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual argument counts match. If not,
+ // set up registers per the contract with ArgumentsAdaptorTrampoline:
+ // r3: actual arguments count
+ // r4: function (passed through to callee)
+ // r5: expected arguments count
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+
+ // ARM has some sanity checks as per below; consider adding them for PPC.
+ // DCHECK(actual.is_immediate() || actual.reg().is(r3));
+ // DCHECK(expected.is_immediate() || expected.reg().is(r5));
+ // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
+ // || code_reg.is(r6));
+
+ if (expected.is_immediate()) {
+ DCHECK(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ mov(r3, Operand(actual.immediate()));
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip the adaptation code by making it look
+ // like we have a match between the expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ mov(r5, Operand(expected.immediate()));
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ cmpi(expected.reg(), Operand(actual.immediate()));
+ beq(&regular_invoke);
+ mov(r3, Operand(actual.immediate()));
+ } else {
+ cmp(expected.reg(), actual.reg());
+ beq(&regular_invoke);
+ }
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ mov(r6, Operand(code_constant));
+ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+
+ Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ b(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&regular_invoke);
+ }
+}
+
+
+void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
+ &definitely_mismatches, flag, call_wrapper);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ CallJSEntry(code);
+ call_wrapper.AfterCall();
+ } else {
+ DCHECK(flag == JUMP_FUNCTION);
+ JumpToJSEntry(code);
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in r4.
+ DCHECK(fun.is(r4));
+
+ Register expected_reg = r5;
+ Register code_reg = ip;
+
+ LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ LoadWordArith(expected_reg,
+ FieldMemOperand(
+ code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !defined(V8_TARGET_ARCH_PPC64)
+ SmiUntag(expected_reg);
+#endif
+ LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in r4.
+ DCHECK(function.is(r4));
+
+ // Get the function and setup the context.
+ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ InvokeCode(ip, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(r4, function);
+ InvokeFunction(r4, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
+ Register scratch, Label* fail) {
+ LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
+ Label* fail) {
+ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ blt(fail);
+ cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ bgt(fail);
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
+ Label* fail) {
+ DCHECK(kNotStringTag != 0);
+
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ andi(r0, scratch, Operand(kIsNotStringMask));
+ bne(fail, cr0);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object, Register scratch,
+ Label* fail) {
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ cmpi(scratch, Operand(LAST_NAME_TYPE));
+ bgt(fail);
+}
+
+
+void MacroAssembler::DebugBreak() {
+ li(r3, Operand::Zero());
+ mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available.
+ // We want the stack to look like
+ // sp -> NextOffset
+ // CodeObject
+ // state
+ // context
+ // frame pointer
+
+ // Link the current handler as the next handler.
+ mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ LoadP(r0, MemOperand(r8));
+ StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
+ // Set this new handler as the current one.
+ StoreP(sp, MemOperand(r8));
+
+ if (kind == StackHandler::JS_ENTRY) {
+ li(r8, Operand::Zero()); // NULL frame pointer.
+ StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
+ LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
+ StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
+ } else {
+ // TODO: verify that fp is the right value to store here.
+ StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
+ StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+ }
+ unsigned state = StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ LoadIntLiteral(r8, state);
+ StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ mov(r8, Operand(CodeObject()));
+ StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(r4);
+ mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ StoreP(r4, MemOperand(ip));
+}
+
+
+// PPC - make use of ip as a temporary register
+void MacroAssembler::JumpToHandlerEntry() {
+// Compute the handler entry address and jump to it. The handler table is
+// a fixed array of (smi-tagged) code offsets.
+// r3 = exception, r4 = code object, r5 = state.
+#if V8_OOL_CONSTANT_POOL
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
+#endif
+ LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
+ addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
+ slwi(ip, r5, Operand(kPointerSizeLog2));
+ add(ip, r6, ip);
+ LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
+ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
+ SmiUntag(ip, r5);
+ add(r0, r4, ip);
+ mtctr(r0);
+ bctr();
+}
+
+
+void MacroAssembler::Throw(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+ Label skip;
+
+ // The exception is expected in r3.
+ if (!value.is(r3)) {
+ mr(r3, value);
+ }
+ // Drop the stack pointer to the top of the top handler.
+ mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ LoadP(sp, MemOperand(r6));
+ // Restore the next handler.
+ pop(r5);
+ StoreP(r5, MemOperand(r6));
+
+ // Get the code object (r4) and state (r5). Restore the context and frame
+ // pointer.
+ pop(r4);
+ pop(r5);
+ pop(cp);
+ pop(fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ cmpi(cp, Operand::Zero());
+ beq(&skip);
+ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&skip);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in r3.
+ if (!value.is(r3)) {
+ mr(r3, value);
+ }
+ // Drop the stack pointer to the top of the top stack handler.
+ mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ LoadP(sp, MemOperand(r6));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ b(&check_kind);
+ bind(&fetch_next);
+ LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ andi(r0, r5, Operand(StackHandler::KindField::kMask));
+ bne(&fetch_next, cr0);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(r5);
+ StoreP(r5, MemOperand(r6));
+ // Get the code object (r4) and state (r5). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ pop(r4);
+ pop(r5);
+ pop(cp);
+ pop(fp);
+
+ JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch, Label* miss) {
+ Label same_contexts;
+
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!holder_reg.is(ip));
+ DCHECK(!scratch.is(ip));
+
+ // Load current lexical context from the stack frame.
+ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+// In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ cmpi(scratch, Operand::Zero());
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ LoadP(scratch, FieldMemOperand(scratch, offset));
+ LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Cannot use ip as a temporary in this verification code because ip is
+ // clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the native_context_map.
+ LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+ cmp(holder_reg, ip);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmp(scratch, ip);
+ beq(&same_contexts);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Cannot use ip as a temporary in this verification code because ip is
+ // clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ mr(holder_reg, ip); // Move ip to its holding place.
+ LoadRoot(ip, Heap::kNullValueRootIndex);
+ cmp(holder_reg, ip);
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull);
+
+ LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+ cmp(holder_reg, ip);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ // Restoring ip is not needed; it is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore ip to holder's context.
+ LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset =
+ Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ LoadP(scratch, FieldMemOperand(scratch, token_offset));
+ LoadP(ip, FieldMemOperand(ip, token_offset));
+ cmp(scratch, ip);
+ bne(miss);
+
+ bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stub-hydrogen.cc
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+ // First of all we assign the hash seed to scratch.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiUntag(scratch);
+
+ // Xor original key with a seed.
+ xor_(t0, t0, scratch);
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ notx(scratch, t0);
+ slwi(t0, t0, Operand(15));
+ add(t0, scratch, t0);
+ // hash = hash ^ (hash >> 12);
+ srwi(scratch, t0, Operand(12));
+ xor_(t0, t0, scratch);
+ // hash = hash + (hash << 2);
+ slwi(scratch, t0, Operand(2));
+ add(t0, t0, scratch);
+ // hash = hash ^ (hash >> 4);
+ srwi(scratch, t0, Operand(4));
+ xor_(t0, t0, scratch);
+ // hash = hash * 2057;
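+ // (2057 == 1 + (1 << 3) + (1 << 11), so the multiplication is carried out
+ // with the two shift-and-add steps below.)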
+ mr(r0, t0);
+ slwi(scratch, t0, Operand(3));
+ add(t0, t0, scratch);
+ slwi(scratch, r0, Operand(11));
+ add(t0, t0, scratch);
+ // hash = hash ^ (hash >> 16);
+ srwi(scratch, t0, Operand(16));
+ xor_(t0, t0, scratch);
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
+ Register key, Register result,
+ Register t0, Register t1,
+ Register t2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'elements'.
+ // Unchanged on bailout so 'key' or 'elements' can be used
+ // in further computation.
+ //
+ // Scratch registers:
+ //
+ // t0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // t1 - used to hold the capacity mask of the dictionary
+ //
+ // t2 - used for the index into the dictionary.
+ Label done;
+
+ GetNumberHash(t0, t1);
+
+ // Compute the capacity mask.
+ LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+ SmiUntag(t1);
+ subi(t1, t1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Use t2 for index calculations and keep the hash intact in t0.
+ mr(t2, t0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+ }
+ and_(t2, t2, t1);
+
+ // Scale the index by multiplying by the element size.
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
+ slwi(ip, t2, Operand(1));
+ add(t2, t2, ip); // t2 = t2 * 3
+
+ // Check if the key is identical to the name.
+ slwi(t2, t2, Operand(kPointerSizeLog2));
+ add(t2, elements, t2);
+ LoadP(ip,
+ FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+ cmp(key, ip);
+ if (i != kNumberDictionaryProbes - 1) {
+ beq(&done);
+ } else {
+ bne(miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a field property.
+ // t2: elements + (index * kPointerSize)
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
+ LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ DCHECK_EQ(FIELD, 0);
+ and_(r0, t1, ip, SetRC);
+ bne(miss, cr0);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ LoadP(result, FieldMemOperand(t2, kValueOffset));
+}
+
+
+void MacroAssembler::Allocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ Label* gc_required, AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, Operand(0x7091));
+ li(scratch1, Operand(0x7191));
+ li(scratch2, Operand(0x7291));
+ }
+ b(gc_required);
+ return;
+ }
+
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!scratch1.is(ip));
+ DCHECK(!scratch2.is(ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+
+ // Set up allocation top address register.
+ Register topaddr = scratch1;
+ mov(topaddr, Operand(allocation_top));
+
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into ip.
+ LoadP(result, MemOperand(topaddr));
+ LoadP(ip, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. ip is used
+ // immediately below, so this use of ip does not cause a difference in
+ // register contents between debug and release mode.
+ LoadP(ip, MemOperand(topaddr));
+ cmp(result, ip);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load allocation limit into ip. Result already contains allocation top.
+ LoadP(ip, MemOperand(topaddr, limit - top), r0);
+ }
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
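+ // On 32-bit targets, if the current top is only pointer aligned, a
+ // one-pointer filler map is stored there and the allocation start is bumped
+ // by kPointerSize so that the new object is double aligned.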
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, cr0);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmpl(result, ip);
+ bge(gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(scratch2, MemOperand(result));
+ addi(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ sub(r0, ip, result);
+ if (is_int16(object_size)) {
+ cmpi(r0, Operand(object_size));
+ blt(gc_required);
+ addi(scratch2, result, Operand(object_size));
+ } else {
+ Cmpi(r0, Operand(object_size), scratch2);
+ blt(gc_required);
+ add(scratch2, result, scratch2);
+ }
+ StoreP(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ addi(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register scratch1, Register scratch2,
+ Label* gc_required, AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, Operand(0x7091));
+ li(scratch1, Operand(0x7191));
+ li(scratch2, Operand(0x7291));
+ }
+ b(gc_required);
+ return;
+ }
+
+ // Assert that the register arguments are different and that none of
+ // them are ip. ip is used explicitly in the code generated below.
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!object_size.is(ip));
+ DCHECK(!result.is(ip));
+ DCHECK(!scratch1.is(ip));
+ DCHECK(!scratch2.is(ip));
+
+ // Check relative positions of allocation top and limit addresses.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
+
+ // Set up allocation top address.
+ Register topaddr = scratch1;
+ mov(topaddr, Operand(allocation_top));
+
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into ip.
+ LoadP(result, MemOperand(topaddr));
+ LoadP(ip, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. ip is used
+ // immediately below, so this use of ip does not cause a difference in
+ // register contents between debug and release mode.
+ LoadP(ip, MemOperand(topaddr));
+ cmp(result, ip);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load allocation limit into ip. Result already contains allocation top.
+ LoadP(ip, MemOperand(topaddr, limit - top));
+ }
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, cr0);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmpl(result, ip);
+ bge(gc_required);
+ }
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(scratch2, MemOperand(result));
+ addi(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ sub(r0, ip, result);
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
+ cmp(r0, scratch2);
+ blt(gc_required);
+ add(scratch2, result, scratch2);
+ } else {
+ cmp(r0, object_size);
+ blt(gc_required);
+ add(scratch2, result, object_size);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ andi(r0, scratch2, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, cr0);
+ }
+ StoreP(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ addi(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ mov(r0, Operand(~kHeapObjectTagMask));
+ and_(object, object, r0);
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ mov(scratch, Operand(new_space_allocation_top));
+ LoadP(scratch, MemOperand(scratch));
+ cmp(object, scratch);
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ mov(scratch, Operand(new_space_allocation_top));
+ StoreP(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
+ addi(scratch1, scratch1,
+ Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+ mov(r0, Operand(~kObjectAlignmentMask));
+ and_(scratch1, scratch1, r0);
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK(kCharSize == 1);
+ addi(scratch1, length,
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
+ li(r0, Operand(~kObjectAlignmentMask));
+ and_(scratch1, scratch1, r0);
+
+ // Allocate one-byte string in new space.
+ Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::CompareObjectType(Register object, Register map,
+ Register type_reg, InstanceType type) {
+ const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+
+ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, temp, type);
+}
+
+
+void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
+ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
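+ // Subtracting min_type and comparing unsigned checks the whole
+ // [min_type, max_type] range with a single branch.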
+ subi(ip, ip, Operand(min_type));
+ cmpli(ip, Operand(max_type - min_type));
+ bgt(false_label);
+}
+
+
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
+ InstanceType type) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
+ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ cmpi(type_reg, Operand(type));
+}
+
+
+void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+ DCHECK(!obj.is(r0));
+ LoadRoot(r0, index);
+ cmp(obj, r0);
+}
+
+
+void MacroAssembler::CheckFastElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
+ cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ bgt(fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ ble(fail);
+ cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+ bgt(fail);
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ bgt(fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg, Register key_reg, Register elements_reg,
+ Register scratch1, DoubleRegister double_scratch, Label* fail,
+ int elements_offset) {
+ Label smi_value, store;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
+ DONT_DO_SMI_CHECK);
+
+ lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ // Force a canonical NaN.
+ CanonicalizeNaN(double_scratch);
+ b(&store);
+
+ bind(&smi_value);
+ SmiToDouble(double_scratch, value_reg);
+
+ bind(&store);
+ SmiToDoubleArrayOffset(scratch1, key_reg);
+ add(scratch1, elements_reg, scratch1);
+ stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
+ elements_offset));
+}
+
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+
+ // C = A + B; C overflows if A and B have the same sign and C differs in
+ // sign from A.
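+ // Below, overflow_dst ends up holding (C ^ A) & (C ^ B); its sign bit (and
+ // hence the LT bit of cr0 set via SetRC) is set exactly when the addition
+ // overflowed.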
+ if (dst.is(left)) {
+ mr(scratch, left); // Preserve left.
+ add(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ } else if (dst.is(right)) {
+ mr(scratch, right); // Preserve right.
+ add(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ } else {
+ add(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ }
+ and_(overflow_dst, scratch, overflow_dst, SetRC);
+}
+
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+ intptr_t right,
+ Register overflow_dst,
+ Register scratch) {
+ Register original_left = left;
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+
+ // C = A + B; C overflows if A and B have the same sign and C differs in
+ // sign from A.
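+ // Since the right operand is a constant, only its sign matters: for
+ // right >= 0 overflow means A >= 0 and C < 0, and for right < 0 it means
+ // A < 0 and C >= 0. The and_/andc below set cr0's LT bit in exactly those
+ // cases.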
+ if (dst.is(left)) {
+ // Preserve left.
+ original_left = overflow_dst;
+ mr(original_left, left);
+ }
+ Add(dst, left, right, scratch);
+ xor_(overflow_dst, dst, original_left);
+ if (right >= 0) {
+ and_(overflow_dst, overflow_dst, dst, SetRC);
+ } else {
+ andc(overflow_dst, overflow_dst, dst, SetRC);
+ }
+}
+
+
+void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+
+ // C = A - B; C overflows if A and B have different signs and C differs in
+ // sign from A.
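+ // Below, overflow_dst ends up holding (C ^ A) & (A ^ B); its sign bit (and
+ // hence the LT bit of cr0 set via SetRC) is set exactly when the
+ // subtraction overflowed.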
+ if (dst.is(left)) {
+ mr(scratch, left); // Preserve left.
+ sub(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch);
+ xor_(scratch, scratch, right);
+ and_(overflow_dst, overflow_dst, scratch, SetRC);
+ } else if (dst.is(right)) {
+ mr(scratch, right); // Preserve right.
+ sub(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch);
+ and_(overflow_dst, overflow_dst, scratch, SetRC);
+ } else {
+ sub(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst, SetRC);
+ }
+}
+
+
+void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
+ Label* early_success) {
+ LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(scratch, map, early_success);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
+ Label* early_success) {
+ mov(r0, Operand(map));
+ cmp(obj_map, r0);
+}
+
+
+void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
+ Label* fail, SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ Label success;
+ CompareMap(obj, scratch, map, &success);
+ bne(fail);
+ bind(&success);
+}
+
+
+void MacroAssembler::CheckMap(Register obj, Register scratch,
+ Heap::RootListIndex index, Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(r0, index);
+ cmp(scratch, r0);
+ bne(fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj, Register scratch,
+ Handle<Map> map, Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ mov(r0, Operand(map));
+ cmp(scratch, r0);
+ bne(&fail);
+ Jump(success, RelocInfo::CODE_TARGET, al);
+ bind(&fail);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss,
+ bool miss_on_bound_function) {
+ Label non_instance;
+ if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+ bne(miss);
+
+ LoadP(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ lwz(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kBoundFunction,
+#else
+ SharedFunctionInfo::kBoundFunction + kSmiTagSize,
+#endif
+ r0);
+ bne(miss, cr0);
+
+ // Make sure that the function has an instance prototype.
+ lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ bne(&non_instance, cr0);
+ }
+
+ // Get the prototype or initial map from the function.
+ LoadP(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ cmp(result, r0);
+ beq(miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ bne(&done);
+
+ // Get the prototype from the initial map.
+ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+ if (miss_on_bound_function) {
+ b(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
+
+ // All done.
+ bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
+ Condition cond) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address, ExternalReference thunk_ref, int stack_space,
+ MemOperand return_value_operand, MemOperand* context_restore_operand) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()), next_address);
+
+ DCHECK(function_address.is(r4) || function_address.is(r5));
+ Register scratch = r6;
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
+ lbz(scratch, MemOperand(scratch, 0));
+ cmpi(scratch, Operand::Zero());
+ beq(&profiler_disabled);
+
+ // Additional parameter is the address of the actual callback.
+ mov(scratch, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mr(scratch, function_address);
+ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ // r17 - next_address
+ // r14 - next_address->kNextOffset
+ // r15 - next_address->kLimitOffset
+ // r16 - next_address->kLevelOffset
+ mov(r17, Operand(next_address));
+ LoadP(r14, MemOperand(r17, kNextOffset));
+ LoadP(r15, MemOperand(r17, kLimitOffset));
+ lwz(r16, MemOperand(r17, kLevelOffset));
+ addi(r16, r16, Operand(1));
+ stw(r16, MemOperand(r17, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r3);
+ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(this, scratch);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, r3);
+ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // load value from ReturnValue
+ LoadP(r3, return_value_operand);
+ bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ StoreP(r14, MemOperand(r17, kNextOffset));
+ if (emit_debug_code()) {
+ lwz(r4, MemOperand(r17, kLevelOffset));
+ cmp(r4, r16);
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ subi(r16, r16, Operand(1));
+ stw(r16, MemOperand(r17, kLevelOffset));
+ LoadP(r0, MemOperand(r17, kLimitOffset));
+ cmp(r15, r0);
+ bne(&delete_allocated_handles);
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ LoadP(r15, MemOperand(r15));
+ cmp(r14, r15);
+ bne(&promote_scheduled_exception);
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ LoadP(cp, *context_restore_operand);
+ }
+ // LeaveExitFrame expects unwind space to be in a register.
+ mov(r14, Operand(stack_space));
+ LeaveExitFrame(false, r14, !restore_context);
+ blr();
+
+ bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
+ }
+ jmp(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ StoreP(r15, MemOperand(r17, kLimitOffset));
+ mr(r14, r3);
+ PrepareCallCFunction(1, r15);
+ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+ 1);
+ mr(r3, r14);
+ b(&leave_exit_frame);
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index, pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
+
+void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
+ SmiUntag(ip, smi);
+ ConvertIntToDouble(ip, value);
+}
+
+
+void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
+ Register scratch1, Register scratch2,
+ DoubleRegister double_scratch) {
+ TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
+}
+
+
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+ DoubleRegister double_input,
+ Register scratch,
+ DoubleRegister double_scratch) {
+ Label done;
+ DCHECK(!double_input.is(double_scratch));
+
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+ scratch,
+#endif
+ result, double_scratch);
+
+#if V8_TARGET_ARCH_PPC64
+ TestIfInt32(result, scratch, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ bne(&done);
+
+ // convert back and compare
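+ // If the round-tripped value equals the original, the double was an exact
+ // 32-bit integer; callers test the resulting eq condition.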
+ fcfid(double_scratch, double_scratch);
+ fcmpu(double_scratch, double_input);
+ bind(&done);
+}
+
+
+void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
+ Register input_high, Register scratch,
+ DoubleRegister double_scratch, Label* done,
+ Label* exact) {
+ DCHECK(!result.is(input_high));
+ DCHECK(!double_input.is(double_scratch));
+ Label exception;
+
+ MovDoubleHighToInt(input_high, double_input);
+
+ // Test for NaN/Inf
+ ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
+ cmpli(result, Operand(0x7ff));
+ beq(&exception);
+
+ // Convert (rounding to -Inf)
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+ scratch,
+#endif
+ result, double_scratch, kRoundToMinusInf);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+ TestIfInt32(result, scratch, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ bne(&exception);
+
+ // Test for exactness
+ fcfid(double_scratch, double_scratch);
+ fcmpu(double_scratch, double_input);
+ beq(exact);
+ b(done);
+
+ bind(&exception);
+}
+
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister double_scratch = kScratchDoubleReg;
+ Register scratch = ip;
+
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+ scratch,
+#endif
+ result, double_scratch);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+ TestIfInt32(result, scratch, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ beq(done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub
+ // instead.
+ mflr(r0);
+ push(r0);
+ // Put input on stack.
+ stfdu(double_input, MemOperand(sp, -kDoubleSize));
+
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+ CallStub(&stub);
+
+ addi(sp, sp, Operand(kDoubleSize));
+ pop(r0);
+ mtlr(r0);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = kScratchDoubleReg;
+ DCHECK(!result.is(object));
+
+ lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed; call the stub
+ // instead.
+ mflr(r0);
+ push(r0);
+ DoubleToIStub stub(isolate(), object, result,
+ HeapNumber::kValueOffset - kHeapObjectTag, true, true);
+ CallStub(&stub);
+ pop(r0);
+ mtlr(r0);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object, Register result,
+ Register heap_number_map,
+ Register scratch1, Label* not_number) {
+ Label done;
+ DCHECK(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
+ int num_least_bits) {
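+ // The rotate-and-mask below both untags the smi (rotating the tag away) and
+ // keeps only the low num_least_bits of the value in a single instruction.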
+#if V8_TARGET_ARCH_PPC64
+ rldicl(dst, src, kBitsPerPointer - kSmiShift,
+ kBitsPerPointer - num_least_bits);
+#else
+ rlwinm(dst, src, kBitsPerPointer - kSmiShift,
+ kBitsPerPointer - num_least_bits, 31);
+#endif
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
+ int num_least_bits) {
+ rlwinm(dst, src, 0, 32 - num_least_bits, 31);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. r3 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments matches the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r3, Operand(num_arguments));
+ mov(r4, Operand(ExternalReference(f, isolate())));
+ CEntryStub stub(isolate(),
+#if V8_TARGET_ARCH_PPC64
+ f->result_size,
+#else
+ 1,
+#endif
+ save_doubles);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ mov(r3, Operand(num_arguments));
+ mov(r4, Operand(ext));
+
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r3, Operand(num_arguments));
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ mov(r4, Operand(builtin));
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ GetBuiltinEntry(ip, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(ip));
+ CallJSEntry(ip);
+ call_wrapper.AfterCall();
+ } else {
+ DCHECK(flag == JUMP_FUNCTION);
+ JumpToJSEntry(ip);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ LoadP(target,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ LoadP(target,
+ FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
+ r0);
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ DCHECK(!target.is(r4));
+ GetBuiltinFunction(r4, id);
+ // Load the code entry point from the builtins object.
+ LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch1, Operand(value));
+ mov(scratch2, Operand(ExternalReference(counter)));
+ stw(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch2, Operand(ExternalReference(counter)));
+ lwz(scratch1, MemOperand(scratch2));
+ addi(scratch1, scratch1, Operand(value));
+ stw(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ DCHECK(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch2, Operand(ExternalReference(counter)));
+ lwz(scratch1, MemOperand(scratch2));
+ subi(scratch1, scratch1, Operand(value));
+ stw(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+ CRegister cr) {
+ if (emit_debug_code()) Check(cond, reason, cr);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ DCHECK(!elements.is(r0));
+ Label ok;
+ push(elements);
+ LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
+ cmp(elements, r0);
+ beq(&ok);
+ LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
+ cmp(elements, r0);
+ beq(&ok);
+ LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
+ cmp(elements, r0);
+ beq(&ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ bind(&ok);
+ pop(elements);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+ Label L;
+ b(cond, &L, cr);
+ Abort(reason);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
+#endif
+
+ LoadSmiLiteral(r0, Smi::FromInt(reason));
+ push(r0);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ // will not return here
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ mr(dst, cp);
+ }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind, ElementsKind transitioned_kind,
+ Register map_in_out, Register scratch, Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ LoadP(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ LoadP(scratch,
+ MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
+ LoadP(scratch, FieldMemOperand(scratch, offset));
+ cmp(map_in_out, scratch);
+ bne(no_map_match);
+
+ // Use the transitioned cached map.
+ offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
+ LoadP(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ LoadP(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ LoadP(function,
+ FieldMemOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ LoadP(map,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ b(&ok);
+ bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ bind(&ok);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg, Register scratch, Label* not_power_of_two_or_zero) {
+ subi(scratch, reg, Operand(1));
+ cmpi(scratch, Operand::Zero());
+ blt(not_power_of_two_or_zero);
+ and_(r0, scratch, reg, SetRC);
+ bne(not_power_of_two_or_zero, cr0);
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two) {
+ subi(scratch, reg, Operand(1));
+ cmpi(scratch, Operand::Zero());
+ blt(zero_and_neg);
+ and_(r0, scratch, reg, SetRC);
+ bne(not_power_of_two, cr0);
+}
+
+#if !V8_TARGET_ARCH_PPC64
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+ DCHECK(!reg.is(overflow));
+ mr(overflow, reg); // Save original value.
+ SmiTag(reg);
+ xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
+ Register overflow) {
+ if (dst.is(src)) {
+ // Fall back to slower case.
+ SmiTagCheckOverflow(dst, overflow);
+ } else {
+ DCHECK(!dst.is(src));
+ DCHECK(!dst.is(overflow));
+ DCHECK(!src.is(overflow));
+ SmiTag(dst, src);
+ xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
+ }
+}
+#endif
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
+ orx(r0, reg1, reg2, LeaveRC);
+ JumpIfNotSmi(r0, on_not_both_smi);
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
+ Label* smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ TestBit(src, 0, r0);
+ SmiUntag(dst, src);
+ beq(smi_case, cr0);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
+ Label* non_smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ TestBit(src, 0, r0);
+ SmiUntag(dst, src);
+ bne(non_smi_case, cr0);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfSmi(reg1, on_either_smi);
+ JumpIfSmi(reg2, on_either_smi);
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmi, cr0);
+ }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(eq, kOperandIsNotSmi, cr0);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAString, cr0);
+ push(object);
+ LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lt, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAName, cr0);
+ push(object);
+ LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, LAST_NAME_TYPE);
+ pop(object);
+ Check(le, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ beq(&done_checking);
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, kHeapNumberMapRegisterClobbered);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ cmp(scratch, heap_number_map);
+ bne(on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object, Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register usage: 'result' is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
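+ // (The single arithmetic shift both untags the smi length and divides it
+ // by two.)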
+ subi(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ xor_(scratch1, scratch1, scratch2);
+ and_(scratch1, scratch1, mask);
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
+ add(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ fcmpu(d0, d1);
+ bne(not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ SmiUntag(scratch, object);
+ and_(scratch, mask, scratch);
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
+ add(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ bne(not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ LoadP(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential one-byte strings.
+ // Assume that they are non-smis.
+ LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ and_(scratch1, first, second);
+ JumpIfSmi(scratch1, failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ beq(&succeed, cr0);
+ cmpi(reg, Operand(SYMBOL_TYPE));
+ bne(not_unique_name);
+
+ bind(&succeed);
+}
+
+
+// Allocates a heap number or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required,
+ TaggingMode tagging_mode,
+ MutableMode mode) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+ AssertIsRoot(heap_number_map, map_index);
+
+ // Store heap number map in the allocated object.
+ if (tagging_mode == TAG_RESULT) {
+ StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
+ r0);
+ } else {
+ StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(
+ Register result, DoubleRegister value, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required) {
+ AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+ stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
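+// A hypothetical use (registers chosen for illustration only):
+//   CopyFields(r6, r5, r7.bit(), 3);
+// copies three pointer-sized fields from the object in r5 to the object in
+// r6, using r7 as the single temporary.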
+void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
+ int field_count) {
+ // At least one bit set in the first 15 registers.
+ DCHECK((temps & ((1 << 15) - 1)) != 0);
+ DCHECK((temps & dst.bit()) == 0);
+ DCHECK((temps & src.bit()) == 0);
+ // Primitive implementation using only one temporary register.
+
+ Register tmp = no_reg;
+ // Find a temp register in temps list.
+ for (int i = 0; i < 15; i++) {
+ if ((temps & (1 << i)) != 0) {
+ tmp.set_code(i);
+ break;
+ }
+ }
+ DCHECK(!tmp.is(no_reg));
+
+ for (int i = 0; i < field_count; i++) {
+ LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
+ StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
+ Register scratch) {
+ Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
+
+ DCHECK(!scratch.is(r0));
+
+ cmpi(length, Operand::Zero());
+ beq(&done);
+
+ // Check src alignment and length to see whether word_loop is possible
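+  // Note: requiring length >= (2 * kPointerSize - misalignment) guarantees
+  // that after the align_loop copies (kPointerSize - misalignment) bytes,
+  // at least one full word remains for word_loop.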
+ andi(scratch, src, Operand(kPointerSize - 1));
+ beq(&aligned, cr0);
+ subfic(scratch, scratch, Operand(kPointerSize * 2));
+ cmp(length, scratch);
+ blt(&byte_loop);
+
+ // Align src before copying in word size chunks.
+ subi(scratch, scratch, Operand(kPointerSize));
+ mtctr(scratch);
+ bind(&align_loop);
+ lbz(scratch, MemOperand(src));
+ addi(src, src, Operand(1));
+ subi(length, length, Operand(1));
+ stb(scratch, MemOperand(dst));
+ addi(dst, dst, Operand(1));
+ bdnz(&align_loop);
+
+ bind(&aligned);
+
+ // Copy bytes in word size chunks.
+ if (emit_debug_code()) {
+ andi(r0, src, Operand(kPointerSize - 1));
+ Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
+ }
+
+ ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
+ cmpi(scratch, Operand::Zero());
+ beq(&byte_loop);
+
+ mtctr(scratch);
+ bind(&word_loop);
+ LoadP(scratch, MemOperand(src));
+ addi(src, src, Operand(kPointerSize));
+ subi(length, length, Operand(kPointerSize));
+ if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+    // Currently false for PPC, but a possible future optimization.
+ StoreP(scratch, MemOperand(dst));
+ addi(dst, dst, Operand(kPointerSize));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ stb(scratch, MemOperand(dst, 0));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 1));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 2));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 3));
+#if V8_TARGET_ARCH_PPC64
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 4));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 5));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 6));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 7));
+#endif
+#else
+#if V8_TARGET_ARCH_PPC64
+ stb(scratch, MemOperand(dst, 7));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 6));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 5));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 4));
+ ShiftRightImm(scratch, scratch, Operand(8));
+#endif
+ stb(scratch, MemOperand(dst, 3));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 2));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 1));
+ ShiftRightImm(scratch, scratch, Operand(8));
+ stb(scratch, MemOperand(dst, 0));
+#endif
+ addi(dst, dst, Operand(kPointerSize));
+ }
+ bdnz(&word_loop);
+
+ // Copy the last bytes if any left.
+ cmpi(length, Operand::Zero());
+ beq(&done);
+
+ bind(&byte_loop);
+ mtctr(length);
+ bind(&byte_loop_1);
+ lbz(scratch, MemOperand(src));
+ addi(src, src, Operand(1));
+ stb(scratch, MemOperand(dst));
+ addi(dst, dst, Operand(1));
+ bdnz(&byte_loop_1);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
+ Register count,
+ Register filler) {
+ Label loop;
+ mtctr(count);
+ bind(&loop);
+ StoreP(filler, MemOperand(start_offset));
+ addi(start_offset, start_offset, Operand(kPointerSize));
+ bdnz(&loop);
+}
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label done;
+ sub(r0, end_offset, start_offset, LeaveOE, SetRC);
+ beq(&done, cr0);
+ ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
+ InitializeNFieldsWithFiller(start_offset, r0, filler);
+ bind(&done);
+}
+
+
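+// Pushes |count| double registers, starting at code |first|, below |location|
+// and leaves |location| pointing at the lowest slot; RestoreFPRegs below
+// reverses the operation.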
+void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
+ DCHECK(count > 0);
+ int cur = first;
+ subi(location, location, Operand(count * kDoubleSize));
+ for (int i = 0; i < count; i++) {
+ DoubleRegister reg = DoubleRegister::from_code(cur++);
+ stfd(reg, MemOperand(location, i * kDoubleSize));
+ }
+}
+
+
+void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
+ DCHECK(count > 0);
+ int cur = first + count - 1;
+ for (int i = count - 1; i >= 0; i--) {
+ DoubleRegister reg = DoubleRegister::from_code(cur--);
+ lfd(reg, MemOperand(location, i * kDoubleSize));
+ }
+ addi(location, location, Operand(count * kDoubleSize));
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ andi(scratch1, first, Operand(kFlatOneByteStringMask));
+ andi(scratch2, second, Operand(kFlatOneByteStringMask));
+ cmpi(scratch1, Operand(kFlatOneByteStringTag));
+ bne(failure);
+ cmpi(scratch2, Operand(kFlatOneByteStringTag));
+ bne(failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ andi(scratch, type, Operand(kFlatOneByteStringMask));
+ cmpi(scratch, Operand(kFlatOneByteStringTag));
+ bne(failure);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ stack_passed_words +=
+ 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ }
+ // Up to 8 simple arguments are passed in registers r3..r10.
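+  // For example, 10 integer arguments need 2 stack words (arguments 9 and
+  // 10); the first 8 stay in registers.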
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ TestIfSmi(string, r0);
+ Check(ne, kNonObject, cr0);
+
+ LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ cmpi(ip, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType);
+
+// The index is assumed to be untagged coming in. Tag it to compare with the
+// string length without using a temp register; it is restored at the end of
+// this function.
+#if !V8_TARGET_ARCH_PPC64
+ Label index_tag_ok, index_tag_bad;
+ JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
+#endif
+ SmiTag(index, index);
+#if !V8_TARGET_ARCH_PPC64
+ b(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+#endif
+
+ LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
+ cmp(index, ip);
+ Check(lt, kIndexIsTooLarge);
+
+ DCHECK(Smi::FromInt(0) == 0);
+ cmpi(index, Operand::Zero());
+ Check(ge, kIndexIsNegative);
+
+ SmiUntag(index, index);
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ int stack_space = kNumRequiredStackFrameSlots;
+
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for stack arguments
+ // -- preserving original value of sp.
+ mr(scratch, sp);
+ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ // Make room for stack arguments
+ stack_space += stack_passed_arguments;
+ }
+
+ // Allocate frame with required slots to make ABI work.
+ li(r0, Operand::Zero());
+ StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
+
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
+
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (src2.is(d1)) {
+ DCHECK(!src1.is(d2));
+ Move(d2, src2);
+ Move(d1, src1);
+ } else {
+ Move(d1, src1);
+ Move(d2, src2);
+ }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ mov(ip, Operand(function));
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK(has_frame());
+// Just call directly. The function called cannot cause a GC, or
+// allow preemption, so the return address in the link register
+// stays correct.
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+  // AIX uses a function descriptor. When calling C code, be aware of this
+  // descriptor and pick up the values from it.
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+ LoadP(ip, MemOperand(function, 0));
+ Register dest = ip;
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ Move(ip, function);
+ Register dest = ip;
+#else
+ Register dest = function;
+#endif
+
+ Call(dest);
+
+  // Remove the frame built in PrepareCallCFunction.
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+ if (ActivationFrameAlignment() > kPointerSize) {
+ LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ } else {
+ addi(sp, sp, Operand(stack_space * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::FlushICache(Register address, size_t size,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
+ sync();
+ icbi(r0, address);
+ isync();
+ return;
+ }
+
+ Label done;
+
+ dcbf(r0, address);
+ sync();
+ icbi(r0, address);
+ isync();
+
+  // This code handles ranges which cross a single cacheline boundary:
+  // scratch holds the start of the last cacheline that intersects the range.
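+  // For example (sizes assumed for illustration): with 128-byte cache lines,
+  // flushing 16 bytes that start 8 bytes before a line boundary touches two
+  // lines, so the second line (computed into scratch) is flushed as well.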
+ const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
+
+ DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
+ addi(scratch, address, Operand(size - 1));
+ ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
+ cmpl(scratch, address);
+ ble(&done);
+
+ dcbf(r0, scratch);
+ sync();
+ icbi(r0, scratch);
+ isync();
+
+ bind(&done);
+}
+
+
+void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
+ Register new_value) {
+ lwz(scratch, MemOperand(location));
+
+#if V8_OOL_CONSTANT_POOL
+ if (emit_debug_code()) {
+// Check that the instruction sequence is a load from the constant pool
+#if V8_TARGET_ARCH_PPC64
+ And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
+ Cmpi(scratch, Operand(ADDI), r0);
+ Check(eq, kTheInstructionShouldBeALi);
+ lwz(scratch, MemOperand(location, kInstrSize));
+#endif
+ ExtractBitMask(scratch, scratch, 0x1f * B16);
+ cmpi(scratch, Operand(kConstantPoolRegister.code()));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ // Scratch was clobbered. Restore it.
+ lwz(scratch, MemOperand(location));
+ }
+ // Get the address of the constant and patch it.
+ andi(scratch, scratch, Operand(kImm16Mask));
+ StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
+#else
+ // This code assumes a FIXED_SEQUENCE for lis/ori
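+  // On 64-bit targets the patched sequence is lis/ori/sldi/oris/ori (five
+  // instructions); on 32-bit it is lis/ori (two instructions), matching the
+  // FlushICache sizes at the end of this function.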
+
+ // At this point scratch is a lis instruction.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
+ Cmpi(scratch, Operand(ADDIS), r0);
+ Check(eq, kTheInstructionToPatchShouldBeALis);
+ lwz(scratch, MemOperand(location));
+ }
+
+// insert new high word into lis instruction
+#if V8_TARGET_ARCH_PPC64
+ srdi(ip, new_value, Operand(32));
+ rlwimi(scratch, ip, 16, 16, 31);
+#else
+ rlwimi(scratch, new_value, 16, 16, 31);
+#endif
+
+ stw(scratch, MemOperand(location));
+
+ lwz(scratch, MemOperand(location, kInstrSize));
+ // scratch is now ori.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORI), r0);
+ Check(eq, kTheInstructionShouldBeAnOri);
+ lwz(scratch, MemOperand(location, kInstrSize));
+ }
+
+// insert new low word into ori instruction
+#if V8_TARGET_ARCH_PPC64
+ rlwimi(scratch, ip, 0, 16, 31);
+#else
+ rlwimi(scratch, new_value, 0, 16, 31);
+#endif
+ stw(scratch, MemOperand(location, kInstrSize));
+
+#if V8_TARGET_ARCH_PPC64
+ if (emit_debug_code()) {
+ lwz(scratch, MemOperand(location, 2 * kInstrSize));
+ // scratch is now sldi.
+ And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
+ Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
+ Check(eq, kTheInstructionShouldBeASldi);
+ }
+
+ lwz(scratch, MemOperand(location, 3 * kInstrSize));
+ // scratch is now ori.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORIS), r0);
+ Check(eq, kTheInstructionShouldBeAnOris);
+ lwz(scratch, MemOperand(location, 3 * kInstrSize));
+ }
+
+ rlwimi(scratch, new_value, 16, 16, 31);
+ stw(scratch, MemOperand(location, 3 * kInstrSize));
+
+ lwz(scratch, MemOperand(location, 4 * kInstrSize));
+ // scratch is now ori.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORI), r0);
+ Check(eq, kTheInstructionShouldBeAnOri);
+ lwz(scratch, MemOperand(location, 4 * kInstrSize));
+ }
+ rlwimi(scratch, new_value, 0, 16, 31);
+ stw(scratch, MemOperand(location, 4 * kInstrSize));
+#endif
+
+// Update the I-cache so the patched lis/ori sequence can be executed.
+#if V8_TARGET_ARCH_PPC64
+ FlushICache(location, 5 * kInstrSize, scratch);
+#else
+ FlushICache(location, 2 * kInstrSize, scratch);
+#endif
+#endif
+}
+
+
+void MacroAssembler::GetRelocatedValue(Register location, Register result,
+ Register scratch) {
+ lwz(result, MemOperand(location));
+
+#if V8_OOL_CONSTANT_POOL
+ if (emit_debug_code()) {
+// Check that the instruction sequence is a load from the constant pool
+#if V8_TARGET_ARCH_PPC64
+ And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
+ Cmpi(result, Operand(ADDI), r0);
+ Check(eq, kTheInstructionShouldBeALi);
+ lwz(result, MemOperand(location, kInstrSize));
+#endif
+ ExtractBitMask(result, result, 0x1f * B16);
+ cmpi(result, Operand(kConstantPoolRegister.code()));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ lwz(result, MemOperand(location));
+ }
+ // Get the address of the constant and retrieve it.
+ andi(result, result, Operand(kImm16Mask));
+ LoadPX(result, MemOperand(kConstantPoolRegister, result));
+#else
+ // This code assumes a FIXED_SEQUENCE for lis/ori
+ if (emit_debug_code()) {
+ And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
+ Cmpi(result, Operand(ADDIS), r0);
+ Check(eq, kTheInstructionShouldBeALis);
+ lwz(result, MemOperand(location));
+ }
+
+ // result now holds a lis instruction. Extract the immediate.
+ slwi(result, result, Operand(16));
+
+ lwz(scratch, MemOperand(location, kInstrSize));
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORI), r0);
+ Check(eq, kTheInstructionShouldBeAnOri);
+ lwz(scratch, MemOperand(location, kInstrSize));
+ }
+  // Copy the low 16 bits from the ori instruction into result.
+ rlwimi(result, scratch, 0, 16, 31);
+
+#if V8_TARGET_ARCH_PPC64
+ if (emit_debug_code()) {
+ lwz(scratch, MemOperand(location, 2 * kInstrSize));
+ // scratch is now sldi.
+ And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
+ Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
+ Check(eq, kTheInstructionShouldBeASldi);
+ }
+
+ lwz(scratch, MemOperand(location, 3 * kInstrSize));
+ // scratch is now ori.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORIS), r0);
+ Check(eq, kTheInstructionShouldBeAnOris);
+ lwz(scratch, MemOperand(location, 3 * kInstrSize));
+ }
+ sldi(result, result, Operand(16));
+ rldimi(result, scratch, 0, 48);
+
+ lwz(scratch, MemOperand(location, 4 * kInstrSize));
+ // scratch is now ori.
+ if (emit_debug_code()) {
+ And(scratch, scratch, Operand(kOpcodeMask));
+ Cmpi(scratch, Operand(ORI), r0);
+ Check(eq, kTheInstructionShouldBeAnOri);
+ lwz(scratch, MemOperand(location, 4 * kInstrSize));
+ }
+ sldi(result, result, Operand(16));
+ rldimi(result, scratch, 0, 48);
+#endif
+#endif
+}
+
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch, // scratch may be same register as object
+ int mask, Condition cc, Label* condition_met) {
+ DCHECK(cc == ne || cc == eq);
+ ClearRightImm(scratch, object, Operand(kPageSizeBits));
+ LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+
+ And(r0, scratch, Operand(mask), SetRC);
+
+ if (cc == ne) {
+ bne(condition_met, cr0);
+ }
+ if (cc == eq) {
+ beq(condition_met, cr0);
+ }
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
+ bne(if_deprecated, cr0);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
+ Register scratch1, Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
+ Register mask_scratch, Label* has_color,
+ int first_bit, int second_bit) {
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
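+  // Mark bits come in pairs: white = 00, black = 10, grey = 11 (see the
+  // Marking::k*BitPattern asserts in EnsureNotWhite below).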
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Test the first bit
+ and_(r0, ip, mask_scratch, SetRC);
+ b(first_bit == 1 ? eq : ne, &other_color, cr0);
+ // Shift left 1
+ // May need to load the next cell
+ slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
+ beq(&word_boundary, cr0);
+ // Test the second bit
+ and_(r0, ip, mask_scratch, SetRC);
+ b(second_bit == 1 ? ne : eq, has_color, cr0);
+ b(&other_color);
+
+ bind(&word_boundary);
+ lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
+ andi(r0, ip, Operand(1));
+ b(second_bit == 1 ? ne : eq, has_color, cr0);
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
+ Label* not_data_object) {
+ Label is_data_object;
+ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ beq(&is_data_object);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
+ andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ bne(not_data_object, cr0);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
+ Register mask_reg) {
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
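+  // bitmap_reg <- page base of addr_reg; ip <- byte offset of the bitmap cell
+  // covering addr_reg (one mark bit per pointer-sized word); mask_reg <- a
+  // single bit selecting addr_reg's position within that cell.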
+ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
+ and_(bitmap_reg, addr_reg, r0);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
+ ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
+ ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
+ add(bitmap_reg, bitmap_reg, ip);
+ li(ip, Operand(1));
+ slw(mask_reg, ip, mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ and_(r0, mask_scratch, load_scratch, SetRC);
+ bne(&done, cr0);
+
+ if (emit_debug_code()) {
+ // Check for impossible bit pattern.
+ Label ok;
+    // The left shift may overflow, making the check conservative.
+ slwi(r0, mask_scratch, Operand(1));
+ and_(r0, load_scratch, r0, SetRC);
+ beq(&ok, cr0);
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object, maybe_string_object, is_string_object, is_encoded;
+#if V8_TARGET_ARCH_PPC64
+ Label length_computed;
+#endif
+
+
+ // Check for heap-number
+ LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ bne(&maybe_string_object);
+ li(length, Operand(HeapNumber::kSize));
+ b(&is_data_object);
+ bind(&maybe_string_object);
+
+ // Check for strings.
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ bne(value_is_white_and_not_data, cr0);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
+ andi(r0, instance_type, Operand(kExternalStringTag));
+ beq(&is_string_object, cr0);
+ li(length, Operand(ExternalString::kSize));
+ b(&is_data_object);
+ bind(&is_string_object);
+
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we untag the smi to get the length.
+ // For UC16 (char-size of 2):
+ // - (32-bit) we just leave the smi tag in place, thereby getting
+ // the length multiplied by 2.
+ // - (64-bit) we compute the offset in the 2-byte array
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
+ andi(r0, instance_type, Operand(kStringEncodingMask));
+ beq(&is_encoded, cr0);
+ SmiUntag(ip);
+#if V8_TARGET_ARCH_PPC64
+ b(&length_computed);
+#endif
+ bind(&is_encoded);
+#if V8_TARGET_ARCH_PPC64
+ SmiToShortArrayOffset(ip, ip);
+ bind(&length_computed);
+#else
+ DCHECK(kSmiShift == 1);
+#endif
+ addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ li(r0, Operand(~kObjectAlignmentMask));
+ and_(length, length, r0);
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ orx(ip, ip, mask_scratch);
+ stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ mov(ip, Operand(~Page::kPageAlignmentMask));
+ and_(bitmap_scratch, bitmap_scratch, ip);
+ lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ add(ip, ip, length);
+ stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
+
+
+// Saturate a value into an 8-bit unsigned integer:
+//   if input_value < 0, output_value is 0;
+//   if input_value > 255, output_value is 255;
+//   otherwise output_value is the input_value.
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ Label done, negative_label, overflow_label;
+ int satval = (1 << 8) - 1;
+
+ cmpi(input_reg, Operand::Zero());
+ blt(&negative_label);
+
+ cmpi(input_reg, Operand(satval));
+ bgt(&overflow_label);
+ if (!output_reg.is(input_reg)) {
+ mr(output_reg, input_reg);
+ }
+ b(&done);
+
+ bind(&negative_label);
+ li(output_reg, Operand::Zero()); // set to 0 if negative
+ b(&done);
+
+
+ bind(&overflow_label); // set to satval if > satval
+ li(output_reg, Operand(satval));
+
+ bind(&done);
+}
+
+
+void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
+
+
+void MacroAssembler::ResetRoundingMode() {
+ mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister double_scratch) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ LoadDoubleLiteral(double_scratch, 0.0, result_reg);
+ fcmpu(input_reg, double_scratch);
+ bgt(&above_zero);
+
+  // Double value is <= 0 or NaN: return 0.
+ LoadIntLiteral(result_reg, 0);
+ b(&done);
+
+  // Double value is > 255: return 255.
+ bind(&above_zero);
+ LoadDoubleLiteral(double_scratch, 255.0, result_reg);
+ fcmpu(input_reg, double_scratch);
+ ble(&in_bounds);
+ LoadIntLiteral(result_reg, 255);
+ b(&done);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+
+ // round to nearest (default rounding mode)
+ fctiw(double_scratch, input_reg);
+ MovDoubleLowToInt(result_reg, double_scratch);
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
+ SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Register empty_fixed_array_value = r9;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+ mr(r5, r3);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+
+ EnumLength(r6, r4);
+ CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
+ beq(call_runtime);
+
+ b(&start);
+
+ bind(&next);
+ LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLength(r6, r4);
+ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+ bne(call_runtime);
+
+ bind(&start);
+
+ // Check that there are no elements. Register r5 contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
+ cmp(r5, empty_fixed_array_value);
+ beq(&no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
+ bne(call_runtime);
+
+ bind(&no_elements);
+ LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
+ cmp(r5, null_value);
+ bne(&next);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// New MacroAssembler Interfaces added for PPC
+//
+////////////////////////////////////////////////////////////////////////////////
+void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+ mov(dst, Operand(value));
+}
+
+
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+ mov(dst, Operand(smi));
+}
+
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+ Register scratch) {
+#if V8_OOL_CONSTANT_POOL
+ // TODO(mbrandy): enable extended constant pool usage for doubles.
+ // See ARM commit e27ab337 for a reference.
+ if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
+ RelocInfo rinfo(pc_, value);
+ ConstantPoolAddEntry(rinfo);
+#if V8_TARGET_ARCH_PPC64
+ // We use 2 instruction sequence here for consistency with mov.
+ li(scratch, Operand::Zero());
+ lfdx(result, MemOperand(kConstantPoolRegister, scratch));
+#else
+ lfd(result, MemOperand(kConstantPoolRegister, 0));
+#endif
+ return;
+ }
+#endif
+
+  // Avoid a gcc strict-aliasing error by using a union cast.
+ union {
+ double dval;
+#if V8_TARGET_ARCH_PPC64
+ intptr_t ival;
+#else
+ intptr_t ival[2];
+#endif
+ } litVal;
+
+ litVal.dval = value;
+
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mov(scratch, Operand(litVal.ival));
+ mtfprd(result, scratch);
+ return;
+ }
+#endif
+
+ addi(sp, sp, Operand(-kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+ mov(scratch, Operand(litVal.ival));
+ std(scratch, MemOperand(sp));
+#else
+ LoadIntLiteral(scratch, litVal.ival[0]);
+ stw(scratch, MemOperand(sp, 0));
+ LoadIntLiteral(scratch, litVal.ival[1]);
+ stw(scratch, MemOperand(sp, 4));
+#endif
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(result, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
+ Register scratch) {
+// sign-extend src to 64-bit
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mtfprwa(dst, src);
+ return;
+ }
+#endif
+
+ DCHECK(!src.is(scratch));
+ subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+ extsw(scratch, src);
+ std(scratch, MemOperand(sp, 0));
+#else
+ srawi(scratch, src, 31);
+ stw(scratch, MemOperand(sp, Register::kExponentOffset));
+ stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+ Register scratch) {
+// zero-extend src to 64-bit
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mtfprwz(dst, src);
+ return;
+ }
+#endif
+
+ DCHECK(!src.is(scratch));
+ subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+ clrldi(scratch, src, Operand(32));
+ std(scratch, MemOperand(sp, 0));
+#else
+ li(scratch, Operand::Zero());
+ stw(scratch, MemOperand(sp, Register::kExponentOffset));
+ stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
+#if !V8_TARGET_ARCH_PPC64
+ Register src_hi,
+#endif
+ Register src) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mtfprd(dst, src);
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+ std(src, MemOperand(sp, 0));
+#else
+ stw(src_hi, MemOperand(sp, Register::kExponentOffset));
+ stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
+ Register src_hi,
+ Register src_lo,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ sldi(scratch, src_hi, Operand(32));
+ rldimi(scratch, src_lo, 0, 32);
+ mtfprd(dst, scratch);
+ return;
+ }
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stw(src_hi, MemOperand(sp, Register::kExponentOffset));
+ stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+#endif
+
+
+void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mffprwz(dst, src);
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stfd(src, MemOperand(sp));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lwz(dst, MemOperand(sp, Register::kMantissaOffset));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mffprd(dst, src);
+ srdi(dst, dst, Operand(32));
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stfd(src, MemOperand(sp));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lwz(dst, MemOperand(sp, Register::kExponentOffset));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+ Register dst_hi,
+#endif
+ Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mffprd(dst, src);
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stfd(src, MemOperand(sp));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+#if V8_TARGET_ARCH_PPC64
+ ld(dst, MemOperand(sp, 0));
+#else
+ lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
+ lwz(dst, MemOperand(sp, Register::kMantissaOffset));
+#endif
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::Add(Register dst, Register src, intptr_t value,
+ Register scratch) {
+ if (is_int16(value)) {
+ addi(dst, src, Operand(value));
+ } else {
+ mov(scratch, Operand(value));
+ add(dst, src, scratch);
+ }
+}
+
+
+void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr) {
+ intptr_t value = src2.immediate();
+ if (is_int16(value)) {
+ cmpi(src1, src2, cr);
+ } else {
+ mov(scratch, src2);
+ cmp(src1, scratch, cr);
+ }
+}
+
+
+void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
+ CRegister cr) {
+ intptr_t value = src2.immediate();
+ if (is_uint16(value)) {
+ cmpli(src1, src2, cr);
+ } else {
+ mov(scratch, src2);
+ cmpl(src1, scratch, cr);
+ }
+}
+
+
+void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr) {
+ intptr_t value = src2.immediate();
+ if (is_int16(value)) {
+ cmpwi(src1, src2, cr);
+ } else {
+ mov(scratch, src2);
+ cmpw(src1, scratch, cr);
+ }
+}
+
+
+void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
+ Register scratch, CRegister cr) {
+ intptr_t value = src2.immediate();
+ if (is_uint16(value)) {
+ cmplwi(src1, src2, cr);
+ } else {
+ mov(scratch, src2);
+ cmplw(src1, scratch, cr);
+ }
+}
+
+
+void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
+ RCBit rc) {
+ if (rb.is_reg()) {
+ and_(ra, rs, rb.rm(), rc);
+ } else {
+ if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
+ andi(ra, rs, rb);
+ } else {
+ // mov handles the relocation.
+ DCHECK(!rs.is(r0));
+ mov(r0, rb);
+ and_(ra, rs, r0, rc);
+ }
+ }
+}
+
+
+void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
+ if (rb.is_reg()) {
+ orx(ra, rs, rb.rm(), rc);
+ } else {
+ if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+ ori(ra, rs, rb);
+ } else {
+ // mov handles the relocation.
+ DCHECK(!rs.is(r0));
+ mov(r0, rb);
+ orx(ra, rs, r0, rc);
+ }
+ }
+}
+
+
+void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
+ RCBit rc) {
+ if (rb.is_reg()) {
+ xor_(ra, rs, rb.rm(), rc);
+ } else {
+ if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+ xori(ra, rs, rb);
+ } else {
+ // mov handles the relocation.
+ DCHECK(!rs.is(r0));
+ mov(r0, rb);
+ xor_(ra, rs, r0, rc);
+ }
+ }
+}
+
+
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+ CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+ LoadSmiLiteral(scratch, smi);
+ cmp(src1, scratch, cr);
+#else
+ Cmpi(src1, Operand(smi), scratch, cr);
+#endif
+}
+
+
+void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+ CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+ LoadSmiLiteral(scratch, smi);
+ cmpl(src1, scratch, cr);
+#else
+ Cmpli(src1, Operand(smi), scratch, cr);
+#endif
+}
+
+
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ LoadSmiLiteral(scratch, smi);
+ add(dst, src, scratch);
+#else
+ Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
+#endif
+}
+
+
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ LoadSmiLiteral(scratch, smi);
+ sub(dst, src, scratch);
+#else
+ Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
+#endif
+}
+
+
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch, RCBit rc) {
+#if V8_TARGET_ARCH_PPC64
+ LoadSmiLiteral(scratch, smi);
+ and_(dst, src, scratch, rc);
+#else
+ And(dst, src, Operand(smi), rc);
+#endif
+}
+
+
+// Load a "pointer" sized value from the memory location
+void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!scratch.is(no_reg) && !is_int16(offset)) {
+ /* cannot use d-form */
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+ ldx(dst, MemOperand(mem.ra(), scratch));
+#else
+ lwzx(dst, MemOperand(mem.ra(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_PPC64
+ int misaligned = (offset & 3);
+ if (misaligned) {
+ // adjust base to conform to offset alignment requirements
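+      // (ld uses a DS-form displacement that must be a multiple of 4, so the
+      // low offset bits are folded into the base register first.)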
+ // Todo: enhance to use scratch if dst is unsuitable
+ DCHECK(!dst.is(r0));
+ addi(dst, mem.ra(), Operand((offset & 3) - 4));
+ ld(dst, MemOperand(dst, (offset & ~3) + 4));
+ } else {
+ ld(dst, mem);
+ }
+#else
+ lwz(dst, mem);
+#endif
+ }
+}
+
+
+// Store a "pointer" sized value to the memory location
+void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!scratch.is(no_reg) && !is_int16(offset)) {
+ /* cannot use d-form */
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+ stdx(src, MemOperand(mem.ra(), scratch));
+#else
+ stwx(src, MemOperand(mem.ra(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_PPC64
+ int misaligned = (offset & 3);
+ if (misaligned) {
+ // adjust base to conform to offset alignment requirements
+ // a suitable scratch is required here
+ DCHECK(!scratch.is(no_reg));
+ if (scratch.is(r0)) {
+ LoadIntLiteral(scratch, offset);
+ stdx(src, MemOperand(mem.ra(), scratch));
+ } else {
+ addi(scratch, mem.ra(), Operand((offset & 3) - 4));
+ std(src, MemOperand(scratch, (offset & ~3) + 4));
+ }
+ } else {
+ std(src, mem);
+ }
+#else
+ stw(src, mem);
+#endif
+ }
+}
+
+void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!scratch.is(no_reg) && !is_int16(offset)) {
+ /* cannot use d-form */
+ LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+ // lwax(dst, MemOperand(mem.ra(), scratch));
+ DCHECK(0); // lwax not yet implemented
+#else
+ lwzx(dst, MemOperand(mem.ra(), scratch));
+#endif
+ } else {
+#if V8_TARGET_ARCH_PPC64
+ int misaligned = (offset & 3);
+ if (misaligned) {
+ // adjust base to conform to offset alignment requirements
+ // Todo: enhance to use scratch if dst is unsuitable
+ DCHECK(!dst.is(r0));
+ addi(dst, mem.ra(), Operand((offset & 3) - 4));
+ lwa(dst, MemOperand(dst, (offset & ~3) + 4));
+ } else {
+ lwa(dst, mem);
+ }
+#else
+ lwz(dst, mem);
+#endif
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ lwzx(dst, MemOperand(base, scratch));
+ } else {
+ lwz(dst, mem);
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ stwx(src, MemOperand(base, scratch));
+ } else {
+ stw(src, mem);
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ lhzx(dst, MemOperand(base, scratch));
+ } else {
+ lhz(dst, mem);
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ sthx(src, MemOperand(base, scratch));
+ } else {
+ sth(src, mem);
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ lbzx(dst, MemOperand(base, scratch));
+ } else {
+ lbz(dst, mem);
+ }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ LoadIntLiteral(scratch, offset);
+ stbx(src, MemOperand(base, scratch));
+ } else {
+ stb(src, mem);
+ }
+}
+
+
+void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
+ Representation r, Register scratch) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8()) {
+ LoadByte(dst, mem, scratch);
+ extsb(dst, dst);
+ } else if (r.IsUInteger8()) {
+ LoadByte(dst, mem, scratch);
+ } else if (r.IsInteger16()) {
+ LoadHalfWord(dst, mem, scratch);
+ extsh(dst, dst);
+ } else if (r.IsUInteger16()) {
+ LoadHalfWord(dst, mem, scratch);
+#if V8_TARGET_ARCH_PPC64
+ } else if (r.IsInteger32()) {
+ LoadWord(dst, mem, scratch);
+#endif
+ } else {
+ LoadP(dst, mem, scratch);
+ }
+}
+
+
+void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
+ Representation r, Register scratch) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ StoreByte(src, mem, scratch);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ StoreHalfWord(src, mem, scratch);
+#if V8_TARGET_ARCH_PPC64
+ } else if (r.IsInteger32()) {
+ StoreWord(src, mem, scratch);
+#endif
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ StoreP(src, mem, scratch);
+ }
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ addi(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
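+  // scratch_reg now points just past where an AllocationMemento would sit;
+  // a memento can only be present if that address lies inside new space
+  // (between new_space_start and the current allocation top).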
+ Cmpi(scratch_reg, Operand(new_space_start), r0);
+ blt(no_memento_found);
+ mov(ip, Operand(new_space_allocation_top));
+ LoadP(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
+ bgt(no_memento_found);
+ LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
+ r0);
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ DCHECK(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // Start the walk at the object itself.
+ mr(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
+ beq(found);
+ LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Cmpi(current, Operand(factory->null_value()), r0);
+ bne(&loop_again);
+}
+
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
+}
+#endif
+
+
+CodePatcher::CodePatcher(byte* address, int instructions,
+ FlushICache flush_cache)
+ : address_(address),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate
+  // size bytes of instructions without failing with buffer size constraints.
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ if (flush_cache_ == FLUSH) {
+ CpuFeatures::FlushICache(address_, size_);
+ }
+
+ // Check that the code was patched as expected.
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ switch (cond) {
+ case eq:
+ instr = (instr & ~kCondMask) | BT;
+ break;
+ case ne:
+ instr = (instr & ~kCondMask) | BF;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ masm_.emit(instr);
+}
+
+
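+// Computes dividend / divisor via multiplication by a precomputed "magic"
+// constant (base::SignedDivisionByConstant). As an illustration (constants
+// assumed, not computed here): for divisor 3 the multiplier is 0x55555556
+// with shift 0, and the final ExtractBit/add corrects the rounding for
+// negative dividends (truncating toward zero).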
+void MacroAssembler::TruncatingDiv(Register result, Register dividend,
+ int32_t divisor) {
+ DCHECK(!dividend.is(result));
+ DCHECK(!dividend.is(r0));
+ DCHECK(!result.is(r0));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ mov(r0, Operand(mag.multiplier));
+ mulhw(result, dividend, r0);
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
+ add(result, result, dividend);
+ }
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
+ sub(result, result, dividend);
+ }
+ if (mag.shift > 0) srawi(result, result, mag.shift);
+ ExtractBit(r0, dividend, 31);
+ add(result, result, r0);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
new file mode 100644
index 0000000000..8f1aeab09f
--- /dev/null
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -0,0 +1,1554 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
+#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
+
+#include "src/assembler.h"
+#include "src/bailout-reason.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+  // Don't tag the result.
+ DONT_TAG_RESULT
+};
+
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg);
+#endif
+
+// These exist to provide portability between 32-bit and 64-bit targets.
+#if V8_TARGET_ARCH_PPC64
+#define LoadPU ldu
+#define LoadPX ldx
+#define LoadPUX ldux
+#define StorePU stdu
+#define StorePX stdx
+#define StorePUX stdux
+#define ShiftLeftImm sldi
+#define ShiftRightImm srdi
+#define ClearLeftImm clrldi
+#define ClearRightImm clrrdi
+#define ShiftRightArithImm sradi
+#define ShiftLeft_ sld
+#define ShiftRight_ srd
+#define ShiftRightArith srad
+#define Mul mulld
+#define Div divd
+#else
+#define LoadPU lwzu
+#define LoadPX lwzx
+#define LoadPUX lwzux
+#define StorePU stwu
+#define StorePX stwx
+#define StorePUX stwux
+#define ShiftLeftImm slwi
+#define ShiftRightImm srwi
+#define ClearLeftImm clrlwi
+#define ClearRightImm clrrwi
+#define ShiftRightArithImm srawi
+#define ShiftLeft_ slw
+#define ShiftRight_ srw
+#define ShiftRightArith sraw
+#define Mul mullw
+#define Div divw
+#endif
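+// Editorial usage sketch (not part of the original header; assumes a
+// MacroAssembler* masm and the usual `__` == ACCESS_MASM(masm) shorthand):
+// these macros let shared code emit the pointer-width variant of an
+// instruction without #ifdefs, e.g. roughly
+//   __ LoadPX(r3, MemOperand(r4, r5));    // ldx on PPC64, lwzx on PPC32
+//   __ ShiftLeftImm(r3, r3, Operand(2));  // sldi on PPC64, slwi on PPC32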
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public Assembler {
+ public:
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+
+ // Returns the size of a call in instructions. Note, the value returned is
+ // only valid as long as no entries are added to the constant pool between
+ // checking the call size and emitting the actual call.
+ static int CallSize(Register target);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+ void Jump(Register target);
+ void JumpToJSEntry(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Register target);
+ void CallJSEntry(Register target);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+ void Ret(Condition cond = al);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = al);
+
+ void Ret(int drop, Condition cond = al);
+
+ void Call(Label* target);
+
+ // Emit call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ Call(self, RelocInfo::CODE_TARGET);
+ }
+
+ // Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Handle<Object> value);
+ void Move(Register dst, Register src, Condition cond = al);
+ void Move(DoubleRegister dst, DoubleRegister src);
+
+ void MultiPush(RegList regs);
+ void MultiPop(RegList regs);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond = al);
+ // Store an object to the root table.
+ void StoreRoot(Register source, Heap::RootListIndex index,
+ Condition cond = al);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+ Register address);
+
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+
+ void CheckMapDeprecated(Handle<Map> map, Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
+ Register scratch3, Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value, Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+ lr_status, save_fp, remembered_set_action, smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ void RecordWriteForMap(Register object, Register map, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ void Push(Register src) { push(src); }
+
+ // Push a handle.
+ void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ StorePU(src2, MemOperand(sp, -2 * kPointerSize));
+ StoreP(src1, MemOperand(sp, kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ StorePU(src3, MemOperand(sp, -3 * kPointerSize));
+ StoreP(src2, MemOperand(sp, kPointerSize));
+ StoreP(src1, MemOperand(sp, 2 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ StorePU(src4, MemOperand(sp, -4 * kPointerSize));
+ StoreP(src3, MemOperand(sp, kPointerSize));
+ StoreP(src2, MemOperand(sp, 2 * kPointerSize));
+ StoreP(src1, MemOperand(sp, 3 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ StorePU(src5, MemOperand(sp, -5 * kPointerSize));
+ StoreP(src4, MemOperand(sp, kPointerSize));
+ StoreP(src3, MemOperand(sp, 2 * kPointerSize));
+ StoreP(src2, MemOperand(sp, 3 * kPointerSize));
+ StoreP(src1, MemOperand(sp, 4 * kPointerSize));
+ }
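+  // Editorial sketch (not part of the original header): after
+  //   Push(r3, r4, r5);
+  // the stack is laid out with the leftmost register at the highest address:
+  //   sp + 2 * kPointerSize -> r3
+  //   sp + 1 * kPointerSize -> r4
+  //   sp + 0                -> r5
+  // and the matching Pop(r3, r4, r5) below restores the registers and sp.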
+
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ LoadP(src2, MemOperand(sp, 0));
+ LoadP(src1, MemOperand(sp, kPointerSize));
+ addi(sp, sp, Operand(2 * kPointerSize));
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ LoadP(src3, MemOperand(sp, 0));
+ LoadP(src2, MemOperand(sp, kPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kPointerSize));
+ addi(sp, sp, Operand(3 * kPointerSize));
+ }
+
+ // Pop four registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4) {
+ LoadP(src4, MemOperand(sp, 0));
+ LoadP(src3, MemOperand(sp, kPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kPointerSize));
+ addi(sp, sp, Operand(4 * kPointerSize));
+ }
+
+ // Pop five registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ LoadP(src5, MemOperand(sp, 0));
+ LoadP(src4, MemOperand(sp, kPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kPointerSize));
+ addi(sp, sp, Operand(5 * kPointerSize));
+ }
+
+ // Push a fixed frame, consisting of lr, fp, context and
+ // JS function / marker id if marker_reg is a valid register.
+ void PushFixedFrame(Register marker_reg = no_reg);
+ void PopFixedFrame(Register marker_reg = no_reg);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+ // from C.
+ // Does not handle errors.
+ void FlushICache(Register address, size_t size, Register scratch);
+
+  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+ void CanonicalizeNaN(const DoubleRegister value) {
+ CanonicalizeNaN(value, value);
+ }
+
+ // Converts the integer (untagged smi) in |src| to a double, storing
+ // the result to |double_dst|
+ void ConvertIntToDouble(Register src, DoubleRegister double_dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a double, storing the result to |double_dst|
+ void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
+
+ // Converts the integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+  // Warning: The value in |int_scratch| will be changed in the process!
+ void ConvertIntToFloat(const DoubleRegister dst, const Register src,
+ const Register int_scratch);
+
+ // Converts the double_input to an integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_PPC64
+ const Register dst_hi,
+#endif
+ const Register dst, const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, int prologue_offset = 0);
+
+ // Enter exit frame.
+ // stack_space - extra stack space, used for alignment before call to C.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0);
+
+ // Leave the current exit frame. Expects the return value in r0.
+  // Expects the number of values pushed prior to the exit frame (which are to
+  // be removed) in a register, or no_reg if there is nothing to remove.
+ void LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(kRootRegister, Operand(roots_array_start));
+ }
+
+ // ----------------------------------------------------------------
+ // new PPC macro-assembler interfaces that are slightly higher level
+ // than assembler-ppc and may generate variable length sequences
+
+ // load a literal signed int value <value> to GPR <dst>
+ void LoadIntLiteral(Register dst, int value);
+
+ // load an SMI value <value> to GPR <dst>
+ void LoadSmiLiteral(Register dst, Smi* smi);
+
+ // load a literal double value <value> to FPR <result>
+ void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
+
+ void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+
+ void LoadWordArith(Register dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreWord(Register src, const MemOperand& mem, Register scratch);
+
+ void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
+
+ void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
+
+ void LoadByte(Register dst, const MemOperand& mem, Register scratch);
+
+ void StoreByte(Register src, const MemOperand& mem, Register scratch);
+
+ void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
+ Register scratch = no_reg);
+
+ void StoreRepresentation(Register src, const MemOperand& mem,
+ Representation r, Register scratch = no_reg);
+
+ // Move values between integer and floating point registers.
+ void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
+ void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+ Register scratch);
+ void MovInt64ToDouble(DoubleRegister dst,
+#if !V8_TARGET_ARCH_PPC64
+ Register src_hi,
+#endif
+ Register src);
+#if V8_TARGET_ARCH_PPC64
+ void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
+ Register src_lo, Register scratch);
+#endif
+ void MovDoubleLowToInt(Register dst, DoubleRegister src);
+ void MovDoubleHighToInt(Register dst, DoubleRegister src);
+ void MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+ Register dst_hi,
+#endif
+ Register dst, DoubleRegister src);
+
+ void Add(Register dst, Register src, intptr_t value, Register scratch);
+ void Cmpi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void Cmpli(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void Cmpwi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void Cmplwi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+ void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+ void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+
+ void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+ void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+ void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+ CRegister cr = cr7);
+ void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+ CRegister cr = cr7);
+ void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
+ RCBit rc = LeaveRC);
+
+ // Set new rounding mode RN to FPSCR
+ void SetRoundingMode(FPRoundingMode RN);
+
+ // reset rounding mode to default (kRoundToNearest)
+ void ResetRoundingMode();
+
+  // These exist to provide portability between 32-bit and 64-bit targets
+ void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function, const ParameterCount& actual,
+ InvokeFlag flag, const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void IsObjectJSObjectType(Register heap_object, Register map,
+ Register scratch, Label* fail);
+
+ void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
+ void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+
+ void IsObjectNameType(Register object, Register scratch, Label* fail);
+
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ void Throw(Register value);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(Register value);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
+ Label* miss);
+
+ void GetNumberHash(Register t0, Register scratch);
+
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register result, Register t0, Register t1,
+ Register t2);
+
+
+ inline void MarkCode(NopMarkerTypes type) { nop(type); }
+
+ // Check if the given instruction is a 'type' marker.
+  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
+  // These instructions are generated to mark special locations in the code,
+ // like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+
+
+ static inline int GetCodeMarker(Instr instr) {
+ int dst_reg_offset = 12;
+ int dst_mask = 0xf << dst_reg_offset;
+ int src_mask = 0xf;
+ int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+ int src_reg = instr & src_mask;
+ uint32_t non_register_mask = ~(dst_mask | src_mask);
+ uint32_t mov_mask = al | 13 << 21;
+
+ // Return <n> if we have a mov rn rn, else return -1.
+ int type = ((instr & non_register_mask) == mov_mask) &&
+ (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
+ (dst_reg < LAST_CODE_MARKER)
+ ? src_reg
+ : -1;
+ DCHECK((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+  // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateOneByteConsString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+ void AllocateOneByteSlicedString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT,
+ MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
+ Register scratch1, Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+
+ // Copies a fixed number of fields of heap objects from src to dst.
+ void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src, Register dst, Register length, Register scratch);
+
+ // Initialize fields with filler values. |count| fields starting at
+  // |start_offset| are overwritten with the value in |filler|. At the end of
+  // the loop, |start_offset| points at the next uninitialized field. |count| is
+ // assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register start_offset, Register count,
+ Register filler);
+
+ // Initialize fields with filler values. Fields starting at |start_offset|
+ // not including end_offset are overwritten with the value in |filler|. At
+  // the end of the loop, |start_offset| takes the value of |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
+ Register filler);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Try to get function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss,
+ bool miss_on_bound_function = false);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ // Type_reg can be no_reg. In that case ip is used.
+ void CompareObjectType(Register heap_object, Register map, Register type_reg,
+ InstanceType type);
+
+ // Compare object type for heap object. Branch to false_label if type
+ // is lower than min_type or greater than max_type.
+ // Load map into the register map.
+ void CheckObjectTypeRange(Register heap_object, Register map,
+ InstanceType min_type, InstanceType max_type,
+ Label* false_label);
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements. Otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
+ Register elements_reg, Register scratch1,
+ DoubleRegister double_scratch, Label* fail,
+ int elements_offset = 0);
+
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+ // set with result of map compare. If multiple map compares are required, the
+  // compare sequence branches to early_success.
+ void CompareMap(Register obj, Register scratch, Handle<Map> map,
+ Label* early_success);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
+ Label* fail, SmiCheckType smi_check_type);
+
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj, Register scratch, Handle<Map> map,
+ Handle<Code> success, SmiCheckType smi_check_type);
+
+
+ // Compare the object in a register to a value from the root list.
+ // Uses the ip register as scratch.
+ void CompareRoot(Register obj, Heap::RootListIndex index);
+
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that will be enabled if the object was a string.
+ Condition IsObjectStringType(Register obj, Register type) {
+ LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ andi(r0, type, Operand(kIsNotStringMask));
+ DCHECK_EQ(0, kStringTag);
+ return eq;
+ }
+
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // Get the number of least significant bits from a register
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+ // Load the value of a smi object into a double register.
+ void SmiToDouble(DoubleRegister value, Register smi);
+
+ // Check if a double can be exactly represented as a signed 32-bit integer.
+ // CR_EQ in cr7 is set if true.
+ void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
+ Register scratch2, DoubleRegister double_scratch);
+
+ // Try to convert a double to a signed 32-bit integer.
+ // CR_EQ in cr7 is set and result assigned if the conversion is exact.
+ void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
+ Register scratch, DoubleRegister double_scratch);
+
+  // Floors a double and writes the value to the result register.
+  // Goes to exact if the conversion is exact (to be able to test -0),
+  // falls through to the calling code if an overflow occurred, else goes to
+  // done. On return, input_high is loaded with the high bits of the input.
+ void TryInt32Floor(Register result, DoubleRegister double_input,
+ Register input_high, Register scratch,
+ DoubleRegister double_scratch, Label* done, Label* exact);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object, Register result,
+ Register heap_number_map, Register scratch1,
+ Label* not_int32);
+
+ // Overflow handling functions.
+ // Usage: call the appropriate arithmetic function and then call one of the
+ // flow control functions with the corresponding label.
+
+ // Compute dst = left + right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void AddAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+ void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
+ Register overflow_dst, Register scratch = r0);
+
+ // Compute dst = left - right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void SubAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+
+ void BranchOnOverflow(Label* label) { blt(label, cr0); }
+
+ void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
+
+ void RetOnOverflow(void) {
+ Label label;
+
+ blt(&label, cr0);
+ Ret();
+ bind(&label);
+ }
+
+ void RetOnNoOverflow(void) {
+ Label label;
+
+ bge(&label, cr0);
+ Ret();
+ bind(&label);
+ }
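+  // Editorial usage sketch (not part of the original header): the overflow
+  // helpers are intended to be paired, e.g.
+  //   Label overflow;
+  //   AddAndCheckForOverflow(r3, r4, r5, r6);  // r6 receives the overflow
+  //   BranchOnOverflow(&overflow);             // taken on overflow (cr0)
+  //   // ... non-overflow fast path ...
+  //   bind(&overflow);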
+
+ // Pushes <count> double values to <location>, starting from d<first>.
+ void SaveFPRegs(Register location, int first, int count);
+
+ // Pops <count> double values from <location>, starting from d<first>.
+ void RestoreFPRegs(Register location, int first, int count);
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub.
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
+
+ // Call a code stub.
+ void TailCallStub(CodeStub* stub, Condition cond = al);
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext, int num_arguments);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments, int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
+ int result_size);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void MovFromFloatParameter(DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context. stack_space
+ // - space to be unwound on exit (includes the call JS arguments space and
+ // the additional space allocated for the fast call).
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // setup the function in r1.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ Handle<Object> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void AssertFastElements(Register elements);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // ---------------------------------------------------------------------------
+ // Number utilities
+
+  // Check whether the value of reg is a power of two and not zero. If not,
+  // control jumps to the label not_power_of_two_or_zero. If reg is a power of
+  // two, the register scratch contains the value of (reg - 1) when control
+  // falls through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
+ Label* not_power_of_two_or_zero);
+ // Check whether the value of reg is a power of two and not zero.
+ // Control falls through if it is, with scratch containing the mask
+ // value (reg - 1).
+ // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+ // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+ // strictly positive but not a power of two.
+ void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two);
+
+ // ---------------------------------------------------------------------------
+ // Bit testing/extraction
+ //
+ // Bit numbering is such that the least significant bit is bit 0
+ // (for consistency between 32/64-bit).
+
+ // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+ // and place them into the least significant bits of dst.
+ inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+ int rangeEnd, RCBit rc = LeaveRC) {
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+ int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+ int width = rangeStart - rangeEnd + 1;
+#if V8_TARGET_ARCH_PPC64
+ rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+#else
+ rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
+#endif
+ }
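+  // Editorial worked example (not part of the original header): with the
+  // LSB-is-bit-0 numbering above, ExtractBitRange(dst, src, 7, 4) places
+  // bits 7..4 of src into bits 3..0 of dst, i.e. dst = (src >> 4) & 0xf,
+  // using a single rotate-and-mask (rldicl / rlwinm) instruction.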
+
+ inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
+ RCBit rc = LeaveRC) {
+ ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
+ }
+
+ // Extract consecutive bits (defined by mask) from src and place them
+ // into the least significant bits of dst.
+ inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+ RCBit rc = LeaveRC) {
+ int start = kBitsPerPointer - 1;
+ int end;
+ uintptr_t bit = (1L << start);
+
+ while (bit && (mask & bit) == 0) {
+ start--;
+ bit >>= 1;
+ }
+ end = start;
+ bit >>= 1;
+
+ while (bit && (mask & bit)) {
+ end--;
+ bit >>= 1;
+ }
+
+ // 1-bits in mask must be contiguous
+ DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+ ExtractBitRange(dst, src, start, end, rc);
+ }
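+  // Editorial worked example (not part of the original header): a contiguous
+  // mask such as 0x00F0 resolves to start == 7 and end == 4 above, so
+  // ExtractBitMask(dst, src, 0x00F0) is equivalent to
+  // ExtractBitRange(dst, src, 7, 4), i.e. dst = (src >> 4) & 0xf.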
+
+ // Test single bit in value.
+ inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+ ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
+ }
+
+ // Test consecutive bit range in value. Range is defined by
+ // rangeStart - rangeEnd.
+ inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+ Register scratch = r0) {
+ ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
+ }
+
+ // Test consecutive bit range in value. Range is defined by mask.
+ inline void TestBitMask(Register value, uintptr_t mask,
+ Register scratch = r0) {
+ ExtractBitMask(scratch, value, mask, SetRC);
+ }
+
+
+ // ---------------------------------------------------------------------------
+ // Smi utilities
+
+ // Shift left by 1
+ void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
+ void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
+ ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
+ }
+
+#if !V8_TARGET_ARCH_PPC64
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+ void SmiTagCheckOverflow(Register reg, Register overflow);
+ void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+ inline void JumpIfNotSmiCandidate(Register value, Register scratch,
+ Label* not_smi_label) {
+ // High bits must be identical to fit into an Smi
+ addis(scratch, value, Operand(0x40000000u >> 16));
+ cmpi(scratch, Operand::Zero());
+ blt(not_smi_label);
+ }
+#endif
+ inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle any of the high bits being set in the value.
+ TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
+ scratch);
+ }
+ inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
+ Label* not_smi_label) {
+ TestUnsignedSmiCandidate(value, scratch);
+ bne(not_smi_label, cr0);
+ }
+
+ void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
+
+ void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
+ ShiftRightArithImm(dst, src, kSmiShift, rc);
+ }
+
+ void SmiToPtrArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+ ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
+ ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#endif
+ }
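+  // Editorial note (not part of the original header): on PPC64 a smi keeps
+  // its 31-bit value in the upper word (kSmiShift == 32), so the smi-index to
+  // pointer-array byte offset conversion is a single arithmetic shift right
+  // by kSmiShift - kPointerSizeLog2 == 32 - 3 == 29; on PPC32 (kSmiShift == 1,
+  // kPointerSizeLog2 == 2) it is a shift left by 1.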
+
+ void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
+
+ void SmiToShortArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
+ ShiftRightArithImm(dst, src, kSmiShift - 1);
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
+ if (!dst.is(src)) {
+ mr(dst, src);
+ }
+#endif
+ }
+
+ void SmiToIntArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
+ ShiftRightArithImm(dst, src, kSmiShift - 2);
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
+ ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
+#endif
+ }
+
+#define SmiToFloatArrayOffset SmiToIntArrayOffset
+
+ void SmiToDoubleArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
+ ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
+#else
+ STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
+ ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
+#endif
+ }
+
+ void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
+ if (kSmiShift < elementSizeLog2) {
+ ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
+ } else if (kSmiShift > elementSizeLog2) {
+ ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
+ } else if (!dst.is(src)) {
+ mr(dst, src);
+ }
+ }
+
+ void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
+ bool isSmi) {
+ if (isSmi) {
+ SmiToArrayOffset(dst, src, elementSizeLog2);
+ } else {
+ ShiftLeftImm(dst, src, Operand(elementSizeLog2));
+ }
+ }
+
+ // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+ inline void TestIfSmi(Register value, Register scratch) {
+ TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask));
+ }
+
+ inline void TestIfPositiveSmi(Register value, Register scratch) {
+ STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
+ (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
+#if V8_TARGET_ARCH_PPC64
+ rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
+#else
+ rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
+#endif
+ }
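+  // Editorial note (not part of the original header): rotating left by one
+  // moves the sign bit into bit 0 and the smi tag bit into bit 1, so the
+  // rotate-and-mask above isolates exactly those two bits; the result is
+  // zero (CR0 eq) precisely when the value is a non-negative smi.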
+
+  // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ TestIfSmi(value, r0);
+ beq(smi_label, cr0); // branch if SMI
+ }
+  // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ TestIfSmi(value, r0);
+ bne(not_smi_label, cr0);
+ }
+ // Jump if either of the registers contain a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contain a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+
+#if V8_TARGET_ARCH_PPC64
+ inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
+ CRegister cr = cr7) {
+    // High bits must be identical to fit into a 32-bit integer
+ srawi(scratch1, value, 31);
+ sradi(scratch2, value, 32);
+ cmp(scratch1, scratch2, cr);
+ }
+#else
+ inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
+ CRegister cr = cr7) {
+    // High bits must be identical to fit into a 32-bit integer
+ srawi(scratch, lo_word, 31);
+ cmp(scratch, hi_word, cr);
+ }
+#endif
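+  // Editorial note (not part of the original header): a value fits in an
+  // int32 exactly when sign-extending its low 32 bits reproduces the whole
+  // value. The 64-bit variant above compares the sign of bit 31 (srawi)
+  // against the upper word (sradi); the 32-bit variant compares the
+  // sign-extension of the low word against the high word.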
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if reg is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object, Register heap_number_map,
+ Register scratch, Label* on_not_heap_number);
+
+ // ---------------------------------------------------------------------------
+ // String utilities
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object, Register result,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* not_found);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both objects are sequential one-byte strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_one_byte_strings);
+
+ // Checks if both instance types are sequential one-byte strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* failure);
+
+ // Check if instance type is sequential one-byte string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+ Label* failure);
+
+ void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
+
+ // ---------------------------------------------------------------------------
+ // Patching helpers.
+
+ // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
+ void GetRelocatedValue(Register location, Register result, Register scratch);
+ void SetRelocatedValue(Register location, Register scratch,
+ Register new_value);
+
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ // Saturate a value into 8-bit unsigned integer
+ // if input_value < 0, output_value is 0
+ // if input_value > 255, output_value is 255
+ // otherwise output_value is the (int)input_value (round to nearest)
+ void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+ void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
+
+ template <typename Field>
+ void DecodeFieldToSmi(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+ DecodeField<Field>(dst, src);
+ SmiTag(dst);
+#else
+ // 32-bit can do this in one instruction:
+ int start = Field::kSize + kSmiShift - 1;
+ int end = kSmiShift;
+ int rotate = kSmiShift - Field::kShift;
+ if (rotate < 0) {
+ rotate += kBitsPerPointer;
+ }
+ rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
+ kBitsPerPointer - end - 1);
+#endif
+ }
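+  // Editorial note (not part of the original header): on 32-bit targets the
+  // rlwinm above rotates the field so that it lands just above the smi tag
+  // and masks everything else off, fusing the field extraction and SmiTag
+  // into a single instruction; 64-bit targets use the two-step sequence.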
+
+ template <typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ DecodeFieldToSmi<Field>(reg, reg);
+ }
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+
+ // Expects object in r0 and returns map with validated enum cache
+ // in r0. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Register null_value, Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to eq.
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ beq(memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual, Handle<Code> code_constant,
+ Register code_reg, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void InitializeNewString(Register string, Register length,
+ Heap::RootListIndex map_index, Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // the position of the first bit. Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
+ Register mask_reg);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+#if V8_OOL_CONSTANT_POOL
+ // Loads the constant pool pointer (kConstantPoolRegister).
+ enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
+ void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
+ int ip_code_entry_delta = 0);
+#endif
+
+ bool generating_stub_;
+ bool has_frame_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for compiled frame
+ // traversal.
+ friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ enum FlushICache { FLUSH, DONT_FLUSH };
+
+ CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr instr);
+
+ // Emit the condition part of an instruction leaving the rest of the current
+ // instruction unchanged.
+ void EmitCondition(Condition cond);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
+};
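+
+// A hedged usage sketch (not part of the original header): a caller that
+// wants to rewrite a single instruction in place would typically write
+//
+//   CodePatcher patcher(pc, 1);   // patch exactly one instruction
+//   patcher.masm()->nop();        // emit the replacement
+//
+// and rely on the destructor to assert that exactly the promised number of
+// instructions was emitted and, unless DONT_FLUSH was passed, to flush the
+// instruction cache for the patched range.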
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) \
+ masm->stop(__FILE_LINE__); \
+ masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
new file mode 100644
index 0000000000..54acce16fb
--- /dev/null
+++ b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
@@ -0,0 +1,1337 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/ppc/regexp-macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r25: Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - r26: Pointer to current code object (Code*) including heap object tag.
+ * - r27: Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - r28: Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - r29: Points to tip of backtrack stack
+ * - r30: End of input (points to byte after last character in input).
+ * - r31: Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - r12: IP register, used by assembler. Very volatile.
+ * - r1/sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ * - fp[44] Isolate* isolate (address of the current isolate)
+ * - fp[40] secondary link/return address used by native call.
+ * - fp[36] lr save area (currently unused)
+ * - fp[32] backchain (currently unused)
+ * --- sp when called ---
+ * - fp[28] return address (lr).
+ * - fp[24] old frame pointer (r31).
+ * - fp[0..20] backup of registers r25..r30
+ * --- frame pointer ----
+ * - fp[-4] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[-8] stack_area_base (high end of the memory area to use as
+ * backtracking stack).
+ * - fp[-12] capture array size (may fit multiple sets of matches)
+ * - fp[-16] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[-20] end of input (address of end of string).
+ * - fp[-24] start of input (address of first character in string).
+ * - fp[-28] start index (character index of start).
+ * - fp[-32] void* input_string (location of a handle containing the string).
+ * - fp[-36] success counter (only for global regexps to count matches).
+ * - fp[-40] Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-44] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - fp[-48] register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * byte* stack_area_base,
+ * Address secondary_return_address, // Only used by native call.
+ * bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in ppc/simulator-ppc.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
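+
+// Illustration only (not part of the original comment): under the convention
+// above, a C caller would conceptually invoke the generated code roughly as
+//
+//   typedef int (*RegExpCodeEntry)(String* input_string, int start_index,
+//                                  Address start, Address end,
+//                                  int* capture_output_array,
+//                                  byte* stack_area_base,
+//                                  Address secondary_return_address,
+//                                  bool direct_call);
+//   int result = reinterpret_cast<RegExpCodeEntry>(entry)(
+//       subject, start_index, start, end, captures, stack_base, 0, false);
+//
+// where 'RegExpCodeEntry' and 'entry' are illustrative names only; in
+// practice this call is made by NativeRegExpMacroAssembler::Execute() via
+// the CALL_GENERATED_REGEXP_CODE macro, as noted above.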
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ DCHECK_EQ(0, registers_to_save % 2);
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ __ function_descriptor();
+#endif
+
+ __ b(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(r3, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerPPC::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerPPC::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ addi(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
+ DCHECK(reg >= 0);
+ DCHECK(reg < num_registers_);
+ if (by != 0) {
+ __ LoadP(r3, register_location(reg), r0);
+ __ mov(r0, Operand(by));
+ __ add(r3, r3, r0);
+ __ StoreP(r3, register_location(reg), r0);
+ }
+}
+
+
+void RegExpMacroAssemblerPPC::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(r3);
+ __ add(r3, r3, code_pointer());
+ __ mtctr(r3);
+ __ bctr();
+}
+
+
+void RegExpMacroAssemblerPPC::Bind(Label* label) { __ bind(label); }
+
+
+void RegExpMacroAssemblerPPC::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ Cmpli(current_character(), Operand(c), r0);
+ BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ Cmpli(current_character(), Operand(limit), r0);
+ BranchOrBacktrack(gt, on_greater);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+ __ cmpi(r3, Operand::Zero());
+ BranchOrBacktrack(ne, &not_at_start);
+
+ // If we did, are we still at the start of the input?
+ __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+ __ mr(r0, current_input_offset());
+ __ add(r3, end_of_input_address(), r0);
+ __ cmp(r4, r3);
+ BranchOrBacktrack(eq, on_at_start);
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+ __ cmpi(r3, Operand::Zero());
+ BranchOrBacktrack(ne, on_not_at_start);
+ // If we did, are we still at the start of the input?
+ __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+ __ add(r3, end_of_input_address(), current_input_offset());
+ __ cmp(r3, r4);
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ Cmpli(current_character(), Operand(limit), r0);
+ BranchOrBacktrack(lt, on_less);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ LoadP(r3, MemOperand(backtrack_stackpointer(), 0));
+ __ cmp(current_input_offset(), r3);
+ __ bne(&backtrack_non_equal);
+ __ addi(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kPointerSize));
+
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
+ int start_reg, Label* on_no_match) {
+ Label fallthrough;
+ __ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
+ __ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
+ __ sub(r4, r4, r3, LeaveOE, SetRC); // Length of capture.
+
+ // If length is zero, either the capture is empty or it is not participating.
+ // In either case succeed immediately.
+ __ beq(&fallthrough, cr0);
+
+ // Check that there are enough characters left in the input.
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ // __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match, cr0);
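+ // (Note: current_input_offset() is a negative byte offset from the end of
+ // the input, so a positive sum above means the capture is longer than the
+ // remaining input and the match cannot succeed.)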
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // r3 - offset of start of capture
+ // r4 - length of capture
+ __ add(r3, r3, end_of_input_address());
+ __ add(r5, end_of_input_address(), current_input_offset());
+ __ add(r4, r3, r4);
+
+ // r3 - Address of start of capture.
+ // r4 - Address of end of capture
+ // r5 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ lbz(r6, MemOperand(r3));
+ __ addi(r3, r3, Operand(char_size()));
+ __ lbz(r25, MemOperand(r5));
+ __ addi(r5, r5, Operand(char_size()));
+ __ cmp(r25, r6);
+ __ beq(&loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ ori(r6, r6, Operand(0x20)); // Convert capture character to lower-case.
+ __ ori(r25, r25, Operand(0x20)); // Also convert input character.
+ __ cmp(r25, r6);
+ __ bne(&fail);
+ __ subi(r6, r6, Operand('a'));
+ __ cmpli(r6, Operand('z' - 'a')); // Is r6 a lowercase letter?
+ __ ble(&loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ subi(r6, r6, Operand(224 - 'a'));
+ __ cmpli(r6, Operand(254 - 224));
+ __ bgt(&fail); // Weren't Latin-1 letters.
+ __ cmpi(r6, Operand(247 - 224)); // Check for 247.
+ __ beq(&fail);
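+ // (Worked example of the fold above: 'A' is 0x41 and 0x41 | 0x20 == 0x61
+ // == 'a', so OR-ing both characters with 0x20 folds ASCII case. The fold
+ // is only valid for letters, hence the range checks: 'a'..'z', or Latin-1
+ // 224..254 excluding 247 (0xF7, the non-letter '÷').)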
+
+ __ bind(&loop_check);
+ __ cmp(r3, r4);
+ __ blt(&loop);
+ __ b(&success);
+
+ __ bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ sub(current_input_offset(), r5, end_of_input_address());
+ } else {
+ DCHECK(mode_ == UC16);
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, r5);
+
+ // r3 - offset of start of capture
+ // r4 - length of capture
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // r3: Address byte_offset1 - Address captured substring's start.
+ // r4: Address byte_offset2 - Address of current character position.
+ // r5: size_t byte_length - length of capture in bytes(!)
+ // r6: Isolate* isolate
+
+ // Address of start of capture.
+ __ add(r3, r3, end_of_input_address());
+ // Length of capture.
+ __ mr(r5, r4);
+ // Save length in callee-save register for use on return.
+ __ mr(r25, r4);
+ // Address of current input position.
+ __ add(r4, current_input_offset(), end_of_input_address());
+ // Isolate.
+ __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ cmpi(r3, Operand::Zero());
+ BranchOrBacktrack(eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ add(current_input_offset(), current_input_offset(), r25);
+ }
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ LoadP(r3, register_location(start_reg), r0);
+ __ LoadP(r4, register_location(start_reg + 1), r0);
+ __ sub(r4, r4, r3, LeaveOE, SetRC); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ beq(&fallthrough, cr0);
+
+ // Check that there are enough characters left in the input.
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ BranchOrBacktrack(gt, on_no_match, cr0);
+
+ // Compute pointers to match string and capture string
+ __ add(r3, r3, end_of_input_address());
+ __ add(r5, end_of_input_address(), current_input_offset());
+ __ add(r4, r4, r3);
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ lbz(r6, MemOperand(r3));
+ __ addi(r3, r3, Operand(char_size()));
+ __ lbz(r25, MemOperand(r5));
+ __ addi(r5, r5, Operand(char_size()));
+ } else {
+ DCHECK(mode_ == UC16);
+ __ lhz(r6, MemOperand(r3));
+ __ addi(r3, r3, Operand(char_size()));
+ __ lhz(r25, MemOperand(r5));
+ __ addi(r5, r5, Operand(char_size()));
+ }
+ __ cmp(r6, r25);
+ BranchOrBacktrack(ne, on_no_match);
+ __ cmp(r3, r4);
+ __ blt(&loop);
+
+ // Move current character position to position after match.
+ __ sub(current_input_offset(), r5, end_of_input_address());
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ __ Cmpli(current_character(), Operand(c), r0);
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal) {
+ __ mov(r0, Operand(mask));
+ if (c == 0) {
+ __ and_(r3, current_character(), r0, SetRC);
+ } else {
+ __ and_(r3, current_character(), r0);
+ __ Cmpli(r3, Operand(c), r0, cr0);
+ }
+ BranchOrBacktrack(eq, on_equal, cr0);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ mov(r0, Operand(mask));
+ if (c == 0) {
+ __ and_(r3, current_character(), r0, SetRC);
+ } else {
+ __ and_(r3, current_character(), r0);
+ __ Cmpli(r3, Operand(c), r0, cr0);
+ }
+ BranchOrBacktrack(ne, on_not_equal, cr0);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacterAfterMinusAnd(
+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
+ DCHECK(minus < String::kMaxUtf16CodeUnit);
+ __ subi(r3, current_character(), Operand(minus));
+ __ mov(r0, Operand(mask));
+ __ and_(r3, r3, r0);
+ __ Cmpli(r3, Operand(c), r0);
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterInRange(uc16 from, uc16 to,
+ Label* on_in_range) {
+ __ mov(r0, Operand(from));
+ __ sub(r3, current_character(), r0);
+ __ Cmpli(r3, Operand(to - from), r0);
+ BranchOrBacktrack(le, on_in_range); // Unsigned lower-or-same condition.
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) {
+ __ mov(r0, Operand(from));
+ __ sub(r3, current_character(), r0);
+ __ Cmpli(r3, Operand(to - from), r0);
+ BranchOrBacktrack(gt, on_not_in_range); // Unsigned higher condition.
+}
+
+
+void RegExpMacroAssemblerPPC::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ mov(r3, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ andi(r4, current_character(), Operand(kTableSize - 1));
+ __ addi(r4, r4, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ addi(r4, current_character(),
+ Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+ }
+ __ lbzx(r3, MemOperand(r3, r4));
+ __ cmpi(r3, Operand::Zero());
+ BranchOrBacktrack(ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
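+ // For example, for the 'd' case below, subtracting '0' and comparing
+ // unsigned against '9' - '0' accepts exactly '0'..'9': any character below
+ // '0' wraps around to a large unsigned value and fails the same compare.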
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ cmpi(current_character(), Operand(' '));
+ __ beq(&success);
+ // Check range 0x09..0x0d
+ __ subi(r3, current_character(), Operand('\t'));
+ __ cmpli(r3, Operand('\r' - '\t'));
+ __ ble(&success);
+ // \u00a0 (NBSP).
+ __ cmpi(r3, Operand(0x00a0 - '\t'));
+ BranchOrBacktrack(ne, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ __ subi(r3, current_character(), Operand('0'));
+ __ cmpli(r3, Operand('9' - '0'));
+ BranchOrBacktrack(gt, on_no_match);
+ return true;
+ case 'D':
+ // Match non ASCII-digits
+ __ subi(r3, current_character(), Operand('0'));
+ __ cmpli(r3, Operand('9' - '0'));
+ BranchOrBacktrack(le, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ xori(r3, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ subi(r3, r3, Operand(0x0b));
+ __ cmpli(r3, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(le, on_no_match);
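+ // (Worked example: '\n' is 0x0a and 0x0a ^ 0x01 == 0x0b; '\r' is 0x0d and
+ // 0x0d ^ 0x01 == 0x0c. After subtracting 0x0b, exactly those two characters
+ // land in the unsigned range [0, 1]; every other character, including 0x0b
+ // and 0x0c themselves, falls outside it.)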
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ __ cmpli(r3, Operand(1));
+ BranchOrBacktrack(le, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ xori(r3, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ subi(r3, r3, Operand(0x0b));
+ __ cmpli(r3, Operand(0x0c - 0x0b));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(gt, on_no_match);
+ } else {
+ Label done;
+ __ ble(&done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ subi(r3, r3, Operand(0x2028 - 0x0b));
+ __ cmpli(r3, Operand(1));
+ BranchOrBacktrack(gt, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ cmpi(current_character(), Operand('z'));
+ BranchOrBacktrack(gt, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r3, Operand(map));
+ __ lbzx(r3, MemOperand(r3, current_character()));
+ __ cmpli(r3, Operand::Zero());
+ BranchOrBacktrack(eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ cmpli(current_character(), Operand('z'));
+ __ bgt(&done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r3, Operand(map));
+ __ lbzx(r3, MemOperand(r3, current_character()));
+ __ cmpli(r3, Operand::Zero());
+ BranchOrBacktrack(ne, on_no_match);
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerPPC::Fail() {
+ __ li(r3, Operand(FAILURE));
+ __ b(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
+ Label return_r3;
+
+ if (masm_->has_exception()) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into the exit sequence right away.
+ __ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type
+ // is MANUAL, no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Ensure register assignments are consistent with the callee-save mask.
+ DCHECK(r25.bit() & kRegExpCalleeSaved);
+ DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
+ DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
+ DCHECK(current_character().bit() & kRegExpCalleeSaved);
+ DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
+ DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
+ DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ RegList registers_to_retain = kRegExpCalleeSaved;
+ RegList argument_registers = r3.bit() | r4.bit() | r5.bit() | r6.bit() |
+ r7.bit() | r8.bit() | r9.bit() | r10.bit();
+ __ mflr(r0);
+ __ push(r0);
+ __ MultiPush(argument_registers | registers_to_retain);
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ __ addi(frame_pointer(), sp, Operand(8 * kPointerSize));
+ __ li(r3, Operand::Zero());
+ __ push(r3); // Make room for success counter and initialize it to 0.
+ __ push(r3); // Make room for "position - 1" constant (value is irrelevant)
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(r3, Operand(stack_limit));
+ __ LoadP(r3, MemOperand(r3));
+ __ sub(r3, sp, r3, LeaveOE, SetRC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit, cr0);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmpli(r3, Operand(num_registers_ * kPointerSize), r0);
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(r3, Operand(EXCEPTION));
+ __ b(&return_r3);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r3);
+ __ cmpi(r3, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ bne(&return_r3);
+
+ __ bind(&stack_ok);
+
+ // Allocate space on stack for registers.
+ __ Add(sp, sp, -num_registers_ * kPointerSize, r0);
+ // Load string end.
+ __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ LoadP(r3, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ sub(current_input_offset(), r3, end_of_input_address());
+ // Set r3 to address of char before start of the input string
+ // (effectively string position -1).
+ __ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
+ __ subi(r3, current_input_offset(), Operand(char_size()));
+ if (mode_ == UC16) {
+ __ ShiftLeftImm(r0, r4, Operand(1));
+ __ sub(r3, r3, r0);
+ } else {
+ __ sub(r3, r3, r4);
+ }
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ StoreP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ li(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+ if (num_saved_registers_ > 8) {
+ // One slot beyond address of register 0.
+ __ addi(r4, frame_pointer(), Operand(kRegisterZero + kPointerSize));
+ __ li(r5, Operand(num_saved_registers_));
+ __ mtctr(r5);
+ Label init_loop;
+ __ bind(&init_loop);
+ __ StorePU(r3, MemOperand(r4, -kPointerSize));
+ __ bdnz(&init_loop);
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ StoreP(r3, register_location(i), r0);
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ LoadP(backtrack_stackpointer(),
+ MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ b(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // copy captures to output
+ __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+ __ LoadP(r3, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadP(r5, MemOperand(frame_pointer(), kStartIndex));
+ __ sub(r4, end_of_input_address(), r4);
+ // r4 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ ShiftRightImm(r4, r4, Operand(1));
+ }
+ // r4 is length of input in characters.
+ __ add(r4, r4, r5);
+ // r4 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ LoadP(r5, register_location(i), r0);
+ __ LoadP(r6, register_location(i + 1), r0);
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in r25 for the zero-length check later.
+ __ mr(r25, r5);
+ }
+ if (mode_ == UC16) {
+ __ ShiftRightArithImm(r5, r5, 1);
+ __ add(r5, r4, r5);
+ __ ShiftRightArithImm(r6, r6, 1);
+ __ add(r6, r4, r6);
+ } else {
+ __ add(r5, r4, r5);
+ __ add(r6, r4, r6);
+ }
+ __ stw(r5, MemOperand(r3));
+ __ addi(r3, r3, Operand(kIntSize));
+ __ stw(r6, MemOperand(r3));
+ __ addi(r3, r3, Operand(kIntSize));
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ LoadP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ addi(r3, r3, Operand(1));
+ __ StoreP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ subi(r4, r4, Operand(num_saved_registers_));
+ // Check whether we have enough room for another set of capture results.
+ __ cmpi(r4, Operand(num_saved_registers_));
+ __ blt(&return_r3);
+
+ __ StoreP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ addi(r5, r5, Operand(num_saved_registers_ * kIntSize));
+ __ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare r3 to initialize registers with its value in the next run.
+ __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // r25: capture start index
+ __ cmp(current_input_offset(), r25);
+ // Not a zero-length match, restart.
+ __ bne(&load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ cmpi(current_input_offset(), Operand::Zero());
+ __ beq(&exit_label_);
+ // Advance current position after a zero-length match.
+ __ addi(current_input_offset(), current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ b(&load_char_start_regexp);
+ } else {
+ __ li(r3, Operand(SUCCESS));
+ }
+ }
+
+ // Exit and return r3
+ __ bind(&exit_label_);
+ if (global()) {
+ __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_r3);
+ // Skip sp past regexp registers and local variables.
+ __ mr(sp, frame_pointer());
+ // Restore registers r25..r31 and return (restoring lr to pc).
+ __ MultiPop(registers_to_retain);
+ __ pop(r0);
+ __ mtctr(r0);
+ __ bctr();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ CallCheckStackGuardState(r3);
+ __ cmpi(r3, Operand::Zero());
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ bne(&return_r3);
+
+ // String might have moved: Reload end of string from frame.
+ __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ Label grow_failed;
+
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, r3);
+ __ mr(r3, backtrack_stackpointer());
+ __ addi(r4, frame_pointer(), Operand(kStackHighEnd));
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // If it returns NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ mr(backtrack_stackpointer(), r3);
+ // Restore saved registers and continue.
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(r3, Operand(EXCEPTION));
+ __ b(&return_r3);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerPPC::GoTo(Label* to) { BranchOrBacktrack(al, to); }
+
+
+void RegExpMacroAssemblerPPC::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ LoadP(r3, register_location(reg), r0);
+ __ Cmpi(r3, Operand(comparand), r0);
+ BranchOrBacktrack(ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerPPC::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ LoadP(r3, register_location(reg), r0);
+ __ Cmpi(r3, Operand(comparand), r0);
+ BranchOrBacktrack(lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerPPC::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ LoadP(r3, register_location(reg), r0);
+ __ cmp(r3, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerPPC::Implementation() {
+ return kPPCImplementation;
+}
+
+
+void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
+ DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerPPC::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerPPC::PopRegister(int register_index) {
+ Pop(r3);
+ __ StoreP(r3, register_location(register_index), r0);
+}
+
+
+void RegExpMacroAssemblerPPC::PushBacktrack(Label* label) {
+ __ mov_label_offset(r3, label);
+ Push(r3);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerPPC::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerPPC::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ LoadP(r3, register_location(register_index), r0);
+ Push(r3);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerPPC::ReadCurrentPositionFromRegister(int reg) {
+ __ LoadP(current_input_offset(), register_location(reg), r0);
+}
+
+
+void RegExpMacroAssemblerPPC::ReadStackPointerFromRegister(int reg) {
+ __ LoadP(backtrack_stackpointer(), register_location(reg), r0);
+ __ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
+ __ add(backtrack_stackpointer(), backtrack_stackpointer(), r3);
+}
+
+
+void RegExpMacroAssemblerPPC::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Cmpi(current_input_offset(), Operand(-by * char_size()), r0);
+ __ bge(&after_position);
+ __ mov(current_input_offset(), Operand(-by * char_size()));
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerPPC::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ mov(r3, Operand(to));
+ __ StoreP(r3, register_location(register_index), r0);
+}
+
+
+bool RegExpMacroAssemblerPPC::Succeed() {
+ __ b(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ StoreP(current_input_offset(), register_location(reg), r0);
+ } else {
+ __ mov(r0, Operand(cp_offset * char_size()));
+ __ add(r3, current_input_offset(), r0);
+ __ StoreP(r3, register_location(reg), r0);
+ }
+}
+
+
+void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ StoreP(r3, register_location(reg), r0);
+ }
+}
+
+
+void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
+ __ LoadP(r4, MemOperand(frame_pointer(), kStackHighEnd));
+ __ sub(r3, backtrack_stackpointer(), r4);
+ __ StoreP(r3, register_location(reg), r0);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
+ int frame_alignment = masm_->ActivationFrameAlignment();
+ int stack_space = kNumRequiredStackFrameSlots;
+ int stack_passed_arguments = 1; // space for return address pointer
+
+ // The following stack manipulation logic is similar to
+ // PrepareCallCFunction. However, we need an extra slot on the
+ // stack to house the return address parameter.
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for stack arguments
+ // -- preserving original value of sp.
+ __ mr(scratch, sp);
+ __ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+ __ StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ // Make room for stack arguments
+ stack_space += stack_passed_arguments;
+ }
+
+ // Allocate frame with required slots to make ABI work.
+ __ li(r0, Operand::Zero());
+ __ StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+
+ // RegExp code frame pointer.
+ __ mr(r5, frame_pointer());
+ // Code* of self.
+ __ mov(r4, Operand(masm_->CodeObject()));
+ // r3 will point to the return address, placed by DirectCEntry.
+ __ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kPointerSize));
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ mov(ip, Operand(stack_guard_check));
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm_, ip);
+
+ // Restore the stack pointer
+ stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+ if (frame_alignment > kPointerSize) {
+ __ LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+ } else {
+ __ addi(sp, sp, Operand(stack_space * kPointerSize));
+ }
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
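+
+// (For instance, frame_entry<Isolate*>(re_frame, kIsolate) below reads the
+// Isolate* stored at the kIsolate offset of the RegExp frame; assigning
+// through the returned reference updates the frame in place.)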
+
+
+int RegExpMacroAssemblerPPC::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If it is not a real stack overflow, the stack guard was used to interrupt
+ // execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
+ // Current string.
+ bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+
+ DCHECK(re_code->instruction_start() <= *return_address);
+ DCHECK(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = isolate->stack_guard()->HandleInterrupts();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ intptr_t delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+ // If we changed between an Latin1 and an UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ DCHECK(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<intptr_t>(re_frame, kStartIndex);
+ const byte* new_address =
+ StringCharacterPosition(*subject_tmp, start_index + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address but
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ }
+
+ return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
+ BranchOrBacktrack(ge, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerPPC::BranchOrBacktrack(Condition condition, Label* to,
+ CRegister cr) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ b(to);
+ return;
+ }
+ if (to == NULL) {
+ __ b(condition, &backtrack_label_, cr);
+ return;
+ }
+ __ b(condition, to, cr);
+}
+
+
+void RegExpMacroAssemblerPPC::SafeCall(Label* to, Condition cond,
+ CRegister cr) {
+ __ b(cond, to, cr, SetLK);
+}
+
+
+void RegExpMacroAssemblerPPC::SafeReturn() {
+ __ pop(r0);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ add(r0, r0, ip);
+ __ mtlr(r0);
+ __ blr();
+}
+
+
+void RegExpMacroAssemblerPPC::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ mflr(r0);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ sub(r0, r0, ip);
+ __ push(r0);
+}
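+
+// (SafeCall/SafeCallTarget/SafeReturn store the return address as an offset
+// from the code object rather than as an absolute address, so a GC that
+// moves the code object cannot invalidate it; the offset is rebased against
+// masm_->CodeObject() again in SafeReturn.)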
+
+
+void RegExpMacroAssemblerPPC::Push(Register source) {
+ DCHECK(!source.is(backtrack_stackpointer()));
+ __ StorePU(source, MemOperand(backtrack_stackpointer(), -kPointerSize));
+}
+
+
+void RegExpMacroAssemblerPPC::Pop(Register target) {
+ DCHECK(!target.is(backtrack_stackpointer()));
+ __ LoadP(target, MemOperand(backtrack_stackpointer()));
+ __ addi(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerPPC::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ mov(r3, Operand(stack_limit));
+ __ LoadP(r3, MemOperand(r3));
+ __ cmpl(sp, r3);
+ SafeCall(&check_preempt_label_, le);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ mov(r3, Operand(stack_limit));
+ __ LoadP(r3, MemOperand(r3));
+ __ cmpl(backtrack_stackpointer(), r3);
+ SafeCall(&stack_overflow_label_, le);
+}
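+
+// (The backtrack stack grows downwards, so reaching or passing the limit
+// address means the stack must be grown; see the stack_overflow_label_
+// handler emitted in GetCode(), which calls ExternalReference::re_grow_stack.)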
+
+
+bool RegExpMacroAssemblerPPC::CanReadUnaligned() {
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+
+void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ // r25 is not being used to store the capture start index at this point.
+ __ addi(r25, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r25;
+ }
+ // The lwz, stw, lhz, sth instructions can do unaligned accesses, if the CPU
+ // and the operating system running on the target allow it.
+ // We assume we don't want to do unaligned loads on PPC, so this function
+ // must only be used to load a single character at a time.
+
+ DCHECK(characters == 1);
+ __ add(current_character(), end_of_input_address(), offset);
+ if (mode_ == LATIN1) {
+ __ lbz(current_character(), MemOperand(current_character()));
+ } else {
+ DCHECK(mode_ == UC16);
+ __ lhz(current_character(), MemOperand(current_character()));
+ }
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
new file mode 100644
index 0000000000..1f9c3a0f38
--- /dev/null
+++ b/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
@@ -0,0 +1,212 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+#define V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+
+#include "src/macro-assembler.h"
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/assembler-ppc-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerPPC(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerPPC();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address, Code* re_code,
+ Address re_frame);
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Register 25..31.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 7 * kPointerSize;
+ static const int kCallerFrame = kReturnAddress + kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kSecondaryReturnAddress =
+ kCallerFrame + kStackFrameExtraParamSlot * kPointerSize;
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
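+ // (Illustration: with this layout, regexp register i is addressed at
+ // fp + kRegisterZero - i * kPointerSize, which is what register_location()
+ // in regexp-macro-assembler-ppc.cc computes.)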
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The frame-pointer-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return r27; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return r28; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return r30; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return r29; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return r26; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to, Condition cond = al, CRegister cr = cr7);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+};
+
+// Set of non-volatile registers saved/restored by generated regexp code.
+const RegList kRegExpCalleeSaved =
+ 1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
+
+#endif // V8_INTERPRETED_REGEXP
+}
+} // namespace v8::internal
+
+#endif // V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
new file mode 100644
index 0000000000..0d10153790
--- /dev/null
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -0,0 +1,3803 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/ppc/constants-ppc.h"
+#include "src/ppc/frames-ppc.h"
+#include "src/ppc/simulator-ppc.h"
+
+#if defined(USE_SIMULATOR)
+
+// Only build the simulator if not compiling for real PPC hardware.
+namespace v8 {
+namespace internal {
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The PPCDebugger class is used by the simulator while debugging simulated
+// PowerPC code.
+class PPCDebugger {
+ public:
+ explicit PPCDebugger(Simulator* sim) : sim_(sim) {}
+ ~PPCDebugger();
+
+ void Stop(Instruction* instr);
+ void Info(Instruction* instr);
+ void Debug();
+
+ private:
+ static const Instr kBreakpointInstr = (TWI | 0x1f * B21);
+ static const Instr kNopInstr = (ORI); // ori, 0,0,0
+
+ Simulator* sim_;
+
+ intptr_t GetRegisterValue(int regnum);
+ double GetRegisterPairDoubleValue(int regnum);
+ double GetFPDoubleRegisterValue(int regnum);
+ bool GetValue(const char* desc, intptr_t* value);
+ bool GetFPDoubleValue(const char* desc, double* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* break_pc);
+ bool DeleteBreakpoint(Instruction* break_pc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+
+PPCDebugger::~PPCDebugger() {}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+    coverage_log = fopen(file_name, "a+");
+ }
+}
+
+
+void PPCDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+ reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *msg_address;
+ DCHECK(msg != NULL);
+
+ // Update this stop description.
+  if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", msg);
+ fflush(coverage_log);
+ }
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+ reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+ }
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+}
+
+#else // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {}
+
+
+void PPCDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ // use of kStopCodeMask not right on PowerPC
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ // Update this stop description.
+ if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+ sim_->watched_stops_[code].desc = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ PrintF("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+ Debug();
+}
+#endif
+
+
+void PPCDebugger::Info(Instruction* instr) {
+ // Retrieve the encoded address immediately following the Info breakpoint.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ PrintF("Simulator info %s\n", msg);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+}
+
+
+intptr_t PPCDebugger::GetRegisterValue(int regnum) {
+ return sim_->get_register(regnum);
+}
+
+
+double PPCDebugger::GetRegisterPairDoubleValue(int regnum) {
+ return sim_->get_double_from_register_pair(regnum);
+}
+
+
+double PPCDebugger::GetFPDoubleRegisterValue(int regnum) {
+ return sim_->get_double_from_d_register(regnum);
+}
+
+
+bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
+ int regnum = Registers::Number(desc);
+ if (regnum != kNoRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else {
+ if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" V8PRIxPTR,
+ reinterpret_cast<uintptr_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
+ 1;
+ }
+ }
+ return false;
+}
+
+
+bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
+ int regnum = FPRegisters::Number(desc);
+ if (regnum != kNoRegister) {
+ *value = sim_->get_double_from_d_register(regnum);
+ return true;
+ }
+ return false;
+}
+
+
+bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != NULL) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = break_pc;
+ sim_->break_instr_ = break_pc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+
+bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = NULL;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+
+void PPCDebugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+
+void PPCDebugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+
+void PPCDebugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // make sure to have a proper terminating character if reaching the limit
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+ // Disable tracing while simulating
+ bool trace = ::v8::internal::FLAG_trace_sim;
+ ::v8::internal::FLAG_trace_sim = false;
+
+ while (!done && !sim_->has_bad_pc()) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ intptr_t value;
+
+ // If at a breakpoint, proceed past it.
+ if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+ ->InstructionBits() == 0x7d821008) {
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ } else {
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+
+ if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
+ for (int i = 1; i < value; i++) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
+ buffer.start());
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // If at a breakpoint, proceed past it.
+ if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+ ->InstructionBits() == 0x7d821008) {
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ } else {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+ intptr_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF(" %3s: %08" V8PRIxPTR, Registers::Name(i), value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else if (i != 0 && !((i + 1) & 3)) {
+ PrintF("\n");
+ }
+ }
+ PrintF(" pc: %08" V8PRIxPTR " lr: %08" V8PRIxPTR
+ " "
+ "ctr: %08" V8PRIxPTR " xer: %08x cr: %08x\n",
+ sim_->special_reg_pc_, sim_->special_reg_lr_,
+ sim_->special_reg_ctr_, sim_->special_reg_xer_,
+ sim_->condition_reg_);
+ } else if (strcmp(arg1, "alld") == 0) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
+ Registers::Name(i), value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+ (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
+ } else if (!((i + 1) % 2)) {
+ PrintF("\n");
+ }
+ }
+ PrintF(" pc: %08" V8PRIxPTR " lr: %08" V8PRIxPTR
+ " "
+ "ctr: %08" V8PRIxPTR " xer: %08x cr: %08x\n",
+ sim_->special_reg_pc_, sim_->special_reg_lr_,
+ sim_->special_reg_ctr_, sim_->special_reg_xer_,
+ sim_->condition_reg_);
+ } else if (strcmp(arg1, "allf") == 0) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+ dvalue = GetFPDoubleRegisterValue(i);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%3s: %f 0x%08x %08x\n", FPRegisters::Name(i), dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ }
+ } else if (arg1[0] == 'r' &&
+ (arg1[1] >= '0' && arg1[1] <= '9' &&
+ (arg1[2] == '\0' || (arg1[2] >= '0' && arg1[2] <= '9' &&
+ arg1[3] == '\0')))) {
+ int regnum = strtoul(&arg1[1], 0, 10);
+ if (regnum != kNoRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+ value);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ if (GetValue(arg1, &value)) {
+ PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+ value);
+ } else if (GetFPDoubleValue(arg1, &dvalue)) {
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xffffffff));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF("print <register>\n");
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ intptr_t value;
+ OFStream os(stdout);
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj->Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "setpc") == 0) {
+ intptr_t value;
+
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ sim_->set_pc(value);
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ intptr_t* cur = NULL;
+ intptr_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<intptr_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ intptr_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<intptr_t*>(value);
+ next_arg++;
+ }
+
+ intptr_t words; // likely inaccurate variable name for 64bit
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%08" V8PRIxPTR ": 0x%08" V8PRIxPTR " %10" V8PRIdPTR,
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ intptr_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & 1) == 0) {
+ PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* prev = NULL;
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * Instruction::kInstrSize);
+ }
+ }
+ } else {
+ intptr_t value1;
+ intptr_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ prev = cur;
+ cur += dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
+ buffer.start());
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ intptr_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(NULL)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "cr") == 0) {
+ PrintF("Condition reg: %08x\n", sim_->condition_reg_);
+ } else if (strcmp(cmd, "lr") == 0) {
+ PrintF("Link reg: %08" V8PRIxPTR "\n", sim_->special_reg_lr_);
+ } else if (strcmp(cmd, "ctr") == 0) {
+ PrintF("Ctr reg: %08" V8PRIxPTR "\n", sim_->special_reg_ctr_);
+ } else if (strcmp(cmd, "xer") == 0) {
+ PrintF("XER: %08x\n", sim_->special_reg_xer_);
+ } else if (strcmp(cmd, "fpscr") == 0) {
+ PrintF("FPSCR: %08x\n", sim_->fp_condition_reg_);
+ } else if (strcmp(cmd, "stop") == 0) {
+ intptr_t value;
+ intptr_t stop_pc =
+ sim_->get_pc() - (Instruction::kInstrSize + kPointerSize);
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi [num instructions]\n");
+ PrintF(" step one/num instruction(s) (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to display all integer registers\n");
+ PrintF(
+ " use register name 'alld' to display integer registers "
+ "with decimal values\n");
+ PrintF(" use register name 'rN' to display register number 'N'\n");
+ PrintF(" add argument 'fp' to print register pair double values\n");
+ PrintF(
+ " use register name 'allf' to display floating-point "
+ "registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("cr\n");
+ PrintF(" print condition register\n");
+ PrintF("lr\n");
+ PrintF(" print link register\n");
+ PrintF("ctr\n");
+ PrintF(" print ctr register\n");
+ PrintF("xer\n");
+ PrintF(" print XER\n");
+ PrintF("fpscr\n");
+ PrintF(" print FPSCR\n");
+ PrintF("stack [<num words>]\n");
+        PrintF("  dump stack content, default dump is 10 words\n");
+ PrintF("mem <address> [<num words>]\n");
+        PrintF("  dump memory content, default dump is 10 words\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("trace (alias 't')\n");
+        PrintF("  toggle the tracing of all executed instructions\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+        PrintF("    stop and give control to the PPCDebugger.\n");
+ PrintF(" The first %d stop codes are watched:\n",
+ Simulator::kNumOfWatchedStops);
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+        PrintF("    - The Simulator keeps track of how many times they\n");
+        PrintF("      are hit. (See the info command.) Hitting a\n");
+        PrintF("      disabled stop still increases its counter.\n");
+ PrintF(" Commands:\n");
+        PrintF("      stop info all/<code> : print info about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+ // Restore tracing
+ ::v8::internal::FLAG_trace_sim = trace;
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+static bool ICacheMatch(void* one, void* two) {
+ DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+ size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
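+  // Round the range down to a cache-line boundary and pad the size up to a
+  // whole number of lines, then invalidate it one page at a time.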
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ(0, static_cast<int>(start & CachePage::kPageMask));
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
+
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry =
+ i_cache->Lookup(page, ICacheHash(page), true);
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size) {
+ DCHECK(size <= CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK((start & CachePage::kLineMask) == 0);
+ DCHECK((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0,
+ memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), Instruction::kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
+ Initialize(isolate);
+// Set up simulator support first. Some of this information is needed to
+// set up the architecture state.
+#if V8_TARGET_ARCH_PPC64
+ size_t stack_size = 2 * 1024 * 1024; // allocate 2MB for stack
+#else
+ size_t stack_size = 1 * 1024 * 1024; // allocate 1MB for stack
+#endif
+ stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_pc_ = NULL;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumGPRs; i++) {
+ registers_[i] = 0;
+ }
+ condition_reg_ = 0;
+ fp_condition_reg_ = 0;
+ special_reg_pc_ = 0;
+ special_reg_lr_ = 0;
+ special_reg_ctr_ = 0;
+
+ // Initializing FP registers.
+ for (int i = 0; i < kNumFPRs; i++) {
+ fp_registers_[i] = 0.0;
+ }
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<intptr_t>(stack_) + stack_size - 64;
+ InitializeCoverage();
+
+ last_debugger_input_ = NULL;
+}
+
+
+Simulator::~Simulator() {}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a svc (Supervisor Call) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
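+// When that svc executes, the simulator maps it back to the owning
+// Redirection (and hence the original function and call type) via
+// FromSwiInstruction(); see Simulator::SoftwareInterrupt below.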
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
+ type_(type),
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->FlushICache(
+ isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_swi_instruction() {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
+
+ void* external_function() { return external_function_; }
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ DCHECK_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+ char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+ char* addr_of_redirection =
+ addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(intptr_t reg) {
+ Redirection* redirection = FromSwiInstruction(
+ reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ return redirection->external_function();
+ }
+
+ private:
+ void* external_function_;
+ uint32_t swi_instruction_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_swi_instruction();
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+// Sets the register in the architecture state.
+void Simulator::set_register(int reg, intptr_t value) {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ registers_[reg] = value;
+}
+
+
+// Get the register from the architecture state.
+intptr_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ // Stupid code added to avoid bug in GCC.
+ // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+ if (reg >= kNumGPRs) return 0;
+ // End stupid code.
+ return registers_[reg];
+}
+
+
+double Simulator::get_double_from_register_pair(int reg) {
+ DCHECK((reg >= 0) && (reg < kNumGPRs) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+#if !V8_TARGET_ARCH_PPC64 // doesn't make sense in 64bit mode
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(fp_registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+#endif
+ return (dm_val);
+}
+
+
+// Raw access to the PC register.
+void Simulator::set_pc(intptr_t value) {
+ pc_modified_ = true;
+ special_reg_pc_ = value;
+}
+
+
+bool Simulator::has_bad_pc() const {
+ return ((special_reg_pc_ == bad_lr) || (special_reg_pc_ == end_sim_pc));
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+intptr_t Simulator::get_pc() const { return special_reg_pc_; }
+
+
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer argument.
+// All are constructed here from d1, d2 and r3.
+void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
+ *x = get_double_from_d_register(1);
+ *y = get_double_from_d_register(2);
+ *z = get_register(3);
+}
+
+
+// The return value is in d1.
+void Simulator::SetFpResult(const double& result) { fp_registers_[1] = result; }
+
+
+void Simulator::TrashCallerSaveRegisters() {
+// We don't trash the registers with the return value.
+#if 0 // A good idea to trash volatile registers, needs to be done
+ registers_[2] = 0x50Bad4U;
+ registers_[3] = 0x50Bad4U;
+ registers_[12] = 0x50Bad4U;
+#endif
+}
+
+
+uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+}
+
+
+int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+}
+
+
+void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+
+void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+
+uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
+
+
+int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+}
+
+
+void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+
+void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+
+uint8_t Simulator::ReadBU(intptr_t addr) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+
+int8_t Simulator::ReadB(intptr_t addr) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+
+void Simulator::WriteB(intptr_t addr, uint8_t value) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+
+void Simulator::WriteB(intptr_t addr, int8_t value) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+
+intptr_t* Simulator::ReadDW(intptr_t addr) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return ptr;
+}
+
+
+void Simulator::WriteDW(intptr_t addr, int64_t value) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED();
+}
+
+
+// Calculate C flag value for additions.
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ uint32_t urest = 0xffffffffU - uleft;
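+  // urest is the headroom left above uleft before the 32-bit sum wraps; an
+  // incoming carry effectively shrinks that headroom by one.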
+
+ return (uright > urest) ||
+ (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+
+ return (uright > uleft);
+}
+
+
+// Calculate V flag value for additions and subtractions.
+bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
+ bool addition) {
+ bool overflow;
+ if (addition) {
+ // operands have the same sign
+ overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+ // and operands and result have different sign
+ &&
+ ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ } else {
+ // operands have different signs
+ overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+ // and first operand and result have different signs
+ &&
+ ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ }
+ return overflow;
+}
+
+
+#if !V8_TARGET_ARCH_PPC64
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the r4 result register contains a bogus
+// value, which is fine because it is caller-saved.
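+// (The BUILTIN_CALL case in SoftwareInterrupt below splits the 64-bit result
+// back into r3/r4, with the order depending on the target's endianness.)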
+typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+#else
+// For 64-bit, we need to be more explicit.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+struct ObjectPair {
+ intptr_t x;
+ intptr_t y;
+};
+
+typedef struct ObjectPair (*SimulatorRuntimeObjectPairCall)(
+ intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+ intptr_t arg5);
+#endif
+
+// These prototypes handle the four types of FP calls.
+typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
+ intptr_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
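+// The kCallRtRedirected case gathers the integer arguments (starting at the
+// register selected by arg0_regnum below) and, for FP calls, the values from
+// d1/d2 via GetFpArgs, then dispatches on the Redirection type; kBreakpoint
+// and kInfo drop into the PPCDebugger.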
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ int svc = instr->SvcValue();
+ switch (svc) {
+ case kCallRtRedirected: {
+ // Check if stack is aligned. Error if not aligned is reported below to
+ // include information on the function called.
+ bool stack_aligned =
+ (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
+ 0;
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ const int kArgCount = 6;
+ int arg0_regnum = 3;
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ intptr_t result_buffer = 0;
+ if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
+ result_buffer = get_register(r3);
+ arg0_regnum++;
+ }
+#endif
+ intptr_t arg[kArgCount];
+ for (int i = 0; i < kArgCount; i++) {
+ arg[i] = get_register(arg0_regnum + i);
+ }
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ intptr_t saved_lr = special_reg_lr_;
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ intptr_t ival; // zero or one integer parameters
+ int iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(generic_target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(generic_target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
+ FUNCTION_ADDR(generic_target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r3, iresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", iresult);
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg[0]);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg[0], Redirection::ReverseRedirection(arg[1]));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ // See callers of MacroAssembler::CallApiFunctionAndReturn for
+ // explanation of register usage.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+#if !ABI_PASSES_HANDLES_IN_REGS
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+#endif
+ target(arg[0], arg[1]);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p args %08" V8PRIxPTR
+ " %08" V8PRIxPTR " %08" V8PRIxPTR,
+ reinterpret_cast<void*>(external), arg[0], arg[1], arg[2]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+#if !ABI_PASSES_HANDLES_IN_REGS
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+#endif
+ target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
+ } else {
+ // builtin call.
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ PrintF(
+ "Call to host function at %p,\n"
+ "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+ FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5]);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+ get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+#if !V8_TARGET_ARCH_PPC64
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ int64_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+#if V8_TARGET_BIG_ENDIAN
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x\n", hi_res);
+ }
+ set_register(r3, hi_res);
+ set_register(r4, lo_res);
+#else
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x\n", lo_res);
+ }
+ set_register(r3, lo_res);
+ set_register(r4, hi_res);
+#endif
+#else
+ if (redirection->type() == ExternalReference::BUILTIN_CALL) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
+ }
+ set_register(r3, result);
+ } else {
+ DCHECK(redirection->type() ==
+ ExternalReference::BUILTIN_OBJECTPAIR_CALL);
+ SimulatorRuntimeObjectPairCall target =
+ reinterpret_cast<SimulatorRuntimeObjectPairCall>(external);
+ struct ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n", result.x,
+ result.y);
+ }
+#if ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+ set_register(r3, result.x);
+ set_register(r4, result.y);
+#else
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(struct ObjectPair));
+#endif
+ }
+#endif
+ }
+ set_pc(saved_lr);
+ break;
+ }
+ case kBreakpoint: {
+ PPCDebugger dbg(this);
+ dbg.Debug();
+ break;
+ }
+ case kInfo: {
+ PPCDebugger dbg(this);
+ dbg.Info(instr);
+ break;
+ }
+    // stop uses all codes greater than or equal to 1 << 23.
+ default: {
+ if (svc >= (1 << 23)) {
+ uint32_t code = svc & kStopCodeMask;
+ if (isWatchedStop(code)) {
+ IncreaseStopCounter(code);
+ }
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ PPCDebugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + Instruction::kInstrSize + kPointerSize);
+ }
+ } else {
+ // This is not a valid svc code.
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
+// Stop helper functions.
+bool Simulator::isStopInstruction(Instruction* instr) {
+ return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
+}
+
+
+bool Simulator::isWatchedStop(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ return code < kNumOfWatchedStops;
+}
+
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ // Unwatched stops are always enabled.
+ return !isWatchedStop(code) ||
+ !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+ DCHECK(isWatchedStop(code));
+ if (!isEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+ DCHECK(isWatchedStop(code));
+ if (isEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ DCHECK(isWatchedStop(code));
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+ PrintF(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+ DCHECK(code <= kMaxStopCode);
+ if (!isWatchedStop(code)) {
+ PrintF("Stop not watched.");
+ } else {
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+ state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+ }
+}
+
+
+void Simulator::SetCR0(intptr_t result, bool setSO) {
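+  // CR0 fields, most significant first: LT (0x80000000), GT (0x40000000),
+  // EQ (0x20000000), SO (0x10000000).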
+ int bf = 0;
+ if (result < 0) {
+ bf |= 0x80000000;
+ }
+ if (result > 0) {
+ bf |= 0x40000000;
+ }
+ if (result == 0) {
+ bf |= 0x20000000;
+ }
+ if (setSO) {
+ bf |= 0x10000000;
+ }
+ condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+}
+
+
+void Simulator::ExecuteBranchConditional(Instruction* instr) {
+ int bo = instr->Bits(25, 21) << 21;
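+  // BD occupies bits 15..2; shifting left by 18 then arithmetically right by
+  // 16 sign-extends the 14-bit displacement and scales it to a byte offset.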
+ int offset = (instr->Bits(15, 2) << 18) >> 16;
+ int condition_bit = instr->Bits(20, 16);
+ int condition_mask = 0x80000000 >> condition_bit;
+ switch (bo) {
+ case DCBNZF: // Decrement CTR; branch if CTR != 0 and condition false
+ case DCBEZF: // Decrement CTR; branch if CTR == 0 and condition false
+ UNIMPLEMENTED();
+ case BF: { // Branch if condition false
+ if (!(condition_reg_ & condition_mask)) {
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ }
+ break;
+ }
+ case DCBNZT: // Decrement CTR; branch if CTR != 0 and condition true
+ case DCBEZT: // Decrement CTR; branch if CTR == 0 and condition true
+ UNIMPLEMENTED();
+ case BT: { // Branch if condition true
+ if (condition_reg_ & condition_mask) {
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ }
+ break;
+ }
+ case DCBNZ: // Decrement CTR; branch if CTR != 0
+ case DCBEZ: // Decrement CTR; branch if CTR == 0
+ special_reg_ctr_ -= 1;
+ if ((special_reg_ctr_ == 0) == (bo == DCBEZ)) {
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ }
+ break;
+ case BA: { // Branch always
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ break;
+ }
+ default:
+ UNIMPLEMENTED(); // Invalid encoding
+ }
+}
+
+
+// Handle execution based on instruction types.
+void Simulator::ExecuteExt1(Instruction* instr) {
+ switch (instr->Bits(10, 1) << 1) {
+ case MCRF:
+ UNIMPLEMENTED(); // Not used by V8.
+ case BCLRX: {
+ // need to check BO flag
+ intptr_t old_pc = get_pc();
+ set_pc(special_reg_lr_);
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = old_pc + 4;
+ }
+ break;
+ }
+ case BCCTRX: {
+ // need to check BO flag
+ intptr_t old_pc = get_pc();
+ set_pc(special_reg_ctr_);
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = old_pc + 4;
+ }
+ break;
+ }
+ case CRNOR:
+ case RFI:
+ case CRANDC:
+ UNIMPLEMENTED();
+ case ISYNC: {
+ // todo - simulate isync
+ break;
+ }
+ case CRXOR: {
+ int bt = instr->Bits(25, 21);
+ int ba = instr->Bits(20, 16);
+ int bb = instr->Bits(15, 11);
+ int ba_val = ((0x80000000 >> ba) & condition_reg_) == 0 ? 0 : 1;
+ int bb_val = ((0x80000000 >> bb) & condition_reg_) == 0 ? 0 : 1;
+ int bt_val = ba_val ^ bb_val;
+ bt_val = bt_val << (31 - bt); // shift bit to correct destination
+ condition_reg_ &= ~(0x80000000 >> bt);
+ condition_reg_ |= bt_val;
+ break;
+ }
+ case CREQV: {
+ int bt = instr->Bits(25, 21);
+ int ba = instr->Bits(20, 16);
+ int bb = instr->Bits(15, 11);
+ int ba_val = ((0x80000000 >> ba) & condition_reg_) == 0 ? 0 : 1;
+ int bb_val = ((0x80000000 >> bb) & condition_reg_) == 0 ? 0 : 1;
+ int bt_val = 1 - (ba_val ^ bb_val);
+ bt_val = bt_val << (31 - bt); // shift bit to correct destination
+ condition_reg_ &= ~(0x80000000 >> bt);
+ condition_reg_ |= bt_val;
+ break;
+ }
+ case CRNAND:
+ case CRAND:
+ case CRORC:
+ case CROR:
+ default: {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ }
+}
+
+
+bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
+ bool found = true;
+
+ int opcode = instr->Bits(10, 1) << 1;
+ switch (opcode) {
+ case SRWX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t rs_val = get_register(rs);
+ uintptr_t rb_val = get_register(rb);
+ intptr_t result = rs_val >> (rb_val & 0x3f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SRDX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t rb_val = get_register(rb);
+ intptr_t result = rs_val >> (rb_val & 0x7f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#endif
+ case SRAW: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int32_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t result = rs_val >> (rb_val & 0x3f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SRAD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t result = rs_val >> (rb_val & 0x7f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#endif
+ case SRAWIX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ int sh = instr->Bits(15, 11);
+ int32_t rs_val = get_register(rs);
+ intptr_t result = rs_val >> sh;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case EXTSW: {
+ const int shift = kBitsPerPointer - 32;
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t ra_val = (rs_val << shift) >> shift;
+ set_register(ra, ra_val);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(ra_val);
+ }
+ break;
+ }
+#endif
+ case EXTSH: {
+ const int shift = kBitsPerPointer - 16;
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t ra_val = (rs_val << shift) >> shift;
+ set_register(ra, ra_val);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(ra_val);
+ }
+ break;
+ }
+ case EXTSB: {
+ const int shift = kBitsPerPointer - 8;
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t ra_val = (rs_val << shift) >> shift;
+ set_register(ra, ra_val);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(ra_val);
+ }
+ break;
+ }
+ case LFSUX:
+ case LFSX: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ int32_t val = ReadW(ra_val + rb_val, instr);
+ float* fptr = reinterpret_cast<float*>(&val);
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ if (opcode == LFSUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case LFDUX:
+ case LFDX: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + rb_val));
+ set_d_register_from_double(frt, *dptr);
+ if (opcode == LFDUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case STFSUX: {
+ case STFSX:
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+ WriteW(ra_val + rb_val, *p, instr);
+ if (opcode == STFSUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case STFDUX: {
+ case STFDX:
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ double frs_val = get_double_from_d_register(frs);
+ int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
+ WriteDW(ra_val + rb_val, *p);
+ if (opcode == STFDUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case SYNC: {
+ // todo - simulate sync
+ break;
+ }
+ case ICBI: {
+ // todo - simulate icbi
+ break;
+ }
+ default: {
+ found = false;
+ break;
+ }
+ }
+
+ if (found) return found;
+
+ found = true;
+ opcode = instr->Bits(10, 2) << 2;
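+  // Encodings such as SRADIX keep part of the shift amount in bit 1, so only
+  // bits 10..2 form the extended opcode for this second pass.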
+ switch (opcode) {
+ case SRADIX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+ intptr_t rs_val = get_register(rs);
+ intptr_t result = rs_val >> sh;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ default: {
+ found = false;
+ break;
+ }
+ }
+
+ return found;
+}
+
+
+bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
+ bool found = true;
+
+ int opcode = instr->Bits(9, 1) << 1;
+ switch (opcode) {
+ case TW: {
+ // used for call redirection in simulation mode
+ SoftwareInterrupt(instr);
+ break;
+ }
+ case CMP: {
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ intptr_t ra_val = get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ if (ra_val < rb_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > rb_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == rb_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ int32_t ra_val = get_register(ra);
+ int32_t rb_val = get_register(rb);
+ if (ra_val < rb_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > rb_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == rb_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case SUBFCX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t rb_val = get_register(rb);
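+ // Two's complement: ~ra_val + rb_val + 1 == rb_val - ra_val.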
+ uintptr_t alu_out = ~ra_val + rb_val + 1;
+ set_register(rt, alu_out);
+ // If the signs of rb_val and alu_out don't match, carry = 0
+ if ((alu_out ^ rb_val) & 0x80000000) {
+ special_reg_xer_ &= ~0xF0000000;
+ } else {
+ special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+ }
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case ADDCX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t rb_val = get_register(rb);
+ uintptr_t alu_out = ra_val + rb_val;
+ // Check overflow
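+ // The unsigned add carries out (sets CA in XER) exactly when
+ // rb_val > ~ra_val, i.e. when the sum wraps past the register width.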
+ if (~ra_val < rb_val) {
+ special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+ } else {
+ special_reg_xer_ &= ~0xF0000000;
+ }
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case MULHWX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+ int32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+ int64_t alu_out = (int64_t)ra_val * (int64_t)rb_val;
+ alu_out >>= 32;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(static_cast<intptr_t>(alu_out));
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case NEGX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ intptr_t ra_val = get_register(ra);
+ intptr_t alu_out = 1 + ~ra_val;
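+ // Negating the most negative value overflows: it has no positive
+ // counterpart in two's complement, and it is the only case that sets OV.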
+#if V8_TARGET_ARCH_PPC64
+ intptr_t one = 1; // work-around gcc
+ intptr_t kOverflowVal = (one << 63);
+#else
+ intptr_t kOverflowVal = kMinInt;
+#endif
+ set_register(rt, alu_out);
+ if (instr->Bit(10)) { // OE bit set
+ if (ra_val == kOverflowVal) {
+ special_reg_xer_ |= 0xC0000000; // set SO,OV
+ } else {
+ special_reg_xer_ &= ~0x40000000; // clear OV
+ }
+ }
+ if (instr->Bit(0)) { // RC bit set
+ bool setSO = (special_reg_xer_ & 0x80000000);
+ SetCR0(alu_out, setSO);
+ }
+ break;
+ }
+ case SLWX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t rs_val = get_register(rs);
+ uintptr_t rb_val = get_register(rb);
+ uint32_t result = rs_val << (rb_val & 0x3f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case SLDX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t rb_val = get_register(rb);
+ uintptr_t result = rs_val << (rb_val & 0x7f);
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ case MFVSRD: {
+ DCHECK(!instr->Bit(0));
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ double frt_val = get_double_from_d_register(frt);
+ int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
+ set_register(ra, *p);
+ break;
+ }
+ case MFVSRWZ: {
+ DCHECK(!instr->Bit(0));
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ double frt_val = get_double_from_d_register(frt);
+ int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
+ set_register(ra, static_cast<uint32_t>(*p));
+ break;
+ }
+ case MTVSRD: {
+ DCHECK(!instr->Bit(0));
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int64_t ra_val = get_register(ra);
+ double* p = reinterpret_cast<double*>(&ra_val);
+ set_d_register_from_double(frt, *p);
+ break;
+ }
+ case MTVSRWA: {
+ DCHECK(!instr->Bit(0));
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int64_t ra_val = static_cast<int32_t>(get_register(ra));
+ double* p = reinterpret_cast<double*>(&ra_val);
+ set_d_register_from_double(frt, *p);
+ break;
+ }
+ case MTVSRWZ: {
+ DCHECK(!instr->Bit(0));
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ uint64_t ra_val = static_cast<uint32_t>(get_register(ra));
+ double* p = reinterpret_cast<double*>(&ra_val);
+ set_d_register_from_double(frt, *p);
+ break;
+ }
+#endif
+ default: {
+ found = false;
+ break;
+ }
+ }
+
+ return found;
+}
+
+
+void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+ int opcode = instr->Bits(9, 1) << 1;
+ switch (opcode) {
+ case CNTLZWX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x80000000;
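+ // Count leading zeros by scanning from the most significant bit down to
+ // the first set bit.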
+ for (; n < 32; n++) {
+ if (bit & rs_val) break;
+ count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ if (instr->Bit(0)) { // RC Bit set
+ int bf = 0;
+ if (count > 0) {
+ bf |= 0x40000000;
+ }
+ if (count == 0) {
+ bf |= 0x20000000;
+ }
+ condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case CNTLZDX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x8000000000000000UL;
+ for (; n < 64; n++) {
+ if (bit & rs_val) break;
+ count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ if (instr->Bit(0)) { // RC Bit set
+ int bf = 0;
+ if (count > 0) {
+ bf |= 0x40000000;
+ }
+ if (count == 0) {
+ bf |= 0x20000000;
+ }
+ condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+ }
+ break;
+ }
+#endif
+ case ANDX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val & rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC Bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
+ case ANDCX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val & ~rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC Bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
+ case CMPL: {
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t rb_val = get_register(rb);
+ if (ra_val < rb_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > rb_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == rb_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ uint32_t ra_val = get_register(ra);
+ uint32_t rb_val = get_register(rb);
+ if (ra_val < rb_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > rb_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == rb_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case SUBFX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ intptr_t ra_val = get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rb_val - ra_val;
+ // todo - figure out underflow
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC Bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case ADDZEX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ intptr_t ra_val = get_register(ra);
+ if (special_reg_xer_ & 0x20000000) {
+ ra_val += 1;
+ }
+ set_register(rt, ra_val);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(ra_val);
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case NORX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = ~(rs_val | rb_val);
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
+ case MULLW: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+ int32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+ int32_t alu_out = ra_val * rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case MULLD: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int64_t ra_val = get_register(ra);
+ int64_t rb_val = get_register(rb);
+ int64_t alu_out = ra_val * rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+#endif
+ case DIVW: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int32_t ra_val = get_register(ra);
+ int32_t rb_val = get_register(rb);
+ bool overflow = (ra_val == kMinInt && rb_val == -1);
+ // result is undefined if divisor is zero or if operation
+ // is 0x80000000 / -1.
+ int32_t alu_out = (rb_val == 0 || overflow) ? -1 : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(10)) { // OE bit set
+ if (overflow) {
+ special_reg_xer_ |= 0xC0000000; // set SO,OV
+ } else {
+ special_reg_xer_ &= ~0x40000000; // clear OV
+ }
+ }
+ if (instr->Bit(0)) { // RC bit set
+ bool setSO = (special_reg_xer_ & 0x80000000);
+ SetCR0(alu_out, setSO);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case DIVD: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int64_t ra_val = get_register(ra);
+ int64_t rb_val = get_register(rb);
+ int64_t one = 1; // work-around gcc
+ int64_t kMinLongLong = (one << 63);
+ // result is undefined if divisor is zero or if operation
+ // is 0x80000000_00000000 / -1.
+ int64_t alu_out =
+ (rb_val == 0 || (ra_val == kMinLongLong && rb_val == -1))
+ ? -1
+ : ra_val / rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+#endif
+ case ADDX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ // int oe = instr->Bit(10);
+ intptr_t ra_val = get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = ra_val + rb_val;
+ set_register(rt, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ // todo - handle OE bit
+ break;
+ }
+ case XORX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val ^ rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
+ case ORX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ intptr_t alu_out = rs_val | rb_val;
+ set_register(ra, alu_out);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(alu_out);
+ }
+ break;
+ }
+ case MFSPR: {
+ int rt = instr->RTValue();
+ int spr = instr->Bits(20, 11);
+ if (spr != 256) {
+ UNIMPLEMENTED(); // Only LR supported
+ }
+ set_register(rt, special_reg_lr_);
+ break;
+ }
+ case MTSPR: {
+ int rt = instr->RTValue();
+ intptr_t rt_val = get_register(rt);
+ int spr = instr->Bits(20, 11);
+ if (spr == 256) {
+ special_reg_lr_ = rt_val;
+ } else if (spr == 288) {
+ special_reg_ctr_ = rt_val;
+ } else if (spr == 32) {
+ special_reg_xer_ = rt_val;
+ } else {
+ UNIMPLEMENTED(); // Only LR, CTR, and XER supported
+ }
+ break;
+ }
+ case MFCR: {
+ int rt = instr->RTValue();
+ set_register(rt, condition_reg_);
+ break;
+ }
+ case STWUX:
+ case STWX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ WriteW(ra_val + rb_val, rs_val, instr);
+ if (opcode == STWUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case STBUX:
+ case STBX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int8_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ WriteB(ra_val + rb_val, rs_val);
+ if (opcode == STBUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case STHUX:
+ case STHX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int16_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ WriteH(ra_val + rb_val, rs_val, instr);
+ if (opcode == STHUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case LWZX:
+ case LWZUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadWU(ra_val + rb_val, instr));
+ if (opcode == LWZUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case LDX:
+ case LDUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ intptr_t* result = ReadDW(ra_val + rb_val);
+ set_register(rt, *result);
+ if (opcode == LDUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case STDX:
+ case STDUX: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rs_val = get_register(rs);
+ intptr_t rb_val = get_register(rb);
+ WriteDW(ra_val + rb_val, rs_val);
+ if (opcode == STDUX) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+#endif
+ case LBZX:
+ case LBZUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadBU(ra_val + rb_val) & 0xFF);
+ if (opcode == LBZUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case LHZX:
+ case LHZUX: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ set_register(rt, ReadHU(ra_val + rb_val, instr) & 0xFFFF);
+ if (opcode == LHZUX) {
+ DCHECK(ra != 0 && ra != rt);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
+ }
+ case DCBF: {
+ // todo - simulate dcbf
+ break;
+ }
+ default: {
+ PrintF("Unimplemented: %08x\n", instr->InstructionBits());
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ }
+}
+
+
+void Simulator::ExecuteExt2(Instruction* instr) {
+ // First check the encodings that use bits 10..1 as the extended opcode
+ if (ExecuteExt2_10bit(instr)) return;
+ // Then fall back to the narrower (bits 9..1) encodings
+ if (ExecuteExt2_9bit_part1(instr)) return;
+ ExecuteExt2_9bit_part2(instr);
+}
+
+
+void Simulator::ExecuteExt4(Instruction* instr) {
+ switch (instr->Bits(5, 1) << 1) {
+ case FDIV: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val / frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FSUB: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val - frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FADD: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val + frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FSQRT: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::sqrt(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FSEL: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ int frc = instr->RCValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frc_val = get_double_from_d_register(frc);
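+ // fsel picks frc when fra >= 0.0; a NaN in fra compares false here and
+ // therefore selects frb.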
+ double frt_val = ((fra_val >= 0.0) ? frc_val : frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FMUL: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frc = instr->RCValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frc_val = get_double_from_d_register(frc);
+ double frt_val = fra_val * frc_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FMSUB: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ int frc = instr->RCValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frc_val = get_double_from_d_register(frc);
+ double frt_val = (fra_val * frc_val) - frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FMADD: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ int frc = instr->RCValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frc_val = get_double_from_d_register(frc);
+ double frt_val = (fra_val * frc_val) + frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ }
+ int opcode = instr->Bits(10, 1) << 1;
+ switch (opcode) {
+ case FCMPU: {
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ int cr = instr->Bits(25, 23);
+ int bf = 0;
+ if (fra_val < frb_val) {
+ bf |= 0x80000000;
+ }
+ if (fra_val > frb_val) {
+ bf |= 0x40000000;
+ }
+ if (fra_val == frb_val) {
+ bf |= 0x20000000;
+ }
+ if (std::isunordered(fra_val, frb_val)) {
+ bf |= 0x10000000;
+ }
+ int condition_mask = 0xF0000000 >> (cr * 4);
+ int condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ return;
+ }
+ case FRSP: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ // frsp rounds an 8-byte double-precision value to a 4-byte
+ // single-precision value; the rounding itself is ignored here
+ set_d_register_from_double(frt, frb_val);
+ if (instr->Bit(0)) { // RC bit set
+ // UNIMPLEMENTED();
+ }
+ return;
+ }
+ case FCFID: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double t_val = get_double_from_d_register(frb);
+ int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
+ double frt_val = static_cast<double>(*frb_val_p);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCTID: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ int64_t frt_val;
+ int64_t one = 1; // work-around gcc
+ int64_t kMinLongLong = (one << 63);
+ int64_t kMaxLongLong = kMinLongLong - 1;
+
+ if (frb_val > kMaxLongLong) {
+ frt_val = kMaxLongLong;
+ } else if (frb_val < kMinLongLong) {
+ frt_val = kMinLongLong;
+ } else {
+ switch (fp_condition_reg_ & kFPRoundingModeMask) {
+ case kRoundToZero:
+ frt_val = (int64_t)frb_val;
+ break;
+ case kRoundToPlusInf:
+ frt_val = (int64_t)std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frt_val = (int64_t)std::floor(frb_val);
+ break;
+ default:
+ frt_val = (int64_t)frb_val;
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ }
+ double* p = reinterpret_cast<double*>(&frt_val);
+ set_d_register_from_double(frt, *p);
+ return;
+ }
+ case FCTIDZ: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ int64_t frt_val;
+ int64_t one = 1; // work-around gcc
+ int64_t kMinLongLong = (one << 63);
+ int64_t kMaxLongLong = kMinLongLong - 1;
+
+ if (frb_val > kMaxLongLong) {
+ frt_val = kMaxLongLong;
+ } else if (frb_val < kMinLongLong) {
+ frt_val = kMinLongLong;
+ } else {
+ frt_val = (int64_t)frb_val;
+ }
+ double* p = reinterpret_cast<double*>(&frt_val);
+ set_d_register_from_double(frt, *p);
+ return;
+ }
+ case FCTIW:
+ case FCTIWZ: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ int64_t frt_val;
+ if (frb_val > kMaxInt) {
+ frt_val = kMaxInt;
+ } else if (frb_val < kMinInt) {
+ frt_val = kMinInt;
+ } else {
+ if (opcode == FCTIWZ) {
+ frt_val = (int64_t)frb_val;
+ } else {
+ switch (fp_condition_reg_ & kFPRoundingModeMask) {
+ case kRoundToZero:
+ frt_val = (int64_t)frb_val;
+ break;
+ case kRoundToPlusInf:
+ frt_val = (int64_t)std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frt_val = (int64_t)std::floor(frb_val);
+ break;
+ case kRoundToNearest:
+ frt_val = (int64_t)lround(frb_val);
+
+ // Round to even if exactly halfway. (lround rounds halfway cases away from zero)
+ if (std::fabs(static_cast<double>(frt_val) - frb_val) == 0.5 &&
+ (frt_val % 2)) {
+ frt_val += ((frt_val > 0) ? -1 : 1);
+ }
+
+ break;
+ default:
+ DCHECK(false);
+ frt_val = (int64_t)frb_val;
+ break;
+ }
+ }
+ }
+ double* p = reinterpret_cast<double*>(&frt_val);
+ set_d_register_from_double(frt, *p);
+ return;
+ }
+ case FNEG: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = -frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FMR: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case MTFSFI: {
+ int bf = instr->Bits(25, 23);
+ int imm = instr->Bits(15, 12);
+ int fp_condition_mask = 0xF0000000 >> (bf * 4);
+ fp_condition_reg_ &= ~fp_condition_mask;
+ fp_condition_reg_ |= (imm << (28 - (bf * 4)));
+ if (instr->Bit(0)) { // RC bit set
+ condition_reg_ &= 0xF0FFFFFF;
+ condition_reg_ |= (imm << 23);
+ }
+ return;
+ }
+ case MTFSF: {
+ int frb = instr->RBValue();
+ double frb_dval = get_double_from_d_register(frb);
+ int64_t* p = reinterpret_cast<int64_t*>(&frb_dval);
+ int32_t frb_ival = static_cast<int32_t>((*p) & 0xffffffff);
+ int l = instr->Bits(25, 25);
+ if (l == 1) {
+ fp_condition_reg_ = frb_ival;
+ } else {
+ UNIMPLEMENTED();
+ }
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ // int w = instr->Bits(16, 16);
+ // int flm = instr->Bits(24, 17);
+ }
+ return;
+ }
+ case MFFS: {
+ int frt = instr->RTValue();
+ int64_t lval = static_cast<int64_t>(fp_condition_reg_);
+ double* p = reinterpret_cast<double*>(&lval);
+ set_d_register_from_double(frt, *p);
+ return;
+ }
+ case FABS: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = std::fabs(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FRIM: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ double frb_val = get_double_from_d_register(frb);
+ int64_t floor_val = (int64_t)frb_val;
+ if (floor_val > frb_val) floor_val--;
+ double frt_val = static_cast<double>(floor_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ }
+ UNIMPLEMENTED(); // Not used by V8.
+}
+
+#if V8_TARGET_ARCH_PPC64
+void Simulator::ExecuteExt5(Instruction* instr) {
+ switch (instr->Bits(4, 2) << 2) {
+ case RLDICL: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uintptr_t rs_val = get_register(rs);
+ int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+ int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ DCHECK(sh >= 0 && sh <= 63);
+ DCHECK(mb >= 0 && mb <= 63);
+ // rotate left
+ uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t mask = 0xffffffffffffffff >> mb;
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ return;
+ }
+ case RLDICR: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uintptr_t rs_val = get_register(rs);
+ int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+ int me = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ DCHECK(sh >= 0 && sh <= 63);
+ DCHECK(me >= 0 && me <= 63);
+ // rotate left
+ uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t mask = 0xffffffffffffffff << (63 - me);
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ return;
+ }
+ case RLDIC: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uintptr_t rs_val = get_register(rs);
+ int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+ int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ DCHECK(sh >= 0 && sh <= 63);
+ DCHECK(mb >= 0 && mb <= 63);
+ // rotate left
+ uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh);
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ return;
+ }
+ case RLDIMI: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uintptr_t rs_val = get_register(rs);
+ intptr_t ra_val = get_register(ra);
+ int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+ int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ int me = 63 - sh;
+ // rotate left
+ uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
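+ // Build the 64-bit insert mask of ones from bit mb through bit me
+ // (IBM numbering, bit 0 is the MSB); mb > me + 1 denotes a wrapped range.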
+ uintptr_t mask = 0;
+ if (mb < me + 1) {
+ uintptr_t bit = 0x8000000000000000 >> mb;
+ for (; mb <= me; mb++) {
+ mask |= bit;
+ bit >>= 1;
+ }
+ } else if (mb == me + 1) {
+ mask = 0xffffffffffffffff;
+ } else { // mb > me+1
+ uintptr_t bit = 0x8000000000000000 >> (me + 1); // needs to be tested
+ mask = 0xffffffffffffffff;
+ for (; me < mb; me++) {
+ mask ^= bit;
+ bit >>= 1;
+ }
+ }
+ result &= mask;
+ ra_val &= ~mask;
+ result |= ra_val;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ return;
+ }
+ }
+ switch (instr->Bits(4, 1) << 1) {
+ case RLDCL: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ int rb = instr->RBValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t rb_val = get_register(rb);
+ int sh = (rb_val & 0x3f);
+ int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+ DCHECK(sh >= 0 && sh <= 63);
+ DCHECK(mb >= 0 && mb <= 63);
+ // rotate left
+ uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t mask = 0xffffffffffffffff >> mb;
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ return;
+ }
+ }
+ UNIMPLEMENTED(); // Not used by V8.
+}
+#endif
+
+
+void Simulator::ExecuteGeneric(Instruction* instr) {
+ int opcode = instr->OpcodeValue() << 26;
+ switch (opcode) {
+ case SUBFIC: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ intptr_t ra_val = get_register(ra);
+ int32_t im_val = instr->Bits(15, 0);
+ im_val = SIGN_EXT_IMM16(im_val);
+ intptr_t alu_out = im_val - ra_val;
+ set_register(rt, alu_out);
+ // todo - handle RC bit
+ break;
+ }
+ case CMPLI: {
+ int ra = instr->RAValue();
+ uint32_t im_val = instr->Bits(15, 0);
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ uintptr_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ uint32_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case CMPI: {
+ int ra = instr->RAValue();
+ int32_t im_val = instr->Bits(15, 0);
+ im_val = SIGN_EXT_IMM16(im_val);
+ int cr = instr->Bits(25, 23);
+ uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+ int L = instr->Bit(21);
+ if (L) {
+#endif
+ intptr_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else {
+ int32_t ra_val = get_register(ra);
+ if (ra_val < im_val) {
+ bf |= 0x80000000;
+ }
+ if (ra_val > im_val) {
+ bf |= 0x40000000;
+ }
+ if (ra_val == im_val) {
+ bf |= 0x20000000;
+ }
+ }
+#endif
+ uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+ uint32_t condition = bf >> (cr * 4);
+ condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+ break;
+ }
+ case ADDIC: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ uintptr_t ra_val = get_register(ra);
+ uintptr_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ uintptr_t alu_out = ra_val + im_val;
+ // Check overflow
+ if (~ra_val < im_val) {
+ special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+ } else {
+ special_reg_xer_ &= ~0xF0000000;
+ }
+ set_register(rt, alu_out);
+ break;
+ }
+ case ADDI: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t alu_out;
+ if (ra == 0) {
+ alu_out = im_val;
+ } else {
+ intptr_t ra_val = get_register(ra);
+ alu_out = ra_val + im_val;
+ }
+ set_register(rt, alu_out);
+ // todo - handle RC bit
+ break;
+ }
+ case ADDIS: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t im_val = (instr->Bits(15, 0) << 16);
+ intptr_t alu_out;
+ if (ra == 0) { // treat r0 as zero
+ alu_out = im_val;
+ } else {
+ intptr_t ra_val = get_register(ra);
+ alu_out = ra_val + im_val;
+ }
+ set_register(rt, alu_out);
+ break;
+ }
+ case BCX: {
+ ExecuteBranchConditional(instr);
+ break;
+ }
+ case BX: {
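+ // Bits 25..2 hold the 24-bit LI field; shifting it up by 8 and back down
+ // by 6 (arithmetically) sign-extends it and scales it to a byte offset.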
+ int offset = (instr->Bits(25, 2) << 8) >> 6;
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = get_pc() + 4;
+ }
+ set_pc(get_pc() + offset);
+ // todo - AA flag
+ break;
+ }
+ case EXT1: {
+ ExecuteExt1(instr);
+ break;
+ }
+ case RLWIMIX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uint32_t rs_val = get_register(rs);
+ int32_t ra_val = get_register(ra);
+ int sh = instr->Bits(15, 11);
+ int mb = instr->Bits(10, 6);
+ int me = instr->Bits(5, 1);
+ // rotate left
+ uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh));
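+ // Build the 32-bit insert mask of ones from bit mb through bit me
+ // (IBM numbering, bit 0 is the MSB); mb > me + 1 denotes a wrapped range.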
+ int mask = 0;
+ if (mb < me + 1) {
+ int bit = 0x80000000 >> mb;
+ for (; mb <= me; mb++) {
+ mask |= bit;
+ bit >>= 1;
+ }
+ } else if (mb == me + 1) {
+ mask = 0xffffffff;
+ } else { // mb > me+1
+ int bit = 0x80000000 >> (me + 1); // needs to be tested
+ mask = 0xffffffff;
+ for (; me < mb; me++) {
+ mask ^= bit;
+ bit >>= 1;
+ }
+ }
+ result &= mask;
+ ra_val &= ~mask;
+ result |= ra_val;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ case RLWINMX:
+ case RLWNMX: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ uint32_t rs_val = get_register(rs);
+ int sh = 0;
+ if (opcode == RLWINMX) {
+ sh = instr->Bits(15, 11);
+ } else {
+ int rb = instr->RBValue();
+ uint32_t rb_val = get_register(rb);
+ sh = (rb_val & 0x1f);
+ }
+ int mb = instr->Bits(10, 6);
+ int me = instr->Bits(5, 1);
+ // rotate left
+ uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh));
+ int mask = 0;
+ if (mb < me + 1) {
+ int bit = 0x80000000 >> mb;
+ for (; mb <= me; mb++) {
+ mask |= bit;
+ bit >>= 1;
+ }
+ } else if (mb == me + 1) {
+ mask = 0xffffffff;
+ } else { // mb > me+1
+ int bit = 0x80000000 >> (me + 1); // needs to be tested
+ mask = 0xffffffff;
+ for (; me < mb; me++) {
+ mask ^= bit;
+ bit >>= 1;
+ }
+ }
+ result &= mask;
+ set_register(ra, result);
+ if (instr->Bit(0)) { // RC bit set
+ SetCR0(result);
+ }
+ break;
+ }
+ case ORI: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val | im_val;
+ set_register(ra, alu_out);
+ break;
+ }
+ case ORIS: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val | (im_val << 16);
+ set_register(ra, alu_out);
+ break;
+ }
+ case XORI: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val ^ im_val;
+ set_register(ra, alu_out);
+ // todo - set condition based SO bit
+ break;
+ }
+ case XORIS: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val ^ (im_val << 16);
+ set_register(ra, alu_out);
+ break;
+ }
+ case ANDIx: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val & im_val;
+ set_register(ra, alu_out);
+ SetCR0(alu_out);
+ break;
+ }
+ case ANDISx: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ intptr_t rs_val = get_register(rs);
+ uint32_t im_val = instr->Bits(15, 0);
+ intptr_t alu_out = rs_val & (im_val << 16);
+ set_register(ra, alu_out);
+ SetCR0(alu_out);
+ break;
+ }
+ case EXT2: {
+ ExecuteExt2(instr);
+ break;
+ }
+
+ case LWZU:
+ case LWZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ set_register(rt, ReadWU(ra_val + offset, instr));
+ if (opcode == LWZU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LBZU:
+ case LBZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ set_register(rt, ReadB(ra_val + offset) & 0xFF);
+ if (opcode == LBZU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STWU:
+ case STW: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteW(ra_val + offset, rs_val, instr);
+ if (opcode == STWU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STBU:
+ case STB: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int8_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteB(ra_val + offset, rs_val);
+ if (opcode == STBU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LHZU:
+ case LHZ: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
+ set_register(rt, result);
+ if (opcode == LHZU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LHA:
+ case LHAU: {
+ UNIMPLEMENTED();
+ break;
+ }
+
+ case STHU:
+ case STH: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int16_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ WriteH(ra_val + offset, rs_val, instr);
+ if (opcode == STHU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LMW:
+ case STMW: {
+ UNIMPLEMENTED();
+ break;
+ }
+
+ case LFSU:
+ case LFS: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int32_t val = ReadW(ra_val + offset, instr);
+ float* fptr = reinterpret_cast<float*>(&val);
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+ if (opcode == LFSU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case LFDU:
+ case LFD: {
+ int frt = instr->RTValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + offset));
+ set_d_register_from_double(frt, *dptr);
+ if (opcode == LFDU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STFSU:
+ case STFS: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+ WriteW(ra_val + offset, *p, instr);
+ if (opcode == STFSU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case STFDU:
+ case STFD: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ double frs_val = get_double_from_d_register(frs);
+ int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
+ WriteDW(ra_val + offset, *p);
+ if (opcode == STFDU) {
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+
+ case EXT3:
+ UNIMPLEMENTED();
+ case EXT4: {
+ ExecuteExt4(instr);
+ break;
+ }
+
+#if V8_TARGET_ARCH_PPC64
+ case EXT5: {
+ ExecuteExt5(instr);
+ break;
+ }
+ case LD: {
+ int ra = instr->RAValue();
+ int rt = instr->RTValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
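+ // DS-form: the two low displacement bits select ld (0), ldu (1) or
+ // lwa (2), so they are masked out of the offset above.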
+ switch (instr->Bits(1, 0)) {
+ case 0: { // ld
+ intptr_t* result = ReadDW(ra_val + offset);
+ set_register(rt, *result);
+ break;
+ }
+ case 1: { // ldu
+ intptr_t* result = ReadDW(ra_val + offset);
+ set_register(rt, *result);
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ break;
+ }
+ case 2: { // lwa
+ intptr_t result = ReadW(ra_val + offset, instr);
+ set_register(rt, result);
+ break;
+ }
+ }
+ break;
+ }
+
+ case STD: {
+ int ra = instr->RAValue();
+ int rs = instr->RSValue();
+ int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+ int64_t rs_val = get_register(rs);
+ int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+ WriteDW(ra_val + offset, rs_val);
+ if (instr->Bit(0) == 1) { // This is the STDU form
+ DCHECK(ra != 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
+ }
+#endif
+
+ case FAKE_OPCODE: {
+ if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
+ int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
+ DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
+ PrintF("Hit stub-marker: %d (EMIT_STUB_MARKER)\n", marker_code);
+ } else {
+ int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
+ if (fake_opcode == fBKPT) {
+ PPCDebugger dbg(this);
+ PrintF("Simulator hit BKPT.\n");
+ dbg.Debug();
+ } else {
+ DCHECK(fake_opcode < fLastFaker);
+ PrintF("Hit fake opcode: %d (FAKE_OPCODE defined in constants-ppc.h)\n",
+ fake_opcode);
+ UNIMPLEMENTED();
+ }
+ }
+ break;
+ }
+
+ default: {
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+} // NOLINT
+
+
+void Simulator::Trace(Instruction* instr) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ PrintF("%05d %08" V8PRIxPTR " %s\n", icount_,
+ reinterpret_cast<intptr_t>(instr), buffer.start());
+}
+
+
+// Executes the current instruction.
+void Simulator::ExecuteInstruction(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
+ pc_modified_ = false;
+ if (::v8::internal::FLAG_trace_sim) {
+ Trace(instr);
+ }
+ int opcode = instr->OpcodeValue() << 26;
+ if (opcode == TWI) {
+ SoftwareInterrupt(instr);
+ } else {
+ ExecuteGeneric(instr);
+ }
+ if (!pc_modified_) {
+ set_pc(reinterpret_cast<intptr_t>(instr) + Instruction::kInstrSize);
+ }
+}
+
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ intptr_t program_counter = get_pc();
+
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ ExecuteInstruction(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ PPCDebugger dbg(this);
+ dbg.Debug();
+ } else {
+ ExecuteInstruction(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+
+void Simulator::CallInternal(byte* entry) {
+// Prepare to execute the code at entry
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // entry is the function descriptor
+ set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+#else
+ // entry is the instruction address
+ set_pc(reinterpret_cast<intptr_t>(entry));
+#endif
+
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ special_reg_lr_ = end_sim_pc;
+
+ // Remember the values of non-volatile registers.
+ intptr_t r2_val = get_register(r2);
+ intptr_t r13_val = get_register(r13);
+ intptr_t r14_val = get_register(r14);
+ intptr_t r15_val = get_register(r15);
+ intptr_t r16_val = get_register(r16);
+ intptr_t r17_val = get_register(r17);
+ intptr_t r18_val = get_register(r18);
+ intptr_t r19_val = get_register(r19);
+ intptr_t r20_val = get_register(r20);
+ intptr_t r21_val = get_register(r21);
+ intptr_t r22_val = get_register(r22);
+ intptr_t r23_val = get_register(r23);
+ intptr_t r24_val = get_register(r24);
+ intptr_t r25_val = get_register(r25);
+ intptr_t r26_val = get_register(r26);
+ intptr_t r27_val = get_register(r27);
+ intptr_t r28_val = get_register(r28);
+ intptr_t r29_val = get_register(r29);
+ intptr_t r30_val = get_register(r30);
+ intptr_t r31_val = get_register(fp);
+
+ // Set up the non-volatile registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ intptr_t callee_saved_value = icount_;
+ set_register(r2, callee_saved_value);
+ set_register(r13, callee_saved_value);
+ set_register(r14, callee_saved_value);
+ set_register(r15, callee_saved_value);
+ set_register(r16, callee_saved_value);
+ set_register(r17, callee_saved_value);
+ set_register(r18, callee_saved_value);
+ set_register(r19, callee_saved_value);
+ set_register(r20, callee_saved_value);
+ set_register(r21, callee_saved_value);
+ set_register(r22, callee_saved_value);
+ set_register(r23, callee_saved_value);
+ set_register(r24, callee_saved_value);
+ set_register(r25, callee_saved_value);
+ set_register(r26, callee_saved_value);
+ set_register(r27, callee_saved_value);
+ set_register(r28, callee_saved_value);
+ set_register(r29, callee_saved_value);
+ set_register(r30, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation
+ Execute();
+
+ // Check that the non-volatile registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(r2));
+ CHECK_EQ(callee_saved_value, get_register(r13));
+ CHECK_EQ(callee_saved_value, get_register(r14));
+ CHECK_EQ(callee_saved_value, get_register(r15));
+ CHECK_EQ(callee_saved_value, get_register(r16));
+ CHECK_EQ(callee_saved_value, get_register(r17));
+ CHECK_EQ(callee_saved_value, get_register(r18));
+ CHECK_EQ(callee_saved_value, get_register(r19));
+ CHECK_EQ(callee_saved_value, get_register(r20));
+ CHECK_EQ(callee_saved_value, get_register(r21));
+ CHECK_EQ(callee_saved_value, get_register(r22));
+ CHECK_EQ(callee_saved_value, get_register(r23));
+ CHECK_EQ(callee_saved_value, get_register(r24));
+ CHECK_EQ(callee_saved_value, get_register(r25));
+ CHECK_EQ(callee_saved_value, get_register(r26));
+ CHECK_EQ(callee_saved_value, get_register(r27));
+ CHECK_EQ(callee_saved_value, get_register(r28));
+ CHECK_EQ(callee_saved_value, get_register(r29));
+ CHECK_EQ(callee_saved_value, get_register(r30));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore non-volatile registers with the original value.
+ set_register(r2, r2_val);
+ set_register(r13, r13_val);
+ set_register(r14, r14_val);
+ set_register(r15, r15_val);
+ set_register(r16, r16_val);
+ set_register(r17, r17_val);
+ set_register(r18, r18_val);
+ set_register(r19, r19_val);
+ set_register(r20, r20_val);
+ set_register(r21, r21_val);
+ set_register(r22, r22_val);
+ set_register(r23, r23_val);
+ set_register(r24, r24_val);
+ set_register(r25, r25_val);
+ set_register(r26, r26_val);
+ set_register(r27, r27_val);
+ set_register(r28, r28_val);
+ set_register(r29, r29_val);
+ set_register(r30, r30_val);
+ set_register(fp, r31_val);
+}
+
+
+intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First eight arguments passed in registers r3-r10.
+ int reg_arg_count = (argument_count > 8) ? 8 : argument_count;
+ int stack_arg_count = argument_count - reg_arg_count;
+ for (int i = 0; i < reg_arg_count; i++) {
+ set_register(i + 3, va_arg(parameters, intptr_t));
+ }
+
+ // Remaining arguments passed on stack.
+ intptr_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
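+ // Reserve the required ABI frame slots plus room for the stack-passed
+ // arguments, then round the new stack pointer down to the activation
+ // frame alignment.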
+ intptr_t entry_stack =
+ (original_stack -
+ (kNumRequiredStackFrameSlots + stack_arg_count) * sizeof(intptr_t));
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ // +2 is a hack for the LR slot + old SP on PPC
+ intptr_t* stack_argument =
+ reinterpret_cast<intptr_t*>(entry_stack) + kStackFrameExtraParamSlot;
+ for (int i = 0; i < stack_arg_count; i++) {
+ stack_argument[i] = va_arg(parameters, intptr_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ intptr_t result = get_register(r3);
+ return result;
+}
+
+
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+ set_d_register_from_double(1, d0);
+ set_d_register_from_double(2, d1);
+ CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r3);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ return get_double_from_d_register(1);
+}
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ uintptr_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ uintptr_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+}
+} // namespace v8::internal
+
+#endif // USE_SIMULATOR
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
new file mode 100644
index 0000000000..98fe9a5351
--- /dev/null
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -0,0 +1,413 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Declares a Simulator for PPC instructions if we are not generating a native
+// PPC binary. This Simulator allows us to run and debug PPC code generation on
+// regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forwards to the real entry
+// on a PPC HW platform.
+
+#ifndef V8_PPC_SIMULATOR_PPC_H_
+#define V8_PPC_SIMULATOR_PPC_H_
+
+#include "src/allocation.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native ppc platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+ int, Address, int, void*, Isolate*);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type ppc_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ppc uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() {}
+};
+}
+} // namespace v8::internal
+
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+#include "src/ppc/constants-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+
+class Simulator {
+ public:
+ friend class PPCDebugger;
+ enum Register {
+ no_reg = -1,
+ r0 = 0,
+ sp,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ fp,
+ kNumGPRs = 32,
+ d0 = 0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ kNumFPRs = 32
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state.
+ void set_register(int reg, intptr_t value);
+ intptr_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ void set_d_register_from_double(int dreg, const double dbl) {
+ DCHECK(dreg >= 0 && dreg < kNumFPRs);
+ fp_registers_[dreg] = dbl;
+ }
+ double get_double_from_d_register(int dreg) { return fp_registers_[dreg]; }
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(intptr_t value);
+ intptr_t get_pc() const;
+
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ // Executes PPC instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // Call on program start.
+ static void Initialize(Isolate* isolate);
+
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ intptr_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_lr, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_lr = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the lr is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helper functions to set the conditional flags in the architecture state.
+ bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
+ bool BorrowFrom(int32_t left, int32_t right);
+ bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
+ bool addition);
+
+ // Helper functions to decode common "addressing" modes
+ int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+ int32_t GetImm(Instruction* instr, bool* carry_out);
+ void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
+ intptr_t* start_address, intptr_t* end_address);
+ void HandleRList(Instruction* instr, bool load);
+ void HandleVList(Instruction* inst);
+ void SoftwareInterrupt(Instruction* instr);
+
+ // Stop helper functions.
+ inline bool isStopInstruction(Instruction* instr);
+ inline bool isWatchedStop(uint32_t bkpt_code);
+ inline bool isEnabledStop(uint32_t bkpt_code);
+ inline void EnableStop(uint32_t bkpt_code);
+ inline void DisableStop(uint32_t bkpt_code);
+ inline void IncreaseStopCounter(uint32_t bkpt_code);
+ void PrintStopInfo(uint32_t code);
+
+ // Read and write memory.
+ inline uint8_t ReadBU(intptr_t addr);
+ inline int8_t ReadB(intptr_t addr);
+ inline void WriteB(intptr_t addr, uint8_t value);
+ inline void WriteB(intptr_t addr, int8_t value);
+
+ inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
+ inline int16_t ReadH(intptr_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
+ inline int32_t ReadW(intptr_t addr, Instruction* instr);
+ inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
+ inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+
+ intptr_t* ReadDW(intptr_t addr);
+ void WriteDW(intptr_t addr, int64_t value);
+
+ void Trace(Instruction* instr);
+ void SetCR0(intptr_t result, bool setSO = false);
+ void ExecuteBranchConditional(Instruction* instr);
+ void ExecuteExt1(Instruction* instr);
+ bool ExecuteExt2_10bit(Instruction* instr);
+ bool ExecuteExt2_9bit_part1(Instruction* instr);
+ void ExecuteExt2_9bit_part2(Instruction* instr);
+ void ExecuteExt2(Instruction* instr);
+ void ExecuteExt4(Instruction* instr);
+#if V8_TARGET_ARCH_PPC64
+ void ExecuteExt5(Instruction* instr);
+#endif
+ void ExecuteGeneric(Instruction* instr);
+
+ // Executes one instruction.
+ void ExecuteInstruction(Instruction* instr);
+
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+ // Runtime call support.
+ static void* RedirectExternalReference(
+ void* external_function, v8::internal::ExternalReference::Type type);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, intptr_t* z);
+ void SetFpResult(const double& result);
+ void TrashCallerSaveRegisters();
+
+ void CallInternal(byte* entry);
+
+ // Architecture state.
+ intptr_t registers_[kNumGPRs];
+ int32_t condition_reg_;
+ int32_t fp_condition_reg_;
+ intptr_t special_reg_lr_;
+ intptr_t special_reg_pc_;
+ intptr_t special_reg_ctr_;
+ int32_t special_reg_xer_;
+
+ double fp_registers_[kNumFPRs];
+
+ // Simulator support.
+ char* stack_;
+ bool pc_modified_;
+ int icount_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ // ICache simulation.
+ v8::internal::HashMap* i_cache_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ v8::internal::Isolate* isolate_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Breakpoint is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when it encounters the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint has been hit or passed over.
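+ // For example (illustrative), a count of (kStopDisabledBit | 3) denotes a
+ // stop that is currently disabled and has been hit three times.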
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+};
+
+
+// When running with the simulator, transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+ (intptr_t)p3, (intptr_t)p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current()) \
+ ->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+ (intptr_t)p3, (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, \
+ (intptr_t)p7, (intptr_t)NULL, (intptr_t)p8)
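+
+// A usage sketch (illustrative; 'code_entry' and the argument values are
+// hypothetical):
+//   Object* result =
+//       CALL_GENERATED_CODE(code_entry, arg0, arg1, arg2, arg3, arg4);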
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack won't cause stack overflow errors, since the simulator ignores the
+// c_limit.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
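+
+// A usage sketch (illustrative): when running under the simulator, the JS
+// stack limit should be derived from the simulator's own stack, e.g.
+//   uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(isolate, c_limit);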
+}
+} // namespace v8::internal
+
+#endif // !defined(USE_SIMULATOR)
+#endif // V8_PPC_SIMULATOR_PPC_H_