author     Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-08 14:30:41 +0200
committer  Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-12 13:49:54 +0200
commit     ab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree       498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/v8/src/x64
parent     4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
download   qtwebengine-chromium-ab0a50979b9eb4dfa3320eff7e187e41efedf7a9.tar.gz
Update Chromium to beta version 37.0.2062.68
Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca
Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/v8/src/x64')
-rw-r--r--  chromium/v8/src/x64/assembler-x64-inl.h | 125
-rw-r--r--  chromium/v8/src/x64/assembler-x64.cc | 668
-rw-r--r--  chromium/v8/src/x64/assembler-x64.h | 887
-rw-r--r--  chromium/v8/src/x64/builtins-x64.cc | 728
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.cc | 3357
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.h | 164
-rw-r--r--  chromium/v8/src/x64/codegen-x64.cc | 261
-rw-r--r--  chromium/v8/src/x64/codegen-x64.h | 67
-rw-r--r--  chromium/v8/src/x64/cpu-x64.cc | 45
-rw-r--r--  chromium/v8/src/x64/debug-x64.cc | 159
-rw-r--r--  chromium/v8/src/x64/deoptimizer-x64.cc | 172
-rw-r--r--  chromium/v8/src/x64/disasm-x64.cc | 113
-rw-r--r--  chromium/v8/src/x64/frames-x64.cc | 53
-rw-r--r--  chromium/v8/src/x64/frames-x64.h | 44
-rw-r--r--  chromium/v8/src/x64/full-codegen-x64.cc | 1734
-rw-r--r--  chromium/v8/src/x64/ic-x64.cc | 624
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.cc | 2528
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.h | 72
-rw-r--r--  chromium/v8/src/x64/lithium-gap-resolver-x64.cc | 71
-rw-r--r--  chromium/v8/src/x64/lithium-gap-resolver-x64.h | 31
-rw-r--r--  chromium/v8/src/x64/lithium-x64.cc | 1122
-rw-r--r--  chromium/v8/src/x64/lithium-x64.h | 726
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.cc | 2231
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.h | 269
-rw-r--r--  chromium/v8/src/x64/regexp-macro-assembler-x64.cc | 402
-rw-r--r--  chromium/v8/src/x64/regexp-macro-assembler-x64.h | 84
-rw-r--r--  chromium/v8/src/x64/simulator-x64.cc | 27
-rw-r--r--  chromium/v8/src/x64/simulator-x64.h | 32
-rw-r--r--  chromium/v8/src/x64/stub-cache-x64.cc | 2052
29 files changed, 7956 insertions, 10892 deletions
diff --git a/chromium/v8/src/x64/assembler-x64-inl.h b/chromium/v8/src/x64/assembler-x64-inl.h
index 073fcbe8e94..f1731af34b6 100644
--- a/chromium/v8/src/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/x64/assembler-x64-inl.h
@@ -1,49 +1,29 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
-#include "x64/assembler-x64.h"
+#include "src/x64/assembler-x64.h"
-#include "cpu.h"
-#include "debug.h"
-#include "v8memory.h"
+#include "src/cpu.h"
+#include "src/debug.h"
+#include "src/v8memory.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
// -----------------------------------------------------------------------------
// Implementation of Assembler
static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 6;
+// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
+static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
void Assembler::emitl(uint32_t x) {
@@ -97,7 +77,6 @@ void Assembler::emit_code_target(Handle<Code> target,
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsRuntimeEntry(rmode));
- ASSERT(isolate()->code_range()->exists());
RecordRelocInfo(rmode);
emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
}
@@ -205,14 +184,20 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
- CPU::FlushICache(pc, sizeof(int32_t));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc, sizeof(int32_t));
+ }
}
@@ -227,7 +212,6 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
Address Assembler::runtime_entry_at(Address pc) {
- ASSERT(isolate()->code_range()->exists());
return Memory::int32_at(pc) + isolate()->code_range()->start();
}
@@ -235,19 +219,20 @@ Address Assembler::runtime_entry_at(Address pc) {
// Implementation of RelocInfo
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+ bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(Address));
+ if (flush_icache) CPU::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(int32_t));
+ if (flush_icache) CPU::FlushICache(pc_, sizeof(int32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= static_cast<int32_t>(delta); // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
+ if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
}
}
}
@@ -255,7 +240,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -267,6 +252,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
@@ -276,10 +267,13 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -309,12 +303,16 @@ Address RelocInfo::target_reference() {
}
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -330,9 +328,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ if (target_address() != target) {
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+ }
}
@@ -349,12 +350,16 @@ Cell* RelocInfo::target_cell() {
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CPU::FlushICache(pc_, sizeof(Address));
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
@@ -369,7 +374,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -383,12 +388,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// movq(rsp, rbp); pop(rbp); ret(n); int3 *6
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
0xCC;
-#else
- return false;
-#endif
}
@@ -408,14 +409,16 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
+ icache_flush_mode);
}
@@ -474,14 +477,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
@@ -503,14 +504,12 @@ void RelocInfo::Visit(Heap* heap) {
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
diff --git a/chromium/v8/src/x64/assembler-x64.cc b/chromium/v8/src/x64/assembler-x64.cc
index bc875d67e83..59b027f5beb 100644
--- a/chromium/v8/src/x64/assembler-x64.cc
+++ b/chromium/v8/src/x64/assembler-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "macro-assembler.h"
-#include "serialize.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -38,60 +15,25 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
-uint64_t CpuFeatures::cross_compile_ = 0;
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-void CpuFeatures::Probe() {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- uint64_t probed_features = 0;
+void CpuFeatures::ProbeImpl(bool cross_compile) {
CPU cpu;
- if (cpu.has_sse41()) {
- probed_features |= static_cast<uint64_t>(1) << SSE4_1;
- }
- if (cpu.has_sse3()) {
- probed_features |= static_cast<uint64_t>(1) << SSE3;
- }
-
- // SSE2 must be available on every x64 CPU.
- ASSERT(cpu.has_sse2());
- probed_features |= static_cast<uint64_t>(1) << SSE2;
+ CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
+ CHECK(cpu.has_cmov()); // CMOV support is mandatory.
- // CMOV must be available on every x64 CPU.
- ASSERT(cpu.has_cmov());
- probed_features |= static_cast<uint64_t>(1) << CMOV;
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+ if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
// SAHF is not generally available in long mode.
- if (cpu.has_sahf()) {
- probed_features |= static_cast<uint64_t>(1) << SAHF;
- }
-
- uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
- supported_ = probed_features | platform_features;
- found_by_runtime_probing_only_
- = probed_features & ~kDefaultCpuFeatures & ~platform_features;
+ if (cpu.has_sahf() && FLAG_enable_sahf) supported_|= 1u << SAHF;
}
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -110,7 +52,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -416,9 +359,9 @@ void Assembler::GrowBuffer() {
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
- OS::MemMove(desc.buffer, buffer_, desc.instr_size);
- OS::MemMove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ MemMove(desc.buffer, buffer_, desc.instr_size);
+ MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
+ desc.reloc_size);
// Switch buffers.
if (isolate() != NULL &&
@@ -466,24 +409,30 @@ void Assembler::emit_operand(int code, const Operand& adr) {
// Assembler Instruction implementations.
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& op,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(opcode);
emit_operand(reg, op);
}
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op(byte opcode,
+ Register reg,
+ Register rm_reg,
+ int size) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
+ emit_rex(rm_reg, reg, size);
emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_rex_64(reg, rm_reg);
+ emit_rex(reg, rm_reg, size);
emit(opcode);
emit_modrm(reg, rm_reg);
}
@@ -519,37 +468,45 @@ void Assembler::arithmetic_op_16(byte opcode,
}
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
+void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg);
+ }
+ emit(opcode);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
+ if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(rm_reg, reg);
+ }
+ emit(opcode ^ 0x02);
emit_modrm(rm_reg, reg);
} else {
- emit_optional_rex_32(reg, rm_reg);
+ if (!reg.is_byte_register() || !rm_reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, rm_reg);
+ }
emit(opcode);
emit_modrm(reg, rm_reg);
}
}
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_modrm(subcode, dst);
@@ -566,9 +523,10 @@ void Assembler::immediate_arithmetic_op(byte subcode,
void Assembler::immediate_arithmetic_op(byte subcode,
const Operand& dst,
- Immediate src) {
+ Immediate src,
+ int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
@@ -620,43 +578,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
@@ -674,8 +595,8 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
Immediate src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst);
}
ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
@@ -684,15 +605,19 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
}
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+void Assembler::shift(Register dst,
+ Immediate shift_amount,
+ int subcode,
+ int size) {
EnsureSpace ensure_space(this);
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+ : is_uint5(shift_amount.value_));
if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xD1);
emit_modrm(subcode, dst);
} else {
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xC1);
emit_modrm(subcode, dst);
emit(shift_amount.value_);
@@ -700,38 +625,14 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
}
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
+void Assembler::shift(Register dst, int subcode, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xD3);
emit_modrm(subcode, dst);
}
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -750,6 +651,15 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsrl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -934,33 +844,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
@@ -999,84 +893,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::imul(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imul(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
@@ -1089,38 +942,22 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
@@ -1287,17 +1124,9 @@ void Assembler::jmp(const Operand& src) {
}
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
+void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
@@ -1305,9 +1134,19 @@ void Assembler::leal(Register dst, const Operand& src) {
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA1);
- emitp(value, mode);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emitp(value, mode);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA1);
+ emitp(value, mode);
+ // In 64-bit mode, need to zero extend the operand to 8 bytes.
+ // See 2.2.1.4 in Intel64 and IA32 Architectures Software
+ // Developer's Manual Volume 2.
+ emitl(0);
+ }
}
@@ -1448,18 +1287,11 @@ void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
}
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- if (RelocInfo::IsNone(rmode)) {
- movq(dst, reinterpret_cast<int64_t>(value));
- } else {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
- }
+void Assembler::movp(Register dst, void* value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ emitp(value, rmode);
}
@@ -1499,15 +1331,12 @@ void Assembler::movl(const Operand& dst, Label* src) {
}
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- AllowDeferredHandleDereference using_raw_address;
- ASSERT(!RelocInfo::IsNone(mode));
+void Assembler::movsxbl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!isolate()->heap()->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value.location(), mode);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBE);
+ emit_operand(dst, src);
}
@@ -1520,6 +1349,15 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
}
+void Assembler::movsxwl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBF);
+ emit_operand(dst, src);
+}
+
+
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1545,7 +1383,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
}
-void Assembler::movzxbq(Register dst, const Operand& src) {
+void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1556,26 +1394,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
+void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1583,8 +1405,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
-void Assembler::movzxwl(Register dst, Register src) {
+void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1607,17 +1431,10 @@ void Assembler::repmovsw() {
}
-void Assembler::repmovsl() {
+void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
+ emit_rex(size);
emit(0xA5);
}
@@ -1630,23 +1447,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1660,30 +1469,22 @@ void Assembler::nop() {
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -1761,14 +1562,14 @@ void Assembler::Nop(int n) {
}
-void Assembler::pop(Register dst) {
+void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
-void Assembler::pop(const Operand& dst) {
+void Assembler::popq(const Operand& dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -1782,14 +1583,14 @@ void Assembler::popfq() {
}
-void Assembler::push(Register src) {
+void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
-void Assembler::push(const Operand& src) {
+void Assembler::pushq(const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -1797,7 +1598,7 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Immediate value) {
+void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
emit(0x6A);
@@ -1809,7 +1610,7 @@ void Assembler::push(Immediate value) {
}
-void Assembler::push_imm32(int32_t imm32) {
+void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
@@ -1869,50 +1670,42 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchgq(Register dst, Register src) {
+void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
+ emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
- emit_rex_64(src, dst);
+ emit_rex(src, dst, size);
emit(0x87);
emit_modrm(src, dst);
}
}
-void Assembler::xchgl(Register dst, Register src) {
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_optional_rex_32(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_optional_rex_32(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
+ if (kPointerSize == kInt64Size) {
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emitp(dst, mode);
} else {
- emit_optional_rex_32(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
+ ASSERT(kPointerSize == kInt32Size);
+ emit(0xA3);
+ emitp(dst, mode);
+ // In 64-bit mode, need to zero extend the operand to 8 bytes.
+ // See 2.2.1.4 in Intel64 and IA32 Architectures Software
+ // Developer's Manual Volume 2.
+ emitl(0);
}
}
-void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA3);
- emitp(dst, mode);
-}
-
-
void Assembler::store_rax(ExternalReference ref) {
store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
@@ -1976,21 +1769,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
@@ -1998,10 +1791,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
@@ -2009,69 +1803,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
-void Assembler::testl(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
@@ -2435,6 +2188,7 @@ void Assembler::fnclex() {
void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
// in 64-bit mode. Test CpuID.
+ ASSERT(IsEnabled(SAHF));
EnsureSpace ensure_space(this);
emit(0x9E);
}
@@ -2788,6 +2542,16 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
}
+void Assembler::psllq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3028,6 +2792,16 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3130,16 +2904,10 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- // Don't record external references unless the heap will be serialized.
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !serializer_enabled() && !emit_debug_code()) {
+ return;
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
// Don't record pseudo relocation info for code age sequence mode.
return;
@@ -3171,6 +2939,20 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ ASSERT(!FLAG_enable_ool_constant_pool);
+ return;
+}
+
+
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3184,6 +2966,12 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/assembler-x64.h b/chromium/v8/src/x64/assembler-x64.h
index 1f1316fa829..4259e9b50ef 100644
--- a/chromium/v8/src/x64/assembler-x64.h
+++ b/chromium/v8/src/x64/assembler-x64.h
@@ -37,34 +37,13 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
-#include "serialize.h"
+#include "src/serialize.h"
namespace v8 {
namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -347,8 +326,8 @@ inline Condition NegateCondition(Condition cc) {
}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
switch (cc) {
case below:
return above;
@@ -368,7 +347,7 @@ inline Condition ReverseCondition(Condition cc) {
return greater_equal;
default:
return cc;
- };
+ }
}
@@ -378,6 +357,10 @@ inline Condition ReverseCondition(Condition cc) {
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(Smi* value) {
+ ASSERT(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+ }
private:
int32_t value_;
@@ -395,7 +378,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
- times_pointer_size = times_8
+ times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
@@ -454,84 +437,39 @@ class Operand BASE_EMBEDDED {
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-// Example:
-// if (assembler->IsSupported(SSE3)) {
-// CpuFeatureScope fscope(assembler, SSE3);
-// // Generate SSE3 floating point code.
-// } else {
-// // Generate standard SSE2 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- if (Check(f, cross_compile_)) return true;
- ASSERT(initialized_);
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == SAHF && !FLAG_enable_sahf) return false;
- return Check(f, supported_);
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- return Check(f, found_by_runtime_probing_only_);
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return Check(f, cross_compile_) ||
- (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- static bool VerifyCrossCompiling() {
- return cross_compile_ == 0;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- uint64_t mask = flag2set(f);
- return cross_compile_ == 0 ||
- (cross_compile_ & mask) == mask;
- }
-
- private:
- static bool Check(CpuFeature f, uint64_t set) {
- return (set & flag2set(f)) != 0;
- }
-
- static uint64_t flag2set(CpuFeature f) {
- return static_cast<uint64_t>(1) << f;
- }
-
- // Safe defaults include CMOV for X64. It is always available, if
- // anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_only_;
-
- static uint64_t cross_compile_;
-
- friend class ExternalReference;
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-#define ASSEMBLER_INSTRUCTION_LIST(V) \
- V(mov)
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(add) \
+ V(and) \
+ V(cmp) \
+ V(dec) \
+ V(idiv) \
+ V(imul) \
+ V(inc) \
+ V(lea) \
+ V(mov) \
+ V(movzxb) \
+ V(movzxw) \
+ V(neg) \
+ V(not) \
+ V(or) \
+ V(repmovs) \
+ V(sbb) \
+ V(sub) \
+ V(test) \
+ V(xchg) \
+ V(xor)
+
+
+// Shift instructions on operands/registers with kPointerSize, kInt32Size and
+// kInt64Size.
+#define SHIFT_INSTRUCTION_LIST(V) \
+ V(rol, 0x0) \
+ V(ror, 0x1) \
+ V(rcl, 0x2) \
+ V(rcr, 0x3) \
+ V(shl, 0x4) \
+ V(shr, 0x5) \
+ V(sar, 0x7) \
class Assembler : public AssemblerBase {
@@ -576,8 +514,25 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
+ static inline Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ static inline void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) ;
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target,
+ ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -586,8 +541,17 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
+ }
+
+ static inline RelocInfo::Mode RelocInfoNone() {
+ if (kPointerSize == kInt64Size) {
+ return RelocInfo::NONE64;
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ return RelocInfo::NONE32;
+ }
}
inline Handle<Object> code_target_object_handle_at(Address pc);
@@ -658,11 +622,26 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
+
+ STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kPointerSize); \
+ } \
+ \
+ template<class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template<class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
@@ -676,6 +655,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
@@ -692,15 +686,15 @@ class Assembler : public AssemblerBase {
void pushfq();
void popfq();
- void push(Immediate value);
+ void pushq(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
// 32 bit value, the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
+ void pushq_imm32(int32_t imm32);
+ void pushq(Register src);
+ void pushq(const Operand& src);
- void pop(Register dst);
- void pop(const Operand& dst);
+ void popq(Register dst);
+ void popq(const Operand& dst);
void enter(Immediate size);
void leave();
@@ -722,28 +716,26 @@ class Assembler : public AssemblerBase {
void movl(const Operand& dst, Label* src);
// Loads a pointer into a register with a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+ void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
+
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value);
void movq(Register dst, uint64_t value);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+ void movsxbl(Register dst, const Operand& src);
void movsxbq(Register dst, const Operand& src);
+ void movsxwl(Register dst, const Operand& src);
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
// Repeated moves.
void repmovsb();
void repmovsw();
- void repmovsl();
- void repmovsq();
+ void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsl() { emit_repmovs(kInt32Size); }
+ void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -755,59 +747,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src);
- // Exchange two registers
- void xchgq(Register dst, Register src);
- void xchgl(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -815,15 +754,15 @@ class Assembler : public AssemblerBase {
void cmpb_al(Immediate src);
void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
+ arithmetic_op_8(0x3A, dst, src);
}
void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
+ arithmetic_op_8(0x38, src, dst);
}
void cmpb(const Operand& dst, Immediate src) {
@@ -850,86 +789,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
@@ -938,108 +801,35 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
-
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void roll(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+ void instruction##p(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p_cl(Register dst) { \
+ shift(dst, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l_cl(Register dst) { \
+ shift(dst, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q_cl(Register dst) { \
+ shift(dst, subcode, kInt64Size); \
+ }
+ SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
+#undef DECLARE_SHIFT_INSTRUCTION
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1047,103 +837,9 @@ class Assembler : public AssemblerBase {
// Shifts src:dst right by cl bits, affecting only dst.
void shrd(Register dst, Register src);
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Register src) {
- arithmetic_op_32(0x29, src, dst);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
@@ -1152,61 +848,11 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrl(Register dst, Register src);
// Miscellaneous
void clc();
@@ -1252,9 +898,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1266,9 +909,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1399,6 +1039,8 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, byte imm8);
+
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1427,6 +1069,7 @@ class Assembler : public AssemblerBase {
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
+ void sqrtsd(XMMRegister dst, const Operand& src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
@@ -1464,6 +1107,12 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1491,6 +1140,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1597,6 +1253,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ void emit_rex(int size) {
+ if (size == kInt64Size) {
+ emit_rex_64();
+ } else {
+ ASSERT(size == kInt32Size);
+ }
+ }
+
template<class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
@@ -1655,14 +1319,16 @@ class Assembler : public AssemblerBase {
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
+ void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
+ void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg);
void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+ // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
+ void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
+ void arithmetic_op(byte opcode,
+ Register reg,
+ const Operand& rm_reg,
+ int size);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
Register dst,
@@ -1677,20 +1343,20 @@ class Assembler : public AssemblerBase {
void immediate_arithmetic_op_16(byte subcode,
const Operand& dst,
Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
+ // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
+ void immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src,
+ int size);
+ void immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src,
+ int size);
// Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
+ void shift(Register dst, Immediate shift_amount, int subcode, int size);
// Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
+ void shift(Register dst, int subcode, int size);
void emit_farith(int b1, int b2, int i);
@@ -1701,12 +1367,183 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Arithmetics
+ void emit_add(Register dst, Register src, int size) {
+ arithmetic_op(0x03, dst, src, size);
+ }
+
+ void emit_add(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x0, dst, src, size);
+ }
+
+ void emit_add(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x03, dst, src, size);
+ }
+
+ void emit_add(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x1, src, dst, size);
+ }
+
+ void emit_add(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x0, dst, src, size);
+ }
+
+ void emit_and(Register dst, Register src, int size) {
+ arithmetic_op(0x23, dst, src, size);
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x23, dst, src, size);
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x21, src, dst, size);
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x4, dst, src, size);
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x4, dst, src, size);
+ }
+
+ void emit_cmp(Register dst, Register src, int size) {
+ arithmetic_op(0x3B, dst, src, size);
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x3B, dst, src, size);
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x39, src, dst, size);
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x7, dst, src, size);
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x7, dst, src, size);
+ }
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+ // when size is 32.
+ void emit_idiv(Register src, int size);
+
+ // Signed multiply instructions.
+ // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+ void emit_imul(Register src, int size);
+ void emit_imul(Register dst, Register src, int size);
+ void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Register src, Immediate imm, int size);
+
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_lea(Register dst, const Operand& src, int size);
+
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Register src, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ arithmetic_op(0x0B, dst, src, size);
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x0B, dst, src, size);
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x9, src, dst, size);
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x1, dst, src, size);
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x1, dst, src, size);
+ }
+
+ void emit_repmovs(int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ arithmetic_op(0x1b, dst, src, size);
+ }
+
+ void emit_sub(Register dst, Register src, int size) {
+ arithmetic_op(0x2B, dst, src, size);
+ }
+
+ void emit_sub(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x5, dst, src, size);
+ }
+
+ void emit_sub(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x2B, dst, src, size);
+ }
+
+ void emit_sub(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x29, src, dst, size);
+ }
+
+ void emit_sub(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x5, dst, src, size);
+ }
+
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
+
+ // Exchange two registers
+ void emit_xchg(Register dst, Register src, int size);
+
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size && dst.code() == src.code()) {
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
+ arithmetic_op(0x33, dst, src, kInt32Size);
+ } else {
+ arithmetic_op(0x33, dst, src, size);
+ }
+ }
+
+ void emit_xor(Register dst, const Operand& src, int size) {
+ arithmetic_op(0x33, dst, src, size);
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x6, dst, src, size);
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ immediate_arithmetic_op(0x6, dst, src, size);
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ arithmetic_op(0x31, src, dst, size);
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
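Note on the size-parameterized emit_* helpers above: the addp/addl/addq, cmpp/cmpl/cmpq and subp/subl/subq mnemonics used throughout the builtins changes below forward to these emitters with kPointerSize, kInt32Size or kInt64Size, in the same spirit as the DECLARE_SHIFT_INSTRUCTION macro shown earlier in this hunk. The sketch below is illustrative only; the macro name and exact wrapper shape are assumptions and are not taken from this diff:

// Illustrative wrapper pattern (not verbatim from assembler-x64.h): each
// instruction gets a p/l/q variant that selects the operand size at the
// call site and forwards to the shared emitter.
#define DECLARE_ARITHMETIC_INSTRUCTION(instruction)     \
  void instruction##p(Register dst, Register src) {     \
    emit_##instruction(dst, src, kPointerSize);         \
  }                                                     \
  void instruction##l(Register dst, Register src) {     \
    emit_##instruction(dst, src, kInt32Size);           \
  }                                                     \
  void instruction##q(Register dst, Register src) {     \
    emit_##instruction(dst, src, kInt64Size);           \
  }
DECLARE_ARITHMETIC_INSTRUCTION(add)  // addp, addl, addq
DECLARE_ARITHMETIC_INSTRUCTION(cmp)  // cmpp, cmpl, cmpq
DECLARE_ARITHMETIC_INSTRUCTION(sub)  // subp, subl, subq
#undef DECLARE_ARITHMETIC_INSTRUCTION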
diff --git a/chromium/v8/src/x64/builtins-x64.cc b/chromium/v8/src/x64/builtins-x64.cc
index f4864f899ef..fa359c5bc6e 100644
--- a/chromium/v8/src/x64/builtins-x64.cc
+++ b/chromium/v8/src/x64/builtins-x64.cc
@@ -1,37 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -60,7 +38,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
__ PopReturnAddressTo(kScratchRegister);
- __ push(rdi);
+ __ Push(rdi);
__ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
@@ -68,40 +46,42 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+ MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(rcx);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
@@ -111,43 +91,41 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
- // Tail call to returned code.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
- GenerateTailCallToSharedCode(masm);
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -155,18 +133,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_inline_new) {
Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
-#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
ASSERT(kSmiTag == 0);
__ JumpIfSmi(rax, &rt_call);
@@ -183,30 +159,42 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpInstanceType(rax, JS_FUNCTION_TYPE);
__ j(equal, &rt_call);
- if (count_constructions) {
+ if (!is_api_function) {
Label allocate;
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCount::kShift +
+ Map::ConstructionCount::kSize == 32);
+ // Check if slack tracking is enabled.
+ __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
+ __ shrl(rsi, Immediate(Map::ConstructionCount::kShift));
+ __ j(zero, &allocate); // JSFunction::kNoSlackTracking
// Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
+ __ subl(FieldOperand(rax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCount::kShift));
- __ push(rax);
- __ push(rdi);
+ __ cmpl(rsi, Immediate(JSFunction::kFinishSlackTracking));
+ __ j(not_equal, &allocate);
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Push(rax);
+ __ Push(rdi);
- __ pop(rdi);
- __ pop(rax);
+ __ Push(rdi); // constructor
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ xorl(rsi, rsi); // JSFunction::kNoSlackTracking
__ bind(&allocate);
}
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shlp(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -214,35 +202,60 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ // rdi: start of next object (including memento if create_memento)
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ // rsi: slack tracking counter (non-API function case)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ movzxbq(rsi,
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ __ cmpl(rsi, Immediate(JSFunction::kNoSlackTracking));
+ __ j(equal, &no_inobject_slack_tracking);
+
+ // Allocate object with a slack.
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ // Fill the remaining fields with one pointer filler map.
+
+ __ bind(&no_inobject_slack_tracking);
+ }
+ if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ factory->allocation_memento_map());
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx);
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -251,7 +264,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -259,13 +272,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
// Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
@@ -290,9 +303,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
@@ -301,13 +314,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ movp(Operand(rcx, 0), rdx);
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -315,8 +328,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
// Continue with JSObject being successfully allocated
@@ -334,62 +347,93 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
+ // Must restore rsi (context) and rdi (constructor) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
+ __ movp(rbx, rax); // store result in rbx
+
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ __ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -407,11 +451,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
+ __ movp(rax, Operand(rsp, 0));
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
@@ -419,7 +463,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
@@ -427,13 +471,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
@@ -475,19 +514,19 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
+ __ movp(rax, r9);
// Load the previous frame pointer to access C argument on stack
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ __ movp(kScratchRegister, Operand(rbp, 0));
+ __ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
- __ movq(rdi, rdx);
+ __ movp(rdi, rdx);
#else // _WIN64
// GCC parameters in:
// rdi : entry (ignored)
@@ -496,7 +535,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rcx : argc
// r8 : argv
- __ movq(rdi, rsi);
+ __ movp(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the internal frame.
@@ -505,13 +544,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ Push(rdi);
+ __ Push(rdx);
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
+ __ movp(rax, rcx);
+ __ movp(rbx, r8);
#endif // _WIN64
// Current stack contents:
@@ -531,27 +570,24 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Set(rcx, 0); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
__ CallStub(&stub);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
}
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
@@ -573,19 +609,37 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyCompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
}
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+static void CallCompileOptimized(MacroAssembler* masm,
+ bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ Push(rdi);
+ // Function is also the parameter to the runtime call.
+ __ Push(rdi);
+ // Whether to compile in a background thread.
+ __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+ // Restore receiver.
+ __ Pop(rdi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
}
@@ -598,15 +652,15 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
+ __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ Popad();
__ ret(0);
@@ -633,23 +687,23 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// pointers.
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
- 1);
+ 2);
}
__ Popad();
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
@@ -672,12 +726,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ DropUnderReturnAddress(1); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
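The DropUnderReturnAddress(1) call above replaces the old pop(MemOperand(rsp, 0)) trick for discarding the state-offset slot sitting beneath the return address. A minimal sketch of what such a helper plausibly does, assuming a scratch register; the actual MacroAssembler implementation is not part of this diff:

// Illustrative only: remove stack_elements slots located under the return
// address, leaving the return address back on top of the stack.
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                            Register scratch) {
  ASSERT(stack_elements >= 1);
  PopReturnAddressTo(scratch);                          // save return address
  addp(rsp, Immediate(stack_elements * kPointerSize));  // discard the slots
  PushReturnAddressFrom(scratch);                       // restore return address
}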
@@ -701,7 +755,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -710,13 +764,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -753,12 +807,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
@@ -766,7 +820,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// if it is a function.
Label slow, non_function;
StackArgumentsAccessor args(rsp, rax);
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -776,10 +830,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Set(rdx, 0); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
@@ -790,8 +844,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
- __ movq(rbx, args.GetArgumentOperand(1));
+ // Compute the receiver in sloppy mode.
+ __ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -808,33 +862,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
// Restore the function to rdi.
- __ movq(rdi, args.GetReceiverOperand());
+ __ movp(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
- // Use the global receiver object from the called function as the
- // receiver.
__ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(args.GetArgumentOperand(1), rbx);
+ __ movp(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -851,37 +900,37 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(args.GetArgumentOperand(1), rdi);
+ __ movp(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
__ bind(&shift_arguments);
{ Label loop;
- __ movq(rcx, rax);
+ __ movp(rcx, rax);
+ StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ movp(args.GetArgumentOperand(0), rbx);
+ __ decp(rcx);
+ __ j(not_zero, &loop); // While non-zero.
+ __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
- __ push(rdi); // re-add proxy object as additional argument
+ __ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -896,20 +945,17 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -931,8 +977,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -940,21 +986,21 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// limit" is checked.
Label okay;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -962,24 +1008,24 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ movp(rbx, Operand(rbp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &push_receiver);
// Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
@@ -989,7 +1035,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1004,30 +1050,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
- // Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movp(rbx,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
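Everything between the strict-mode test and the push_receiver label implements the usual sloppy-mode receiver coercion: strict and native callees get the receiver untouched, null and undefined are replaced by the global receiver, objects pass through, and other primitives are boxed via the TO_OBJECT builtin. A hedged sketch of the overall decision (hypothetical types; parts of the corresponding checks fall in context lines not shown in these hunks):

    enum class ReceiverKind { kNullOrUndefined, kPrimitive, kObject };

    template <typename Value>
    Value ComputeApplyReceiver(bool strict_or_native, ReceiverKind kind,
                               Value receiver, Value global_receiver,
                               Value (*to_object)(Value)) {
      if (strict_or_native) return receiver;                        // push_receiver directly
      if (kind == ReceiverKind::kNullOrUndefined) return global_receiver;
      if (kind == ReceiverKind::kObject) return receiver;
      return to_object(receiver);                                   // call_to_object path
    }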
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ movp(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic =
@@ -1039,36 +1081,34 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
// Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
+ __ movp(rax, Operand(rbp, kIndexOffset));
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
+ __ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
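The loop between &loop and &entry keeps the current index and the limit in frame slots (kIndexOffset / kLimitOffset) and pushes one element per iteration. Stripped of the smi bookkeeping it is just this (a sketch, not the stub's actual data structures):

    #include <cstdint>
    #include <vector>

    void PushApplyArguments(std::vector<intptr_t>* js_stack,
                            const std::vector<intptr_t>& arguments,
                            int64_t limit) {
      for (int64_t index = 0; index != limit; ++index) {  // cmpp(rax, limit); j(not_equal, &loop)
        // The stub reloads the arguments object each iteration and fetches the
        // element through a keyed-load IC before pushing it.
        js_stack->push_back(arguments[static_cast<size_t>(index)]);
      }
    }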
- // Invoke the function.
+ // Call the function.
Label call_proxy;
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
- // Invoke the function proxy.
+ // Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1092,7 +1132,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
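The debug check relies on kSmiTag being 0: with that encoding a NULL word and a smi both have a clear low bit, so a single not-a-smi assertion rules out both a missing initial map and a smi stored in its slot. A small sketch of the tag test, assuming V8's usual one-bit tag:

    #include <cstdint>

    bool IsSmiOrNullWord(uintptr_t word) {
      const uintptr_t kSmiTagMask = 1;   // heap pointers carry a low tag bit of 1
      return (word & kSmiTagMask) == 0;  // true for smis and for NULL (0)
    }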
@@ -1122,7 +1162,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
@@ -1133,10 +1173,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1155,7 +1192,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
@@ -1163,13 +1200,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
@@ -1205,15 +1242,15 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
__ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
// Set properties and elements.
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
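The four stores above initialize every pointer-sized field of the String wrapper, which is exactly what the STATIC_ASSERT documents. A hypothetical C++ mirror of that layout (illustrative; the real object is a tagged heap object addressed through FieldOperand, not a struct):

    struct JSValueLayout {
      void* map;         // HeapObject::kMapOffset      <- string wrapper map (rcx)
      void* properties;  // JSObject::kPropertiesOffset <- empty fixed array
      void* elements;    // JSObject::kElementsOffset   <- empty fixed array
      void* value;       // JSValue::kValueOffset       <- the wrapped string (rbx)
    };
    static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
                  "mirrors JSValue::kSize == 4 * kPointerSize");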
@@ -1229,7 +1266,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ JumpIfSmi(rax, &convert_argument);
Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
__ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ IncrementCounter(counters->string_ctor_string_value(), 1);
__ jmp(&argument_is_string);
@@ -1238,12 +1275,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
__ jmp(&argument_is_string);
// Load the empty string into rbx, remove the receiver from the
@@ -1251,7 +1288,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
+ __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
@@ -1261,43 +1298,69 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
}
+static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
+ Label* stack_overflow) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdi: function (passed through to callee)
+ // -----------------------------------
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subp(rcx, rdx);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movp(rdx, rbx);
+ __ shlp(rdx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rdx);
+ __ j(less_equal, stack_overflow); // Signed comparison.
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
}
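Taken together, EnterArgumentsAdaptorFrame and LeaveArgumentsAdaptorFrame build and tear down a small fixed frame below the saved rbp. A rough picture of the slots, in push order (the rbp-relative offsets assume this port's 8-byte kPointerSize and are given for illustration, not as a definitive constant table):

    #include <cstdint>

    struct ArgumentsAdaptorFrameSketch {
      void*    saved_rbp;        // [rbp +  0]  pushq(rbp); movp(rbp, rsp)
      intptr_t sentinel;         // [rbp -  8]  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)
      void*    function;         // [rbp - 16]  Push(rdi)
      intptr_t actual_argc_smi;  // [rbp - 24]  Push(r8); read back via kLengthOffset
    };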
@@ -1306,18 +1369,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
+ // -- rdi: function (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
+ Label stack_overflow;
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
+
Label enough, too_few;
- __ cmpq(rax, rbx);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1326,15 +1392,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1345,28 +1411,28 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
@@ -1385,47 +1451,47 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(rdx);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope frame(masm, StackFrame::MANUAL);
+ EnterArgumentsAdaptorFrame(masm);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ int3();
+ }
}
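The two copy loops plus the undefined-fill loop amount to a simple padding rule: the callee always sees exactly the expected number of stack arguments, with missing ones filled in as undefined (the kDontAdaptArgumentsSentinel case bypasses adaptation entirely). A sketch of the net effect in plain C++ (not how V8 lays arguments out, just the shape of the result):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<intptr_t> AdaptArguments(const std::vector<intptr_t>& actual,
                                         std::size_t expected,
                                         intptr_t undefined_value) {
      std::vector<intptr_t> adapted(
          actual.begin(), actual.begin() + std::min(actual.size(), expected));
      adapted.resize(expected, undefined_value);  // "Fill remaining expected
                                                  //  arguments with undefined values."
      return adapted;
    }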
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Lookup and calculate pc offset.
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
- __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
- __ Integer32ToSmi(rdx, rdx);
-
- // Pass both function and pc offset as arguments.
- __ push(rax);
- __ push(rdx);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+ // Pass function as argument.
+ __ Push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
// Load deoptimization data from the code object.
- __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
- __ movq(Operand(rsp, 0), rax);
+ __ movq(StackOperandForReturnAddress(0), rax);
// And "return" to the OSR entry point of the function.
__ ret(0);
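The leap above folds the whole entry-point computation into one instruction: start from the tagged code object, step over the code header (minus the heap-object tag) to reach the first instruction, then add the OSR pc offset read from the deoptimization data. As scalar arithmetic (a sketch; the tag value of 1 is an assumption stated here, not something the diff spells out):

    #include <cstdint>

    uintptr_t OsrEntryAddress(uintptr_t tagged_code_object,
                              uint32_t code_header_size, uint32_t osr_pc_offset) {
      const uintptr_t kHeapObjectTag = 1;  // assumed low tag on heap pointers
      return tagged_code_object + (code_header_size - kHeapObjectTag) + osr_pc_offset;
    }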
@@ -1439,7 +1505,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/x64/code-stubs-x64.cc b/chromium/v8/src/x64/code-stubs-x64.cc
index 0c9a0f20cdd..0d54f89d16b 100644
--- a/chromium/v8/src/x64/code-stubs-x64.cc
+++ b/chromium/v8/src/x64/code-stubs-x64.cc
@@ -1,57 +1,41 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-#include "runtime.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/stub-cache.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rbx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdi };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -61,50 +45,51 @@ void ToNumberStub::InitializeInterfaceDescriptor(
void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
+ descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -115,7 +100,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
@@ -125,8 +109,27 @@ void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
}
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rbx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -136,7 +139,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx };
descriptor->register_param_count_ = 1;
@@ -145,21 +147,25 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
-void KeyedArrayCallStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void StringLengthStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rcx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rax, rcx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
- descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
}
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -170,7 +176,6 @@ void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx };
descriptor->register_param_count_ = 2;
@@ -180,26 +185,13 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// rax -- number of arguments
// rdi -- function
- // rbx -- type info cell with elements kind
+ // rbx -- allocation site with elements kind
static Register registers_variable_args[] = { rdi, rbx, rax };
static Register registers_no_args[] = { rdi, rbx };
@@ -211,18 +203,22 @@ static void InitializeArrayConstructorDescriptor(
descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
descriptor->stack_parameter_count_ = rax;
descriptor->register_param_count_ = 3;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
descriptor->register_params_ = registers_variable_args;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
@@ -240,59 +236,56 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->stack_parameter_count_ = rax;
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers_variable_args;
+ static Representation representations[] = {
+ Representation::Tagged(),
+ Representation::Integer32() };
+ descriptor->register_param_representations_ = representations;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeArrayConstructorDescriptor(descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeArrayConstructorDescriptor(descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeArrayConstructorDescriptor(descriptor, -1);
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+ InitializeInternalArrayConstructorDescriptor(descriptor, -1);
}
void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -300,12 +293,11 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
}
void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax };
descriptor->register_param_count_ = 1;
@@ -313,12 +305,11 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rcx, rax };
descriptor->register_param_count_ = 3;
@@ -329,7 +320,6 @@ void StoreGlobalStub::InitializeInterfaceDescriptor(
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rax, rbx, rcx, rdx };
descriptor->register_param_count_ = 4;
@@ -339,14 +329,118 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
-void NewStringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
+void BinaryOpICStub::InitializeInterfaceDescriptor(
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rdx, rax };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx, rdx, rax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { rdi, // JSFunction
+ rsi, // context
+ rax, // actual number of arguments
+ rbx, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { rsi, // context
+ rcx, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { rsi, // context
+ rdx, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { rax, // callee
+ rbx, // call_data
+ rcx, // holder
+ rdx, // api_function_address
+ rsi, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ }
}
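Each block in the new CallDescriptors::InitializeForIsolate records, per call kind, which physical register carries which logical parameter and with what representation. The ArgumentAdaptorCall entry, for instance, encodes the register assignment the adaptor trampoline earlier in this diff depends on; a hypothetical summary of just that mapping:

    struct ArgumentAdaptorCallRegisters {
      const char* function      = "rdi";  // Representation::Tagged()
      const char* context       = "rsi";  // Representation::Tagged()
      const char* actual_argc   = "rax";  // Representation::Integer32()
      const char* expected_argc = "rbx";  // Representation::Integer32()
    };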
@@ -355,10 +449,9 @@ void NewStringAddStub::InitializeInterfaceDescriptor(
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
+ isolate()->counters()->code_stubs()->Increment();
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
@@ -367,7 +460,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
rax.is(descriptor->register_params_[param_count - 1]));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ __ Push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
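GenerateLightweightMiss follows one fixed pattern driven entirely by the interface descriptor: inside an internal frame, push every declared register parameter, then call the descriptor's miss handler with that parameter count. A sketch of the control flow (hypothetical types, not V8's API):

    #include <cstddef>
    #include <vector>

    void LightweightMissSketch(std::vector<int>* stack,
                               const std::vector<int>& register_params,
                               void (*call_miss_handler)(std::size_t arg_count)) {
      for (int reg : register_params) {
        stack->push_back(reg);                    // __ Push(descriptor->register_params_[i])
      }
      call_miss_handler(register_params.size());  // __ CallExternalReference(miss, count)
    }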
@@ -377,121 +470,16 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Set up the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)] : function
- // [rsp + (2 * kPointerSize)] : serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ Allocate(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(1));
- // Get the serialized scope info from the stack.
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, kExpected0AsASmiSentinel);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles_);
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
__ LoadAddress(arg_reg_1,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
__ PopCallerSaved(save_doubles_);
__ ret(0);
@@ -523,7 +511,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
@@ -543,14 +531,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// is the return register, then save the temp register we use in its stead
// for the result.
Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
- __ push(scratch1);
- __ push(save_reg);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
__ movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ push(rcx);
+ if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
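The andl/shrl pair pulls the biased exponent out of the upper 32 bits of the double loaded from exponent_operand. The same extraction in portable C++ (constants written out numerically here; V8 spells them HeapNumber::kExponentMask and kExponentShift):

    #include <cstdint>
    #include <cstring>

    uint32_t BiasedExponent(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t upper_word = static_cast<uint32_t>(bits >> 32);  // what exponent_operand holds
      return (upper_word & 0x7ff00000u) >> 20;                  // 11 exponent bits
    }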
@@ -585,338 +573,32 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
}
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(rcx));
__ movl(final_result_reg, result_reg);
}
- __ pop(save_reg);
- __ pop(scratch1);
+ __ popq(save_reg);
+ __ popq(scratch1);
__ ret(0);
}
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8] : argument (should be number).
- // rsp[0] : return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
- // Input::
- // rsp[0] : return address.
- // xmm1 : untagged double input argument
- // Output:
- // xmm1 : untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- Label input_not_smi, loaded;
-
- // Test that rax is a number.
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ Cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded, Label::kNear);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ Move(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
- static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
- __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
- __ movl(Operand(rsp, 0), Immediate(0x00000000));
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -945,7 +627,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
@@ -954,8 +636,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(base, args.GetArgumentOperand(0));
- __ movq(exponent, args.GetArgumentOperand(1));
+ __ movp(base, args.GetArgumentOperand(0));
+ __ movp(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -997,8 +679,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
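The replacement check is a flag trick rather than a direct compare: cvttsd2si yields INT32_MIN (the "integer indefinite" value) for NaN and out-of-range inputs, and INT32_MIN is the only operand for which comparing against 1 sets the signed-overflow flag, so j(overflow, ...) routes exactly the same cases to the runtime as the old compare against 0x80000000 with j(equal, ...) did. In plain terms:

    #include <cstdint>

    // True exactly when the truncating SSE conversion may have been invalid.
    bool IsIndefiniteCvttsd2siResult(int32_t converted) {
      return converted == INT32_MIN;  // 0x80000000, the case j(overflow) detects
    }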
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -1075,7 +757,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
@@ -1102,12 +784,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
@@ -1115,7 +797,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
+ __ movp(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
@@ -1158,11 +840,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
if (exponent_type_ == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
@@ -1180,7 +862,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(2);
__ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 2);
+ ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
@@ -1201,7 +883,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->prototype_string());
+ __ Cmp(rax, isolate()->factory()->prototype_string());
__ j(not_equal, &miss);
receiver = rdx;
} else {
@@ -1221,99 +903,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ PopReturnAddressTo(scratch);
- __ push(receiver);
- __ push(value);
- __ PushReturnAddressFrom(scratch);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -1326,7 +915,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Smi instead of the context. We can't use SmiCompare here, because that
// only works for comparing two smis.
Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
@@ -1334,22 +923,22 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
__ SmiSub(rax, rax, rdx);
__ SmiToInteger32(rax, rax);
StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
// comparison to get negative check for free.
__ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1357,20 +946,20 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ SmiToInteger32(rcx, rcx);
StackArgumentsAccessor adaptor_args(rbx, rcx,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, adaptor_args.GetArgumentOperand(0));
+ __ movp(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
__ PopReturnAddressTo(rbx);
- __ push(rdx);
+ __ Push(rdx);
__ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// rsp[0] : return address
// rsp[8] : number of parameters (tagged)
@@ -1380,7 +969,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ SmiToInteger64(rbx, args.GetArgumentOperand(2));
@@ -1389,13 +978,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
+ __ movp(rcx, rbx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
@@ -1403,16 +992,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ SmiToInteger64(rcx,
Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
+ __ movp(rbx, rcx);
__ bind(&try_allocate);
@@ -1421,17 +1010,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
// 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+ __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
@@ -1440,18 +1029,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
// Get the arguments boilerplate from the current native context into rdi.
Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
__ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
__ bind(&copy);
// rax = address of new object (tagged)
@@ -1460,14 +1049,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rdi = address of boilerplate object (tagged)
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
+ __ movp(rdx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rdx);
}
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rdx, args.GetArgumentOperand(0));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -1475,15 +1064,15 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Note: rcx is tagged from here on.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
@@ -1492,17 +1081,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1517,11 +1106,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, args.GetArgumentOperand(2));
- __ subq(r8, r9);
+ __ addp(r8, args.GetArgumentOperand(2));
+ __ subp(r8, r9);
__ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movp(rdx, rdi);
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -1532,11 +1121,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&parameters_loop);
__ SmiSubConstant(r9, r9, Smi::FromInt(1));
__ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
+ __ movp(FieldOperand(rdx, kScratchRegister,
times_pointer_size,
kParameterMapHeaderSize),
r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
+ __ movp(FieldOperand(rdi, kScratchRegister,
times_pointer_size,
FixedArray::kHeaderSize),
r11);
@@ -1552,28 +1141,28 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Copy arguments header and remaining slots (if there are any).
__ Move(FieldOperand(rdi, FixedArray::kMapOffset),
factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(r8, rbx);
+ __ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
+ __ subp(rdx, Immediate(kPointerSize));
+ __ movp(r9, Operand(rdx, 0));
+ __ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
- __ addq(r8, Immediate(1));
+ __ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
@@ -1583,12 +1172,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
@@ -1596,22 +1185,22 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
}
@@ -1623,87 +1212,87 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
__ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(2));
+ __ movp(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(args.GetArgumentOperand(2), rcx);
+ __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(args.GetArgumentOperand(1), rdx);
+ __ movp(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+ __ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
+ __ movp(rbx, FieldOperand(rdi, i));
+ __ movp(FieldOperand(rax, i), rbx);
}
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ __ movp(rcx, args.GetArgumentOperand(2));
+ __ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
// Untag the length for the loop below.
__ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
+ __ addp(rdi, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -1712,7 +1301,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
}
@@ -1721,7 +1310,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1743,23 +1332,22 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
@@ -1786,10 +1374,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(r15, rdi); // Make a copy of the original subject string.
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// rax: RegExp data (FixedArray)
// rdi: subject string
@@ -1841,7 +1429,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
@@ -1849,10 +1437,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// (5a) Is subject sequential two byte? If yes, go to (9).
__ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
@@ -1861,14 +1449,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (5b) Is subject external? If yes, go to (8).
__ testb(rbx, Immediate(kStringRepresentationMask));
// The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ j(not_zero, &external_string); // Go to (8)
// (6) One byte sequential. Load regexp code for one byte.
__ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is one byte.
// (E) Carry on. String handling is done.
@@ -1888,7 +1476,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// We have to use r15 instead of rdi to load the length because rdi might
// have been only made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
+ __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
@@ -1899,7 +1487,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -1910,37 +1498,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ LoadAddress(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ ExternalReference::isolate_address(isolate()));
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
kScratchRegister);
// Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
+ __ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+ __ addp(r9, Operand(kScratchRegister, 0));
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
// Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
Immediate(0));
#else
__ Set(r9, 0);
#endif
// Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ r8, ExternalReference::address_of_static_offsets_vector(isolate()));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
#endif
// rdi: subject string
@@ -1951,31 +1539,31 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
// Argument 2: Previous index.
- __ movq(arg_reg_2, rbx);
+ __ movp(arg_reg_2, rbx);
// Argument 4: End of string data
// Argument 3: Start of string data
Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
+ __ addp(rbx, r14);
__ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg_reg_3); // Using arg3 as scratch.
+ __ addp(r14, arg_reg_3); // Using arg3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
@@ -1984,10 +1572,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// use rbp, which points exactly to one pointer size below the previous rsp.
// (Because creating a new stack frame pushes the previous rbp onto the stack
// and thereby moves up rsp by one kPointerSize.)
- __ movq(arg_reg_1, r15);
+ __ movp(arg_reg_1, r15);
// Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
__ LeaveApiExitFrame(true);
@@ -2012,8 +1600,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Load RegExp data.
__ bind(&success);
- __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
+ __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -2021,13 +1609,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
@@ -2042,19 +1630,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: number of capture registers
// Store the capture count.
__ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
+ __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movp(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movp(rax, rcx);
+ __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
rax,
@@ -2062,8 +1650,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
+ __ LoadAddress(
+ rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
@@ -2072,13 +1660,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
+ __ movp(FieldOperand(rbx,
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
@@ -2087,7 +1675,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ movq(rax, r15);
+ __ movp(rax, r15);
__ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
@@ -2096,14 +1684,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate);
+ Isolate::kPendingExceptionAddress, isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
+ __ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
@@ -2115,7 +1703,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2125,7 +1713,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
@@ -2133,10 +1721,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsIndirectStringMask));
__ Assert(zero, kExternalStringExpectedButNotFound);
}
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -2146,7 +1734,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
// (9) Two byte sequential. Load regexp code for two byte. Go to (E).
__ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
__ Set(rcx, 0); // Type is two byte.
__ jmp(&check_code); // Go to (E).
@@ -2160,97 +1748,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (11) Sliced string. Replace subject with parent. Go to (5a).
// Load offset into r14 and replace subject string with parent.
__ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
__ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(r8, args.GetArgumentOperand(0));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, args.GetArgumentOperand(2));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, args.GetArgumentOperand(1));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, args.GetArgumentOperand(0));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: undefined.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rcx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -2282,8 +1785,8 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register object,
Register scratch) {
__ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
+ __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbp(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -2294,7 +1797,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
Condition cc = GetCondition();
- Factory* factory = masm->isolate()->factory();
+ Factory* factory = isolate()->factory();
Label miss;
CheckInputType(masm, rdx, left_, &miss);
@@ -2303,11 +1806,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Compare two smis.
Label non_smi, smi_done;
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
__ ret(0);
__ bind(&non_smi);
@@ -2319,7 +1822,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
@@ -2359,7 +1862,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
@@ -2386,7 +1889,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
__ bind(&not_smis);
@@ -2437,7 +1940,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
- __ subq(rax, rcx);
+ __ subp(rax, rcx);
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2505,7 +2008,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// a heap object has the low bit clear.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
@@ -2530,8 +2033,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
@@ -2554,176 +2057,155 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : Feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label initialize, done, miss, megamorphic, not_array_function,
+ done_no_smi_convert;
// Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &not_array_function);
- // Arguments register must be smi-tagged to call out.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
- __ push(rdi);
- __ push(rbx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ CreateAllocationSiteStub create_stub(isolate);
+ __ CallStub(&create_stub);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_no_smi_convert);
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+
+ // We won't need rdx or rbx anymore, just save rdi
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ Pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
- // rdi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
- StackArgumentsAccessor args(rsp, argc_);
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- __ movq(rax, args.GetReceiverOperand());
- // Call as function is indicated with the hole.
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &call, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rcx);
- __ bind(&call);
- }
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+ // Do not transform the receiver for strict mode functions.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, cont);
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rcx.
+ __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, cont);
+}
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(equal, &call_as_function);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- }
+static void EmitSlowCase(Isolate* isolate,
+ MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ int argc,
+ Label* non_function) {
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ j(not_equal, non_function);
__ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
+ __ Push(rdi); // put proxy as additional argument under return address
__ PushReturnAddressFrom(rcx);
- __ Set(rax, argc_ + 1);
+ __ Set(rax, argc + 1);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
- __ bind(&non_function);
- __ movq(args.GetReceiverOperand(), rdi);
- __ Set(rax, argc_);
+ __ bind(non_function);
+ __ movp(args->GetReceiverOperand(), rdi);
+ __ Set(rax, argc);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
isolate->builtins()->ArgumentsAdaptorTrampoline();
@@ -2731,9 +2213,88 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+static void EmitWrapCase(MacroAssembler* masm,
+ StackArgumentsAccessor* args,
+ Label* cont) {
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(rdi);
+ }
+ __ movp(args->GetReceiverOperand(), rax);
+ __ jmp(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+ int argc, bool needs_checks,
+ bool call_as_method) {
+ // rdi : the function to call
+
+ // wrap_and_call can only be true if we are compiling a monomorphic method.
+ Isolate* isolate = masm->isolate();
+ Label slow, non_function, wrap, cont;
+ StackArgumentsAccessor args(rsp, argc);
+
+ if (needs_checks) {
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+ }
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc);
+
+ if (call_as_method) {
+ if (needs_checks) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+ }
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ if (needs_checks) {
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+ } else {
+ __ jmp(&wrap);
+ }
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ if (needs_checks) {
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
+ }
+
+ if (call_as_method) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, &args, &cont);
+ }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+}
+
+
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2745,14 +2306,34 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ SmiToInteger32(rdx, rdx);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into rbx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by rdx + 1.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(rbx);
}
// Jump to the function-specific construct stub.
Register jmp_reg = rcx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
+ __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
// rdi: called object
@@ -2770,12 +2351,162 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&do_call);
// Set expected number of arguments to zero (not changing rax).
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+ __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(vector, FieldOperand(vector,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+}
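
The helper above simply chases three pointers; roughly, with hypothetical
structs standing in for the real object model:

    struct FeedbackVector;
    struct SharedFunctionInfo { FeedbackVector* feedback_vector; };
    struct JSFunction { SharedFunctionInfo* shared; };
    struct Frame { JSFunction* function; };  // rbp + kFunctionOffset

    FeedbackVector* LoadTypeFeedbackVector(const Frame& frame) {
      return frame.function->shared->feedback_vector;
    }

    int main() {
      SharedFunctionInfo sfi{nullptr};
      JSFunction fn{&sfi};
      Frame frame{&fn};
      return LoadTypeFeedbackVector(frame) == nullptr ? 0 : 1;
    }
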
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+ // rdi - function
+ // rdx - slot id (as integer)
+ Label miss;
+ int argc = state_.arg_count();
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, rbx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &miss);
+
+ __ movq(rax, Immediate(arg_count()));
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+  // Verify that rbx contains an AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
+ ArrayConstructorStub stub(masm->isolate(), arg_count());
+ __ TailCallStub(&stub);
+
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+
+  // The slow case; we need this no matter what to complete a call after a miss.
+ CallFunctionNoFeedback(masm,
+ arg_count(),
+ true,
+ CallAsMethod());
+
+ // Unreachable.
+ __ int3();
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+ // rdi - function
+ // rbx - vector
+ // rdx - slot id
+ Isolate* isolate = masm->isolate();
+ Label extra_checks_or_miss, slow_start;
+ Label slow, non_function, wrap, cont;
+ Label have_js_function;
+ int argc = state_.arg_count();
+ StackArgumentsAccessor args(rsp, argc);
+ ParameterCount actual(argc);
+
+ EmitLoadTypeFeedbackVector(masm, rbx);
+
+ // The checks. First, does rdi match the recorded monomorphic target?
+ __ SmiToInteger32(rdx, rdx);
+ __ cmpq(rdi, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, &extra_checks_or_miss);
+
+ __ bind(&have_js_function);
+ if (state_.CallAsMethod()) {
+ EmitContinueIfStrictOrNative(masm, &cont);
+
+ // Load the receiver from the stack.
+ __ movp(rax, args.GetReceiverOperand());
+
+ __ JumpIfSmi(rax, &wrap);
+
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &wrap);
+
+ __ bind(&cont);
+ }
+
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ bind(&slow);
+ EmitSlowCase(isolate, masm, &args, argc, &non_function);
+
+ if (state_.CallAsMethod()) {
+ __ bind(&wrap);
+ EmitWrapCase(masm, &args, &cont);
+ }
+
+ __ bind(&extra_checks_or_miss);
+ Label miss;
+
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ j(equal, &slow_start);
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+ __ j(equal, &miss);
+
+ if (!FLAG_trace_ic) {
+ // We are going megamorphic, and we don't want to visit the runtime.
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ jmp(&slow_start);
+ }
+
+ // We are here because tracing is on or we are going monomorphic.
+ __ bind(&miss);
+ GenerateMiss(masm, IC::kCallIC_Miss);
+
+  // The slow case.
+ __ bind(&slow_start);
+ // Check that function is not a smi.
+ __ JumpIfSmi(rdi, &non_function);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+ __ jmp(&have_js_function);
+
+ // Unreachable
+ __ int3();
+}
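
The sentinel comparisons in the stub above act as a small per-slot state
machine. A hedged sketch of those transitions follows; the enum names are
descriptive rather than the literal V8 sentinels, and the runtime's exact
miss policy is simplified.

    #include <cstdio>

    enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

    struct Slot {
      SlotState state = SlotState::kUninitialized;
      const void* target = nullptr;  // recorded monomorphic call target
    };

    // Returns true when this call can take the monomorphic fast path.
    bool OnCall(Slot* slot, const void* callee, bool trace_ic) {
      if (slot->state == SlotState::kMonomorphic && slot->target == callee) {
        return true;                               // "have_js_function"
      }
      if (slot->state == SlotState::kMegamorphic) {
        return false;                              // "slow_start": generic call
      }
      if (slot->state == SlotState::kUninitialized || trace_ic) {
        // "miss": the runtime records the target (or may go megamorphic
        // itself); this call still completes through the generic path.
        slot->state = SlotState::kMonomorphic;
        slot->target = callee;
        return false;
      }
      // Monomorphic with a different target and tracing off: flip the slot
      // to megamorphic without visiting the runtime.
      slot->state = SlotState::kMegamorphic;
      slot->target = nullptr;
      return false;
    }

    int main() {
      Slot s;
      int a = 0, b = 0;
      std::printf("%d ", OnCall(&s, &a, false));   // 0: first call misses
      std::printf("%d ", OnCall(&s, &a, false));   // 1: monomorphic hit
      std::printf("%d\n", OnCall(&s, &b, false));  // 0: goes megamorphic
    }
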
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize));
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(rcx);
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+
+ // Call the entry.
+ ExternalReference miss = ExternalReference(IC_Utility(id),
+ masm->isolate());
+ __ CallExternalReference(miss, 4);
+
+    // Move result to rdi and exit the internal frame.
+ __ movp(rdi, rax);
+ }
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -2789,6 +2520,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -2797,40 +2529,35 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate);
+ CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ stub.GetCode();
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ save_doubles.GetCode();
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movq(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // Enter the exit frame that transitions from JavaScript to C++.
+#ifdef _WIN64
+ int arg_stack_space = (result_size_ < 2 ? 2 : 4);
+#else
+ int arg_stack_space = 0;
+#endif
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
+ // r15: argv pointer (C callee-saved).
// Simple results returned in rax (both AMD64 and Win64 calling conventions).
// Complex results must be written to address passed as first argument.
@@ -2841,25 +2568,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ CheckStackAlignment();
}
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
- __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
- __ movq(arg_reg_1, rax);
- __ Move(kScratchRegister,
- ExternalReference::perform_gc_function(masm->isolate()));
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
// Call C function.
#ifdef _WIN64
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
@@ -2868,36 +2576,28 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
- __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rcx, r14); // argc.
+ __ movp(rdx, r15); // argv.
+ __ Move(r8, ExternalReference::isolate_address(isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
+ __ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ movq(rdx, r14); // argc.
- __ movq(r8, r15); // argv.
- __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdx, r14); // argc.
+ __ movp(r8, r15); // argv.
+ __ Move(r9, ExternalReference::isolate_address(isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ movp(rdi, r14); // argc.
+ __ movp(rsi, r15); // argv.
+ __ Move(rdx, ExternalReference::isolate_address(isolate()));
#endif
__ call(rbx);
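
In C terms, the call just emitted passes argc (r14), argv (r15) and the
isolate address per the host ABI set up above; a simplified, assumed
signature — not the exact V8 declaration — looks like this:

    #include <cstdint>
    #include <cstdio>

    struct Isolate;               // opaque here
    using ObjectPtr = uintptr_t;  // stand-in for a tagged object pointer

    // For result_size_ == 2 the stub instead passes a result pointer first;
    // simple results come back in rax, as noted in the code above.
    ObjectPtr ExampleBuiltin(int argc, ObjectPtr* argv, Isolate* /*isolate*/) {
      return argc > 0 ? argv[argc - 1] : 0;
    }

    int main() {
      ObjectPtr args[] = {1, 2, 3};
      std::printf("%llu\n", static_cast<unsigned long long>(
                                ExampleBuiltin(3, args, nullptr)));  // 3
    }
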
// Result is in rax - do not destroy this register!
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
@@ -2905,147 +2605,69 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Read result values stored on stack. Result is stored
// above the four argument mirror slots and the two
// Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ __ movq(rax, Operand(rsp, 6 * kRegisterSize));
+ __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
+
+ // Runtime functions should not return 'the hole'. Allowing it to escape may
+ // lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ CompareRoot(rax, Heap::kExceptionRootIndex);
+ __ j(equal, &exception_returned);
+
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
+ __ cmpp(r14, pending_exception_operand);
+ __ j(equal, &okay, Label::kNear);
+ __ int3();
+ __ bind(&okay);
+ }
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+ // Handling of exception.
+ __ bind(&exception_returned);
// Retrieve the pending exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+ __ movp(rax, pending_exception_operand);
// Clear the pending exception.
- pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
+ __ movp(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
+ Label throw_termination_exception;
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
+ __ j(equal, &throw_termination_exception);
// Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
+ __ Throw(rax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
-
- __ bind(&throw_normal_exception);
- __ Throw(rax);
}
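
A condensed sketch of the result handling this patch introduces after the C
call: the enum values stand in for the exception, the-hole and termination
heap roots, and the debug-only checks are kept as comments.

    #include <cstdio>

    enum class Result { kValue, kHole, kException };
    enum class Pending { kTheHole, kTermination, kOther };

    // Returns 0 for a normal value, 1 for a thrown exception, 2 for
    // termination (uncatchable), -1 for the forbidden hole result.
    int HandleRuntimeResult(Result r, Pending* pending_exception) {
      // Runtime functions must never return 'the hole' (debug-only int3 above).
      if (r == Result::kHole) return -1;
      if (r != Result::kException) {
        // Debug builds also check that no pending exception was left behind.
        return 0;                                 // LeaveExitFrame(); ret
      }
      Pending exception = *pending_exception;     // load, then clear with hole
      *pending_exception = Pending::kTheHole;
      if (exception == Pending::kTermination) return 2;  // ThrowUncatchable
      return 1;                                           // Throw
    }

    int main() {
      Pending p = Pending::kOther;
      std::printf("%d\n", HandleRuntimeResult(Result::kException, &p));  // 1
    }
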
@@ -3058,31 +2680,31 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
// Scratch register is neither callee-save, nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
- __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
+ __ Push(kScratchRegister); // context slot
+ __ Push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __ pushq(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
@@ -3101,22 +2723,20 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ InitializeRootRegister();
}
- Isolate* isolate = masm->isolate();
-
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+ ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
+ __ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
+ __ movp(rax, rbp);
__ Store(js_entry_sp, rax);
Label cont;
__ jmp(&cont);
@@ -3132,9 +2752,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
+ isolate());
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
+ __ LoadRoot(rax, Heap::kExceptionRootIndex);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -3147,7 +2767,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Store(pending_exception, rax);
// Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ __ Push(Immediate(0)); // receiver
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
@@ -3156,13 +2776,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// at the time this code is generated.
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
+ isolate());
__ Load(rax, construct_entry);
} else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ Load(rax, entry);
}
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
@@ -3170,16 +2790,16 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
+ __ Pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ movp(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
+ __ Pop(c_entry_fp_operand);
}
// Restore callee-saved registers (X64 conventions).
@@ -3195,23 +2815,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __ popq(rbx);
#ifdef _WIN64
   // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __ popq(rsi);
+ __ popq(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
}
@@ -3232,17 +2852,19 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// indicate that the value is not an instance.
static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = 18;
+ static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
// The last 4 bytes of the instruction sequence
- // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
// Move(kScratchRegister, Factory::the_hole_value())
// in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
+ static const unsigned int kWordBeforeMapCheckValue =
+ kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
// The last 4 bytes of the instruction sequence
// __ j(not_equal, &cache_miss);
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4906;
+ static const unsigned int kWordBeforeResultValue =
+ kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -3251,7 +2873,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label slow;
StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0));
+ __ movp(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -3261,7 +2883,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, args.GetArgumentOperand(1));
+ __ movp(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -3297,31 +2919,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Get return address and delta to inlined map check.
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
__ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
+ __ movp(Operand(kScratchRegister, 0), rax);
}
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
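
The loop above is the usual prototype-chain walk; expressed in plain C++
over a hypothetical object model, with null terminating the chain just as
the kNullValueRootIndex comparison does in the stub (the function prototype
itself is fetched earlier, outside this hunk):

    #include <cstdio>

    struct Map;
    struct HeapObject { Map* map; };
    struct Map { HeapObject* prototype; };  // Map::kPrototypeOffset

    // rbx holds the function's prototype; rcx walks the object's chain.
    bool IsInstance(HeapObject* object, HeapObject* function_prototype) {
      for (HeapObject* p = object->map->prototype; p != nullptr;
           p = p->map->prototype) {
        if (p == function_prototype) return true;  // "is_instance"
      }
      return false;                                // "is_not_instance"
    }

    int main() {
      Map proto_map{nullptr};
      HeapObject proto{&proto_map};
      Map obj_map{&proto};
      HeapObject obj{&obj_map};
      std::printf("%d\n", IsInstance(&obj, &proto));  // 1
    }
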
@@ -3338,7 +2960,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -3361,7 +2983,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -3376,7 +2998,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
__ PopReturnAddressTo(rcx);
- __ pop(rax);
+ __ Pop(rax);
__ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
@@ -3403,7 +3025,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ testb(result_, Immediate(kIsNotStringMask));
@@ -3441,23 +3063,23 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ __ Push(object_);
+ __ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(index_, rax);
+ __ movp(index_, rax);
}
- __ pop(object_);
+ __ Pop(object_);
// Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
@@ -3470,12 +3092,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_);
__ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3495,7 +3117,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ __ movp(result_, FieldOperand(result_, index.reg, index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case_);
@@ -3510,10 +3132,10 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
- __ push(code_);
+ __ Push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
- __ movq(result_, rax);
+ __ movp(result_, rax);
}
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3522,548 +3144,35 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
- __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- // Otherwise, at least one of the arguments is definitely a string,
- // and we convert the one that is not known to be a string.
- if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
- __ JumpIfSmi(rax, &call_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &call_runtime);
-
-    // First argument is a string, test second.
- __ JumpIfSmi(rdx, &call_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &call_runtime);
- } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If arguments where known to be strings, maps are not loaded to r8 and r9
- // by the code above.
- if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rdi, 2);
- __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
- // rbx - first byte: first character
- // rbx - second byte: *maybe* second character
- // Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ shll(rcx, Immediate(kBitsPerByte));
- __ orl(rbx, rcx);
- // Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ASCII the result is an ASCII cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(rcx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-
- Label skip_write_barrier, after_writing;
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(masm->isolate());
- __ Load(rbx, high_promotion_mode);
- __ testb(rbx, Immediate(1));
- __ j(zero, &skip_write_barrier);
-
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ RecordWriteField(rcx,
- ConsString::kFirstOffset,
- rax,
- rbx,
- kDontSaveFPRegs);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ RecordWriteField(rcx,
- ConsString::kSecondOffset,
- rdx,
- rbx,
- kDontSaveFPRegs);
- __ jmp(&after_writing);
-
- __ bind(&skip_write_barrier);
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
-
- __ bind(&after_writing);
-
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only one byte characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kOneByteDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
-
- __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
- // r14: length of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r8, Immediate(kStringRepresentationMask));
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r8, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ bind(&first_prepared);
-
- // Check whether both strings have same encoding.
- __ xorl(r8, r9);
- __ testb(r8, Immediate(kStringEncodingMask));
- __ j(not_zero, &call_runtime);
-
- __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
- // r15: length of second string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r9, Immediate(kStringRepresentationMask));
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r9, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r9: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ SmiToInteger32(rbx, rbx);
- __ testb(r9, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii_string_add_flat_result);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rax);
- __ push(rdx);
-}
-
-
-void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
- Register temp) {
- __ PopReturnAddressTo(temp);
- __ pop(rdx);
- __ pop(rax);
- __ PushReturnAddressFrom(temp);
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ bind(&done);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of doublewords. Align destination on 4 byte
- // boundary before starting rep movs. Copy remaining characters after running
- // rep movs.
- // Count is positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
+ String::Encoding encoding) {
// Nothing to do for zero characters.
Label done;
__ testl(count, count);
__ j(zero, &done, Label::kNear);
// Make count the number of bytes to copy.
- if (!ascii) {
+ if (encoding == String::TWO_BYTE_ENCODING) {
STATIC_ASSERT(2 == sizeof(uc16));
__ addl(count, count);
}
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~(kPointerSize - 1)));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from edi to esi using rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
// Copy remaining characters.
Label loop;
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
__ bind(&done);
}
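
The rewritten copier above always copies byte-wise and simply doubles the
count for two-byte strings; an equivalent self-contained C++ loop (the
string layout is assumed here, not V8's):

    #include <cstdio>

    enum class Encoding { kOneByte, kTwoByte };

    void CopyCharacters(unsigned char* dest, const unsigned char* src,
                        int count, Encoding encoding) {
      if (count == 0) return;                    // "Nothing to do" early-out
      if (encoding == Encoding::kTwoByte) {
        count += count;                          // make count a byte count (uc16)
      }
      while (count-- != 0) {                     // one byte per iteration
        *dest++ = *src++;
      }
    }

    int main() {
      const char* s = "hi";
      char out[3] = {};
      CopyCharacters(reinterpret_cast<unsigned char*>(out),
                     reinterpret_cast<const unsigned char*>(s), 2,
                     Encoding::kOneByte);
      std::printf("%s\n", out);  // hi
    }
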
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits as such strings has a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(string_table, StringTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // string_table: string table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string, Label::kNear);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ cmpq(kScratchRegister, candidate);
- __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register in assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ASCII string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
-
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
@@ -4144,7 +3253,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
+ __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -4154,19 +3263,19 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
- __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
+ __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
@@ -4197,24 +3306,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ jmp(&underlying_unpacked, Label::kNear);
__ bind(&seq_or_external_string);
// Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
+ __ movp(rdi, rax);
__ bind(&underlying_unpacked);
@@ -4226,7 +3335,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
@@ -4245,11 +3354,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
__ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
+ __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+ __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
+ __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
+ __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
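
The four stores in set_slice_header above fill exactly these fields of the
new sliced string; a minimal sketch of that header, with names mirroring the
offsets used (the layout itself is otherwise assumed):

    #include <cstdint>
    #include <cstdio>

    struct HeapObject;  // parent string, opaque here

    struct SlicedStringSketch {
      intptr_t length;      // SlicedString::kLengthOffset (stored as a Smi)
      intptr_t hash_field;  // SlicedString::kHashFieldOffset (kEmptyHashField)
      HeapObject* parent;   // SlicedString::kParentOffset (rdi: underlying string)
      intptr_t offset;      // SlicedString::kOffsetOffset (rdx: start, as a Smi)
    };

    int main() {
      SlicedStringSketch s{/*length=*/5, /*hash_field=*/0, nullptr, /*offset=*/2};
      std::printf("%lld %lld\n", static_cast<long long>(s.length),
                  static_cast<long long>(s.offset));
    }
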
@@ -4270,13 +3379,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Handle external string.
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(rbx, Immediate(kShortExternalStringMask));
__ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
@@ -4288,22 +3397,20 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
- // rdi: first character of result
+ // r14: first character of result
// rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
+ StringHelper::GenerateCopyCharacters(
+ masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
@@ -4313,28 +3420,26 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: result string
// rcx: result string length
- __ movq(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
// rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore esi.
+ // r14: character of sub string start
+ StringHelper::GenerateCopyCharacters(
+ masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -4358,7 +3463,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare lengths.
Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ movp(length, FieldOperand(left, String::kLengthOffset));
__ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ Move(rax, Smi::FromInt(NOT_EQUAL));
@@ -4402,8 +3507,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
+ __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movp(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
FieldOperand(right, String::kLengthOffset));
@@ -4427,7 +3532,10 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare loop.
Label result_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
+ &result_not_equal,
+ // In debug-code mode, SmiTest below might push
+ // the target label outside the near range.
+ Label::kFar);
// Completed loop without finding different characters.
// Compare lengths (precomputed).
@@ -4473,11 +3581,11 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
- __ lea(left,
+ __ leap(left,
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
+ __ leap(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
// Compare loop.
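
The loop body itself falls outside this hunk, but the setup above (both pointers advanced to the string ends, the length negated into an index) is the classic negative-index idiom. A minimal C++ sketch of the comparison it implies, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Both pointers point one past the last character; the index runs from
    // -min_length up to 0, so the increment doubles as the termination test.
    static int CompareUpToMinLength(const uint8_t* left_end,
                                    const uint8_t* right_end,
                                    ptrdiff_t min_length) {
      for (ptrdiff_t index = -min_length; index != 0; index++) {
        if (left_end[index] != right_end[index]) {
          return left_end[index] < right_end[index] ? -1 : 1;
        }
      }
      return 0;  // Equal so far; the caller then compares the precomputed lengths.
    }
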
@@ -4500,15 +3608,15 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[16] : left string
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(0)); // left
- __ movq(rax, args.GetArgumentOperand(1)); // right
+ __ movp(rdx, args.GetArgumentOperand(0)); // left
+ __ movp(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->string_compare_native(), 1);
__ ret(2 * kPointerSize);
@@ -4521,14 +3629,42 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : left
+ // -- rax : right
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Load rcx with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ Move(rcx, handle(isolate()->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ Assert(not_equal, kExpectedAllocationSite);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ isolate()->factory()->allocation_site_map());
+ __ Assert(equal, kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+ __ TailCallStub(&stub);
}
@@ -4539,15 +3675,15 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
} else {
Label done;
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
- __ movq(rax, rdx);
+ __ movp(rax, rdx);
}
__ ret(0);
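
The overflow correction is the subtle part of this smi path. A hedged C++ rendering of the same arithmetic, using a GCC/Clang builtin for the overflow check (names are illustrative):

    #include <cstdint>

    // Equality only needs rax - rdx to be zero or non-zero, so its sign is
    // irrelevant.  For ordered compares, rdx - rax can overflow for extreme
    // smi values; the one's complement (notp above) flips the sign back then.
    static int64_t CompareSmiRegisters(int64_t rdx, int64_t rax, bool equality) {
      if (equality) return rax - rdx;
      int64_t diff;
      if (__builtin_sub_overflow(rdx, rax, &diff)) diff = ~diff;
      return diff;  // Sign (or zero) encodes the ordering, as the caller expects.
    }
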
@@ -4573,7 +3709,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load left and right operand.
Label done, left, left_smi, right_smi;
__ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rax, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
@@ -4583,7 +3719,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
+ __ CompareMap(rdx, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -4603,18 +3739,18 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
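
A sketch of the branch-free -1/0/+1 materialization above, assuming the elided ucomisd compared the left operand against the right one (the function name is illustrative; NaN operands take the unordered path instead):

    // setcc(above) yields 1 exactly when left > right; sbb then subtracts the
    // carry flag, which is set exactly when left < right, so the result is
    // -1, 0 or +1 with no branches.
    static int FlagsToTernary(double left, double right) {
      int result = (left > right) ? 1 : 0;  // setcc(above, rax)
      result -= (left < right) ? 1 : 0;     // sbbp(rax, rcx) with rcx == 0
      return result;
    }
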
__ bind(&unordered);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rax, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rax, isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
__ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
@@ -4624,7 +3760,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
+ __ Cmp(rdx, isolate()->factory()->undefined_value());
__ j(equal, &unordered);
}
@@ -4649,18 +3785,18 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(cond, &miss, Label::kNear);
// Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4693,17 +3829,17 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
__ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4738,19 +3874,19 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
+ __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -4766,7 +3902,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4792,13 +3928,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ PopReturnAddressTo(tmp1);
- __ push(left);
- __ push(right);
+ __ Push(left);
+ __ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4818,7 +3954,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4831,14 +3967,14 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ Cmp(rcx, known_map_);
__ j(not_equal, &miss, Label::kNear);
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4850,20 +3986,20 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(rdx);
+ __ Push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
+ __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ Pop(rax);
+ __ Pop(rdx);
}
// Do a tail call to the rewritten stub.
@@ -4890,17 +4026,17 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
+ __ movp(entity_name, Operand(properties,
index,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
@@ -4917,17 +4053,18 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, &good, Label::kNear);
// Check if the entry name is not a unique name.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
miss);
__ bind(&good);
}
- NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
+ NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
+ NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
+ __ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
@@ -4961,26 +4098,27 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
- NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
- __ push(name);
+ NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
+ POSITIVE_LOOKUP);
+ __ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
- __ push(r0);
+ __ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
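
Both lookup helpers address the table the same way. A minimal C++ sketch of a single probe, assuming the usual triangular probe offset behind NameDictionary::GetProbeOffset and the power-of-two capacity the code relies on:

    #include <cstdint>

    // One inlined probe: mask the offset hash into the power-of-two table and
    // scale by the three-word entry size (NameDictionary::kEntrySize == 3).
    static uint32_t ProbeEntryIndex(uint32_t hash, uint32_t capacity, int round) {
      uint32_t mask = capacity - 1;  // capacity is 2^n
      uint32_t probe_offset =
          static_cast<uint32_t>(round + round * round) >> 1;  // GetProbeOffset(round), assumed
      return ((hash + probe_offset) & mask) * 3;  // slot holding the entry's key
    }
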
@@ -5007,7 +4145,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
__ decl(scratch);
- __ push(scratch);
+ __ Push(scratch);
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -5018,27 +4156,27 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, args.GetArgumentOperand(1));
+ __ movp(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
+ __ movp(scratch, Operand(dictionary_,
index_,
times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ Cmp(scratch, isolate()->factory()->undefined_value());
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -5047,7 +4185,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// key we are looking for.
// Check if the entry name is not a unique name.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
&maybe_in_dictionary);
}
@@ -5058,18 +4196,18 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
__ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
+ __ movp(scratch, Immediate(1));
__ Drop(1);
__ ret(2 * kPointerSize);
__ bind(&not_in_dictionary);
- __ movq(scratch, Immediate(0));
+ __ movp(scratch, Immediate(0));
__ Drop(1);
__ ret(2 * kPointerSize);
}
@@ -5077,15 +4215,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // Always have SSE2 on x64.
+ StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+ stub1.GetCode();
+ StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+ stub2.GetCode();
}
@@ -5134,7 +4267,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(),
regs_.scratch0(),
&dont_need_remembered_set);
@@ -5149,7 +4282,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5162,13 +4295,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
@@ -5179,23 +4312,14 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
// TODO(gc) Can we just set address arg2 in the beginning?
__ Move(arg_reg_2, address);
__ LoadAddress(arg_reg_3,
- ExternalReference::isolate_address(masm->isolate()));
+ ExternalReference::isolate_address(isolate()));
int argument_count = 3;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5208,13 +4332,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
+ __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ andp(regs_.scratch0(), regs_.object());
+ __ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
+ __ subp(regs_.scratch1(), Immediate(1));
+ __ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
__ j(negative, &need_incremental);
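
A hedged C++ sketch of the page-masking step above, reusing the V8 constants from the code and spelling the address arithmetic out via uintptr_t (the helper name is illustrative):

    #include <cstdint>

    // Every heap object lives on an aligned page, so masking its address with
    // ~kPageAlignmentMask yields the MemoryChunk header; the write-barrier
    // counter sits at a fixed offset in that header and counts down per store.
    static bool CountdownReachedZero(uintptr_t object_address) {
      uintptr_t chunk =
          object_address & ~static_cast<uintptr_t>(Page::kPageAlignmentMask);
      intptr_t* counter = reinterpret_cast<intptr_t*>(
          chunk + MemoryChunk::kWriteBarrierCounterOffset);
      return --*counter < 0;  // negative, so jump to &need_incremental above
    }
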
@@ -5241,7 +4365,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -5264,13 +4388,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
- __ push(regs_.object());
+ __ Push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
&need_incremental_pop_object,
Label::kNear);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
@@ -5284,7 +4408,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
__ bind(&need_incremental);
@@ -5310,9 +4434,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Get array literal index, array literal and its map.
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rdx, args.GetArgumentOperand(1));
- __ movq(rbx, args.GetArgumentOperand(0));
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ movp(rbx, args.GetArgumentOperand(0));
+ __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -5325,22 +4449,22 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&slow_elements);
__ PopReturnAddressTo(rdi);
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ Push(rax);
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(rdx);
__ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
+ __ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
__ RecordWrite(rbx, rcx, rax,
kDontSaveFPRegs,
@@ -5352,15 +4476,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// FAST_*_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize), rax);
__ ret(0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
__ SmiToInteger32(r11, rcx);
__ StoreNumberToDoubleElements(rax,
r9,
@@ -5372,42 +4496,24 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ __ movp(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ PopReturnAddressTo(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
-void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ movq(rdi, rax);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rax, MemOperand(rbp, parameter_count_offset));
- // The parameter count above includes the receiver for the arguments passed to
- // the deoptimization handler. Subtract the receiver for the parameter count
- // for the call.
- __ subl(rax, Immediate(1));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ParameterCount argument_count(rax);
- __ InvokeFunction(
- rdi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub;
+ ProfileEntryHookStub stub(masm->isolate());
masm->CallStub(&stub);
}
}
@@ -5417,22 +4523,23 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
+ __ leap(arg_reg_2,
+ Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
- __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
// Call the entry hook function.
- __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
- RelocInfo::NONE64);
+ __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
+ Assembler::RelocInfoNone());
AllowExternalCallThatCantCauseGC scope(masm);
@@ -5442,8 +4549,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
+ __ popq(arg_reg_2);
+ __ popq(arg_reg_1);
__ Ret();
}
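
The two address computations are the subtle part of this stub. A sketch of what they recover, assuming the stub is always reached through a short call and using the constants that appear in the code above (the helper name is illustrative):

    #include <cstdint>

    // After pushq(arg_reg_1) and pushq(arg_reg_2), the return address sits two
    // register slots up the stack.  The callee's entry point is that address
    // minus the length of the short call that reached the stub, and the
    // caller's original rsp is one return-address slot above the saved regs.
    static void ComputeEntryHookArguments(uintptr_t rsp_after_pushes,
                                          uintptr_t* function,
                                          uintptr_t* stack_pointer) {
      const uintptr_t kNumSavedRegisters = 2;
      uintptr_t return_address = *reinterpret_cast<uintptr_t*>(
          rsp_after_pushes + kNumSavedRegisters * kRegisterSize);
      *function = return_address - Assembler::kShortCallInstructionLength;
      *stack_pointer =
          rsp_after_pushes + kNumSavedRegisters * kRegisterSize + kPCOnStackSize;
    }
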
@@ -5453,9 +4560,7 @@ template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(),
- CONTEXT_CHECK_REQUIRED,
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -5465,7 +4570,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- T stub(kind);
+ T stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5480,7 +4585,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
@@ -5506,41 +4611,41 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ movp(rcx, args.GetArgumentOperand(0));
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+ holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- CONTEXT_CHECK_REQUIRED,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(),
+ initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+
if (FLAG_debug_code) {
Handle<Map> allocation_site_map =
masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ Assert(equal, kExpectedAllocationSiteInCell);
+ __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSite);
}
    // Save the resulting elements kind in type info. We can't just store rdx
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ SmiAddConstant(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset),
+ __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
Smi::FromInt(kFastElementsKindPackedToHoley));
__ bind(&normal_sequence);
@@ -5551,7 +4656,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmpl(rdx, Immediate(kind));
__ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
__ TailCallStub(&stub);
__ bind(&next);
}
@@ -5566,20 +4671,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- ElementsKind initial_kind = GetInitialFastElementsKind();
- ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
-
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
- (!FLAG_track_allocation_sites &&
- (kind == initial_kind || kind == initial_holey_kind))) {
- T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
+ T stub(isolate, kind);
+ stub.GetCode();
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode();
}
}
}
@@ -5600,12 +4700,12 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
+ InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+ stubh1.GetCode();
+ InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+ stubh2.GetCode();
+ InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+ stubh3.GetCode();
}
}
@@ -5615,7 +4715,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
@@ -5641,21 +4741,17 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : AllocationSite or undefined
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5663,31 +4759,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
- __ bind(&okay_here);
+ // We should either have undefined in rbx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Cmp(rbx, undefined_sentinel);
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rdx, 0),
- masm->isolate()->factory()->allocation_site_map());
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
+ __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
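
A short sketch of the transition-info decoding done above, relying only on the asserted fact that the elements kind lives in the low bits of the smi-tagged field (the helper name is illustrative):

    // The AllocationSite keeps the elements kind in the low bits of
    // transition_info; masking with ElementsKindBits::kMask recovers it, and
    // the SmiAddConstant of kFastElementsKindPackedToHoley seen earlier moves
    // a packed kind to its holey counterpart within those same bits.
    static ElementsKind DecodeElementsKindFromTransitionInfo(int transition_info) {
      STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
      return static_cast<ElementsKind>(
          transition_info & AllocationSite::ElementsKindBits::kMask);
    }
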
@@ -5700,9 +4786,9 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(kind);
+ InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
@@ -5713,21 +4799,21 @@ void InternalArrayConstructorStub::GenerateCase(
// We might need to create a holey array
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ movp(rcx, args.GetArgumentOperand(0));
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
+ stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(kind);
+ InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(kind);
+ InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
__ TailCallStub(&stubN);
}
@@ -5735,7 +4821,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5746,7 +4831,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
@@ -5756,14 +4841,13 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ shr(rcx, Immediate(Map::kElementsKindShift));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
if (FLAG_debug_code) {
Label done;
@@ -5785,6 +4869,185 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : callee
+ // -- rbx : call_data
+ // -- rcx : holder
+ // -- rdx : api_function_address
+ // -- rsi : context
+ // --
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[argc * 8] : first argument
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register return_address = rdi;
+ Register context = rsi;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ __ PopReturnAddressTo(return_address);
+
+ // context save
+ __ Push(context);
+ // load context from callee
+ __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+
+ // callee
+ __ Push(callee);
+
+ // call data
+ __ Push(call_data);
+ Register scratch = call_data;
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ Push(scratch);
+ // return value default
+ __ Push(scratch);
+ // isolate
+ __ Move(scratch,
+ ExternalReference::isolate_address(isolate()));
+ __ Push(scratch);
+ // holder
+ __ Push(holder);
+
+ __ movp(scratch, rsp);
+ // Push return address back on stack.
+ __ PushReturnAddressFrom(return_address);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiStackSpace);
+
+ // FunctionCallbackInfo::implicit_args_.
+ __ movp(StackSpaceOperand(0), scratch);
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
+ // It's okay if api_function_address == callback_arg
+ // but not arguments_arg
+ ASSERT(!api_function_address.is(arguments_arg));
+
+ // v8::InvocationCallback's argument.
+ __ leap(arguments_arg, StackSpaceOperand(0));
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_function_callback(isolate());
+
+ // Accessor for FunctionCallbackInfo and first js arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+ // Stores return the first js argument
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ api_function_address,
+ thunk_ref,
+ callback_arg,
+ argc + FCA::kArgsLength + 1,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
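
For orientation, a sketch of the stack contents this stub builds: the seven implicit arguments just pushed and the four FunctionCallbackInfo slots filled in via StackSpaceOperand. The struct and field names below are descriptive only, not the real class layout:

    // Pushed in the order: context, callee, call data, return value,
    // return-value default, isolate, holder.  The stack grows down, so the
    // holder lands at index 0 of implicit_args, matching the FCA asserts.
    struct ImplicitArgsOnStack {   // viewed in increasing address order
      void* holder;                // FCA::kHolderIndex == 0
      void* isolate;               // FCA::kIsolateIndex == 1
      void* return_value_default;  // FCA::kReturnValueDefaultValueIndex == 2
      void* return_value;          // FCA::kReturnValueOffset == 3
      void* call_data;             // FCA::kDataIndex == 4
      void* callee;                // FCA::kCalleeIndex == 5
      void* context_save;          // FCA::kContextSaveIndex == 6
    };

    struct FunctionCallbackInfoSlots {  // the four StackSpaceOperand writes
      void** implicit_args;             // rsp snapshot taken after the pushes
      void** values;                    // implicit_args + (argc + 7 - 1) slots
      int length;                       // argc
      int is_construct_call;            // 0
    };
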
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- r8 : api_function_address
+ // -----------------------------------
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register getter_arg = r8;
+ Register accessor_info_arg = rdx;
+ Register name_arg = rcx;
+#else
+ Register getter_arg = rdx;
+ Register accessor_info_arg = rsi;
+ Register name_arg = rdi;
+#endif
+ Register api_function_address = r8;
+ Register scratch = rax;
+
+ // v8::Arguments::values_ and handler for name.
+ const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::AccessorInfo in non-GCed stack space.
+ const int kArgStackSpace = 1;
+
+ __ leap(name_arg, Operand(rsp, kPCOnStackSize));
+
+ __ PrepareCallApiFunction(kArgStackSpace);
+ __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
+
+ // v8::PropertyAccessorInfo::args_.
+ __ movp(StackSpaceOperand(0), scratch);
+
+ // The context register (rsi) has been saved in PrepareCallApiFunction and
+ // could be used to pass arguments.
+ __ leap(accessor_info_arg, StackSpaceOperand(0));
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // It's okay if api_function_address == getter_arg
+ // but not accessor_info_arg or name_arg
+ ASSERT(!api_function_address.is(accessor_info_arg) &&
+ !api_function_address.is(name_arg));
+
+ // The name handler is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ getter_arg,
+ kStackSpace,
+ return_value_operand,
+ NULL);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/x64/code-stubs-x64.h b/chromium/v8/src/x64/code-stubs-x64.h
index 7a3f6a68691..7f9420c3bc2 100644
--- a/chromium/v8/src/x64/code-stubs-x64.h
+++ b/chromium/v8/src/x64/code-stubs-x64.h
@@ -1,35 +1,11 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODE_STUBS_X64_H_
#define V8_X64_CODE_STUBS_X64_H_
-#include "ic-inl.h"
-#include "type-info.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@@ -37,35 +13,10 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+ : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
@@ -82,38 +33,15 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
+  // Generate code for copying |count| characters from |src| to |dest|. The
+  // character size is given by |encoding|. Copying of overlapping regions
+  // is not supported.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
- bool ascii);
+ String::Encoding encoding);
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the string table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -133,34 +61,9 @@ class StringHelper : public AllStatic {
};
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateRegisterArgsPop(MacroAssembler* masm, Register temp);
-
- const StringAddFlags flags_;
-};
-
-
class SubStringStub: public PlatformCodeStub {
public:
- SubStringStub() {}
+ explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
@@ -172,7 +75,7 @@ class SubStringStub: public PlatformCodeStub {
class StringCompareStub: public PlatformCodeStub {
public:
- StringCompareStub() {}
+ explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// Compares two flat ASCII strings and returns result in rax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -211,11 +114,16 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
- NameDictionaryLookupStub(Register dictionary,
+ NameDictionaryLookupStub(Isolate* isolate,
+ Register dictionary,
Register result,
Register index,
LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+ : PlatformCodeStub(isolate),
+ dictionary_(dictionary),
+ result_(result),
+ index_(index),
+ mode_(mode) { }
void Generate(MacroAssembler* masm);
@@ -271,12 +179,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
class RecordWriteStub: public PlatformCodeStub {
public:
- RecordWriteStub(Register object,
+ RecordWriteStub(Isolate* isolate,
+ Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
- : object_(object),
+ : PlatformCodeStub(isolate),
+ object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
@@ -379,20 +289,20 @@ class RecordWriteStub: public PlatformCodeStub {
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->push(rcx);
+ masm->Push(rcx);
}
- masm->push(scratch1_);
+ masm->Push(scratch1_);
if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->movq(address_, address_orig_);
+ masm->Push(address_);
+ masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->movq(object_, object_orig_);
+ masm->Push(object_);
+ masm->movp(object_, object_orig_);
}
}
@@ -401,20 +311,20 @@ class RecordWriteStub: public PlatformCodeStub {
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
- masm->pop(object_);
+ masm->movp(object_orig_, object_);
+ masm->Pop(object_);
}
if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
- masm->pop(address_);
+ masm->movp(address_orig_, address_);
+ masm->Pop(address_);
}
- masm->pop(scratch1_);
+ masm->Pop(scratch1_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->pop(rcx);
+ masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -475,7 +385,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/chromium/v8/src/x64/codegen-x64.cc b/chromium/v8/src/x64/codegen-x64.cc
index afe0e3b7f52..0f939d98dff 100644
--- a/chromium/v8/src/x64/codegen-x64.cc
+++ b/chromium/v8/src/x64/codegen-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "macro-assembler.h"
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -55,68 +32,24 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ push(rbx);
- __ push(rdi);
- __ movq(rbx, xmm0);
- __ push(rbx);
- __ fld_d(Operand(rsp, 0));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be in xmm0.
- __ fstp_d(Operand(rsp, 0));
- __ pop(rbx);
- __ movq(xmm0, rbx);
- __ pop(rdi);
- __ pop(rbx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
+ if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
+ if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
+ __ pushq(rax);
+ __ pushq(rbx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
- __ pop(rbx);
- __ pop(rax);
+ __ popq(rbx);
+ __ popq(rax);
__ movsd(xmm0, result);
__ Ret();
@@ -136,7 +69,7 @@ UnaryMathFunction CreateSqrtFunction() {
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
- if (buffer == NULL) return &sqrt;
+ if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// xmm0: raw double input.
@@ -175,10 +108,10 @@ ModuloFunction CreateModuloFunction() {
// Compute x mod y.
// Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
+ __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
+ __ movsd(Operand(rsp, kRegisterSize), xmm0);
+ __ fld_d(Operand(rsp, kRegisterSize * 2));
+ __ fld_d(Operand(rsp, kRegisterSize));
// Clear exception flags before operation.
{
@@ -214,14 +147,14 @@ ModuloFunction CreateModuloFunction() {
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
__ movq(rcx, kNaNValue);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ movq(Operand(rsp, kRegisterSize), rcx);
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
__ jmp(&return_result);
// If result is valid, return that.
__ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
+ __ fstp_d(Operand(rsp, kRegisterSize));
+ __ movsd(xmm0, Operand(rsp, kRegisterSize));
// Clean up FPU stack and exceptions and return xmm0
__ bind(&return_result);
@@ -267,7 +200,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -296,34 +229,42 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
+ if (kPointerSize == kDoubleSize) {
+ // Check backing store for COW-ness. For COW arrays we have to
+ // allocate a new backing store.
+ __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &new_backing_store);
+ } else {
+ // For the x32 port we have to allocate a new backing store as the SMI
+ // size is not equal to the double size.
+ ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ jmp(&new_backing_store);
+ }
+
// Check if the backing store is in new-space. If not, we need to allocate
// a new one since the old one is in pointer-space.
// If in new space, we can reuse the old backing store because it is
// the same size.
__ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
- __ movq(r14, r8); // Destination array equals source array.
+ __ movp(r14, r8); // Destination array equals source array.
// r8 : source FixedArray
// r9 : elements array length
// r14: destination FixedDoubleArray
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
__ bind(&allocated);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -344,14 +285,14 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
// Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+ __ movp(r11, r14);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -361,12 +302,12 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Set backing store's length.
__ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+ __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -378,7 +319,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Conversion loop.
__ bind(&loop);
- __ movq(rbx,
+ __ movp(rbx,
FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
// r9 : current element's index
// rbx: current element (smi-tagged)
@@ -397,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
@@ -421,23 +362,23 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- __ push(rax);
+ __ Push(rax);
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+ __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
__ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+ __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
__ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
@@ -448,8 +389,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
// Box doubles into heap numbers.
@@ -466,13 +407,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rax);
- __ movq(r15, r9);
+ __ movp(r15, r9);
__ RecordWriteArray(r11,
rax,
r15,
@@ -483,18 +424,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ movq(FieldOperand(r11,
+ __ movp(FieldOperand(r11,
r9,
times_pointer_size,
FixedArray::kHeaderSize),
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+ __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
JSObject::kElementsOffset,
r11,
@@ -502,12 +443,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Pop(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
// Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
HeapObject::kMapOffset,
rbx,
@@ -524,7 +465,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
@@ -540,8 +481,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+ __ addp(index, result);
+ __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
@@ -553,10 +494,10 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
@@ -577,13 +518,13 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
__ testb(result, Immediate(kShortExternalStringTag));
__ j(not_zero, call_runtime);
// Check encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+ __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
__ j(not_equal, &ascii_external, Label::kNear);
// Two-byte string.
__ movzxwl(result, Operand(result, index, times_2, 0));
@@ -650,13 +591,13 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
+ __ leaq(temp1, Operand(temp2, 0x1ff800));
+ __ andq(temp2, Immediate(0x7ff));
+ __ shrq(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ shlq(temp1, Immediate(52));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
@@ -675,37 +616,36 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
- initialized = true;
- }
- return sequence;
+CodeAgingHelper::CodeAgingHelper() {
+ ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ patcher.masm()->pushq(rbp);
+ patcher.masm()->movp(rbp, rsp);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
}
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+ return *candidate == kCallOpcode;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool result = isolate->code_aging_helper()->IsYoung(sequence);
+ ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
+ if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
@@ -722,10 +662,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
- CopyBytes(sequence, young_sequence, young_length);
+ isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
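
The code-aging rewrite above moves the young prologue out of a function-local static and into a CodeAgingHelper owned by the isolate; IsYoungSequence now simply asks that helper whether the candidate bytes still match the young prologue (pushq rbp; movp rbp, rsp; Push rsi; Push rdi). A rough sketch of that young/old check, using assumed names and the memcmp comparison from the removed GetNoCodeAgeSequence path rather than the new implementation itself:

  #include <cstring>

  class CodeAgingHelperSketch {
   public:
    CodeAgingHelperSketch(const unsigned char* young, std::size_t length)
        : young_(young), length_(length) {}
    // Young code still starts with the unpatched prologue bytes; aged code
    // has been patched to begin with a call to the age stub instead.
    bool IsYoung(const unsigned char* candidate) const {
      return std::memcmp(candidate, young_, length_) == 0;
    }
   private:
    const unsigned char* young_;
    std::size_t length_;
  };
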
diff --git a/chromium/v8/src/x64/codegen-x64.h b/chromium/v8/src/x64/codegen-x64.h
index 811ac507d53..5faa9878c01 100644
--- a/chromium/v8/src/x64/codegen-x64.h
+++ b/chromium/v8/src/x64/codegen-x64.h
@@ -1,78 +1,19 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-#include "ast.h"
-#include "ic-inl.h"
+#include "src/ast.h"
+#include "src/ic-inl.h"
namespace v8 {
namespace internal {
-// Forward declarations
-class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- explicit CodeGenerator(Isolate* isolate) {
- InitializeAstVisitor(isolate);
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info, const char* kind);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
class StringCharLoadGenerator : public AllStatic {
public:
diff --git a/chromium/v8/src/x64/cpu-x64.cc b/chromium/v8/src/x64/cpu-x64.cc
index 4fa290a8b5f..ca2b89b2268 100644
--- a/chromium/v8/src/x64/cpu-x64.cc
+++ b/chromium/v8/src/x64/cpu-x64.cc
@@ -1,56 +1,23 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
// CPU specific code for x64 independent of OS goes here.
#if defined(__GNUC__) && !defined(__MINGW64__)
-#include "third_party/valgrind/valgrind.h"
+#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "cpu.h"
-#include "macro-assembler.h"
+#include "src/cpu.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
void CPU::FlushICache(void* start, size_t size) {
// No need to flush the instruction cache on Intel. On Intel instruction
// cache flushing is only necessary when multiple cores running the same
diff --git a/chromium/v8/src/x64/debug-x64.cc b/chromium/v8/src/x64/debug-x64.cc
index 5ddf69a414e..4703e423547 100644
--- a/chromium/v8/src/x64/debug-x64.cc
+++ b/chromium/v8/src/x64/debug-x64.cc
@@ -1,44 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "codegen.h"
-#include "debug.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -50,7 +25,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -73,14 +48,14 @@ bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
- return !Assembler::IsNop(rinfo()->pc());
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
+ debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -90,8 +65,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
#define __ ACCESS_MASM(masm)
@@ -105,10 +78,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
}
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -121,10 +94,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
Register reg = { r };
ASSERT(!reg.is(kScratchRegister));
if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ __ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
- __ PushInt64AsTwoSmis(reg);
+ __ PushRegisterAsTwoSmis(reg);
}
}
@@ -134,7 +107,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(rax, 0); // No arguments (argc == 0).
__ Move(rbx, ExternalReference::debug_break(masm->isolate()));
- CEntryStub ceb(1);
+ CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
@@ -145,18 +118,18 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(reg, kDebugZapValue);
}
if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Pop(reg);
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ PopInt64AsTwoSmis(reg);
+ __ PopRegisterAsTwoSmis(reg);
}
}
// Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
+ __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame.
}
@@ -164,20 +137,30 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
+ ExternalReference::debug_after_break_target_address(masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Jump(Operand(kScratchRegister, 0));
}
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallICStub
+ // ----------- S t a t e -------------
+ // -- rdx : type feedback slot (smi)
+ // -- rdi : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rdx.bit() | rdi.bit(), 0, false);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : receiver
@@ -187,7 +170,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
@@ -199,7 +182,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : key
@@ -209,7 +192,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
// -- rax : value
@@ -221,7 +204,7 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- rax : value
@@ -230,16 +213,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call call (from ic-x64.cc)
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-x64.cc).
// ----------- S t a t e -------------
// -- rax: return value
@@ -248,7 +222,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
@@ -257,17 +231,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rbx: cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-x64.cc).
// rax is the actual number of arguments not encoded as a smi, see comment
// above IC call.
@@ -279,20 +243,23 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+ MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-x64.cc).
// rax is the actual number of arguments not encoded as a smi, see comment
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
-void Debug::GenerateSlot(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction.
Label check_codesize;
__ bind(&check_codesize);
@@ -303,49 +270,47 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
}
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->ret(0);
}
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
__ Move(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
+ __ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
- __ pop(rdi); // Function.
- __ pop(rbp);
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
// Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx);
}
-const bool Debug::kFrameDropperSupported = true;
+const bool LiveEdit::kFrameDropperSupported = true;
#undef __
-#endif // ENABLE_DEBUGGER_SUPPORT
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/deoptimizer-x64.cc b/chromium/v8/src/x64/deoptimizer-x64.cc
index ae180ec59b4..ae3a8242922 100644
--- a/chromium/v8/src/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/x64/deoptimizer-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
@@ -51,6 +28,26 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
// For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
@@ -63,6 +60,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
@@ -71,7 +74,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// LLazyBailout instructions with nops if necessary.
CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
- RelocInfo::NONE64);
+ Assembler::RelocInfoNone());
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -97,7 +100,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ input_->SetFrameSlot(i, Memory::uintptr_at(tos + i));
}
}
@@ -126,11 +129,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -141,7 +139,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
@@ -153,10 +151,10 @@ void Deoptimizer::EntryGenerator::Generate() {
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
- __ push(r);
+ __ pushq(r);
}
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
kDoubleRegsSize;
// We use this to keep the value of the fifth argument temporarily.
@@ -165,32 +163,32 @@ void Deoptimizer::EntryGenerator::Generate() {
Register arg5 = r11;
// Get the bailout id from the stack.
- __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
+ __ movp(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4,
- Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ kPCOnStackSize));
- __ subq(arg5, rbp);
- __ neg(arg5);
+ __ subp(arg5, rbp);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg_reg_1, rax);
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(arg_reg_1, rax);
__ Set(arg_reg_2, type());
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kRegisterSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else
- __ movq(r8, arg5);
+ __ movp(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#endif
@@ -199,54 +197,54 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+ __ movp(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
+ __ PopQuad(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ Pop(Operand(rdx, 0));
+ __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
- __ push(rax);
+ __ pushq(rax);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
+ __ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 2);
}
- __ pop(rax);
+ __ popq(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -254,23 +252,23 @@ void Deoptimizer::EntryGenerator::Generate() {
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ movp(rbx, Operand(rax, 0));
+ __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
@@ -280,14 +278,14 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- __ push(Operand(rbx, FrameDescription::state_offset()));
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ Push(Operand(rbx, FrameDescription::state_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
+ __ PushQuad(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -299,7 +297,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ASSERT(i > 0);
r = Register::from_code(i - 1);
}
- __ pop(r);
+ __ popq(r);
}
// Set up the roots register.
@@ -317,7 +315,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ push_imm32(i);
+ __ pushq_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
@@ -326,15 +324,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ if (kPCOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of PC for x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high 32 bits of the FP for the x32 port.
+ SetFrameSlot(offset + kPointerSize, 0);
+ }
SetFrameSlot(offset, value);
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
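
The new SetCallerPc/SetCallerFp bodies above exist for the x32 port, where the on-stack PC and FP slots are two pointer-sized words each: the high word is cleared before the pointer-sized value is stored, so stale bits never leak into the rebuilt frame. A self-contained sketch of that pattern (FrameSketch, SetFrameSlot and the constants are stand-ins for the real FrameDescription):

  #include <cstring>
  #include <cstdint>

  const int kPointerSize = static_cast<int>(sizeof(void*));  // 4 on x32, 8 on x64
  const int kPCOnStackSize = 8;  // the return address always occupies 8 bytes

  struct FrameSketch {
    unsigned char* frame_content;

    void SetFrameSlot(unsigned offset, intptr_t value) {
      std::memcpy(frame_content + offset, &value, kPointerSize);
    }

    void SetCallerPc(unsigned offset, intptr_t value) {
      if (kPCOnStackSize == 2 * kPointerSize) {
        // x32 only: clear the high half of the two-word PC slot first.
        SetFrameSlot(offset + kPointerSize, 0);
      }
      SetFrameSlot(offset, value);
    }
  };
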
diff --git a/chromium/v8/src/x64/disasm-x64.cc b/chromium/v8/src/x64/disasm-x64.cc
index 76b541c0100..f4c5de88d62 100644
--- a/chromium/v8/src/x64/disasm-x64.cc
+++ b/chromium/v8/src/x64/disasm-x64.cc
@@ -1,40 +1,17 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "disasm.h"
-#include "lazy-instance.h"
+#include "src/base/lazy-instance.h"
+#include "src/disasm.h"
namespace disasm {
@@ -271,7 +248,7 @@ void InstructionTable::AddJumpConditionalShort() {
}
-static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+static v8::base::LazyInstance<InstructionTable>::type instruction_table =
LAZY_INSTANCE_INITIALIZER;
@@ -453,7 +430,7 @@ void DisassemblerX64::AppendToBuffer(const char* format, ...) {
v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
va_list args;
va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ int result = v8::internal::VSNPrintF(buf, format, args);
va_end(args);
tmp_buffer_pos_ += result;
}
@@ -485,9 +462,11 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
NameOfCPURegister(index),
- 1 << scale, disp);
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
@@ -512,38 +491,29 @@ int DisassemblerX64::PrintRightOperandHelper(
int scale, index, base;
get_sib(sib, &scale, &index, &base);
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(base),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return (mod == 2) ? 5 : 2;
}
break;
@@ -934,6 +904,7 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
@@ -956,6 +927,8 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -1093,6 +1066,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x73) {
+ current += 1;
+ ASSERT(regop == 6);
+ AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1323,6 +1301,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBD) {
+ AppendToBuffer("%s%c ", mnemonic, operand_size_code());
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s,", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1365,6 +1349,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBD:
+ return "bsr";
case 0xBE:
return "movsxb";
case 0xBF:
@@ -1448,7 +1434,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ addr =
+ reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
@@ -1806,14 +1793,14 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int outp = 0;
// Instruction bytes.
for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
+ outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp);
}
for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
+ outp += v8::internal::SNPrintF(out_buffer + outp, " ");
}
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
+ outp += v8::internal::SNPrintF(out_buffer + outp, " %s",
+ tmp_buffer_.start());
return instr_len;
}
@@ -1840,7 +1827,7 @@ static const char* xmm_regs[16] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
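
The disassembler edits above also normalize displacement printing: the sign is emitted explicitly with a non-negative magnitude, and byte-sized displacements are read through int8_t so they sign-extend correctly. The formatting pattern looks roughly like the following, sketched with plain snprintf instead of the disassembler's AppendToBuffer:

  #include <cstdio>
  #include <cstdint>

  // Produces operands such as "[rbp-0x8]" or "[rax+0x10]".
  void PrintBaseDisp(char* out, std::size_t n, const char* base, int32_t disp) {
    std::snprintf(out, n, "[%s%s0x%x]", base,
                  disp < 0 ? "-" : "+",
                  static_cast<uint32_t>(disp < 0 ? -disp : disp));
  }
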
diff --git a/chromium/v8/src/x64/frames-x64.cc b/chromium/v8/src/x64/frames-x64.cc
index 5cc27a6e12b..5513308828e 100644
--- a/chromium/v8/src/x64/frames-x64.cc
+++ b/chromium/v8/src/x64/frames-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "assembler.h"
-#include "assembler-x64.h"
-#include "assembler-x64-inl.h"
-#include "frames.h"
+#include "src/assembler.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -40,10 +17,24 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
Register StubFailureTrampolineFrame::fp_register() { return rbp; }
Register StubFailureTrampolineFrame::context_register() { return rsi; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
} } // namespace v8::internal
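
x64 keeps no constant pool pointer, so the frames-x64.cc hunk satisfies the shared frame interface with accessors that assert unreachability and hand back no_reg. A small standalone sketch of that pattern, using simplified stand-ins for the real V8 types and macros:

    #include <cassert>

    struct Register { int code; };
    static const Register no_reg = { -1 };
    #define UNREACHABLE() assert(false && "unreachable code reached")

    struct JavaScriptFrame {
      // Ports that keep a dedicated constant pool pointer return a real
      // register here; this port never materializes one, so asking for it
      // is a bug and debug builds abort.
      static Register constant_pool_pointer_register() {
        UNREACHABLE();
        return no_reg;  // keeps release (NDEBUG) builds well-formed
      }
    };

    int main() {
      // Nothing calls the accessor on this port; it only has to exist so
      // the platform-independent frame code compiles and links.
      (void)&JavaScriptFrame::constant_pool_pointer_register;
      return 0;
    }
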
diff --git a/chromium/v8/src/x64/frames-x64.h b/chromium/v8/src/x64/frames-x64.h
index fb17964adae..88130302849 100644
--- a/chromium/v8/src/x64/frames-x64.h
+++ b/chromium/v8/src/x64/frames-x64.h
@@ -1,29 +1,6 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
@@ -41,8 +18,6 @@ const RegList kJSCallerSaved =
const int kNumJSCallerSaved = 5;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 16;
@@ -56,16 +31,19 @@ class EntryFrameConstants : public AllStatic {
static const int kXMMRegistersBlockSize =
kXMMRegisterSize * kCalleeSaveXMMRegisters;
static const int kCallerFPOffset =
- -10 * kPointerSize - kXMMRegistersBlockSize;
+ -3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
#else
- static const int kCallerFPOffset = -8 * kPointerSize;
+ // We have 3 Push and 5 pushq in the JSEntryStub::GenerateBody.
+ static const int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
#endif
- static const int kArgvOffset = 6 * kPointerSize;
+ static const int kArgvOffset = 6 * kPointerSize;
};
class ExitFrameConstants : public AllStatic {
public:
+ static const int kFrameSize = 2 * kPointerSize;
+
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@@ -75,6 +53,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
@@ -128,6 +108,10 @@ inline Object* JavaScriptFrame::function_slot_object() const {
inline void StackHandler::SetFp(Address slot, Address fp) {
+ if (kFPOnStackSize == 2 * kPointerSize) {
+ // Zero out the high-32 bit of FP for x32 port.
+ Memory::Address_at(slot + kPointerSize) = 0;
+ }
Memory::Address_at(slot) = fp;
}
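
The EntryFrameConstants change above splits kCallerFPOffset into pointer-sized Push slots and register-sized pushq slots, per the "3 Push and 5 pushq in the JSEntryStub::GenerateBody" comment, so one expression works even when pointers are narrower than registers. A worked example under the assumption that kPointerSize is 8 on x64 and 4 on the x32 port while kRegisterSize stays 8:

    #include <cstdio>

    int main() {
      struct Config { const char* name; int kPointerSize; int kRegisterSize; };
      const Config configs[] = {
        { "x64", 8, 8 },  // kPointerSize == kInt64Size
        { "x32", 4, 8 },  // 32-bit tagged pointers, 64-bit registers
      };
      for (const Config& c : configs) {
        // 3 pointer-sized Push slots and 5 full-register pushq slots, as
        // the hunk's comment describes for JSEntryStub::GenerateBody.
        int caller_fp_offset = -3 * c.kPointerSize + -5 * c.kRegisterSize;
        std::printf("%s: kCallerFPOffset = %d\n", c.name, caller_fp_offset);
      }
      // x64 gives -64, matching the old -8 * kPointerSize; x32 gives -52.
      return 0;
    }
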
diff --git a/chromium/v8/src/x64/full-codegen-x64.cc b/chromium/v8/src/x64/full-codegen-x64.cc
index e4793683ee7..fa1eee6e517 100644
--- a/chromium/v8/src/x64/full-codegen-x64.cc
+++ b/chromium/v8/src/x64/full-codegen-x64.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,7 +51,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitPatchInfo() {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
+ ASSERT(is_uint8(delta_to_patch_site));
__ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
@@ -118,6 +95,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,17 +110,23 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). rcx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
@@ -152,7 +136,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
+ __ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -162,9 +146,34 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ movp(rcx, rsp);
+ __ subp(rcx, Immediate(locals_count * kPointerSize));
+ __ CompareRoot(rcx, Heap::kRealStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+ }
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ movp(rcx, Immediate(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ Push(rdx);
+ }
+ // Continue loop if not done.
+ __ decp(rcx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ Push(rdx);
}
}
}
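
The locals-allocation hunk above stops emitting one push per local: for 128 or more locals it first checks against the real stack limit, then emits full batches of kMaxPushes pushes inside a counted loop and unrolls only the remainder. A standalone sketch of the batching arithmetic, with an illustrative locals_count:

    #include <cstdio>

    int main() {
      const int kMaxPushes = 32;     // same constant as in the hunk
      const int locals_count = 200;  // example input, large enough to loop

      int loop_iterations = locals_count / kMaxPushes;  // 6 full batches
      int remaining = locals_count % kMaxPushes;        // 8 leftover pushes

      // Generated code size now scales with kMaxPushes + remaining pushes
      // (plus a small loop), not with locals_count straight-line pushes.
      std::printf("loop of %d iterations x %d pushes, then %d more pushes\n",
                  loop_iterations, kMaxPushes, remaining);
      return 0;
    }
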
@@ -175,21 +184,26 @@ void FullCodeGenerator::Generate() {
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Push(rdi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
@@ -199,13 +213,20 @@ void FullCodeGenerator::Generate() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(
- rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(
+ rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
}
@@ -217,30 +238,30 @@ void FullCodeGenerator::Generate() {
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) {
- __ push(rdi);
+ __ Push(rdi);
} else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(rdx,
+ __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
+ __ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(type);
+ ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
@@ -264,7 +285,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -273,11 +294,11 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
@@ -302,7 +323,7 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ SmiAddConstant(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(-delta));
}
@@ -310,40 +331,41 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing; if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+ __ Move(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ Move(kScratchRegister, Smi::FromInt(reset_value));
- __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
+static const byte kJnsOffset = kPointerSize == kInt64Size ? 0x1d : 0x14;
+
+
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ __ j(positive, &ok, Label::kNear);
+ {
+ PredictableCodeSizeScope predictible_code_size_scope(masm_, kJnsOffset);
+ DontEmitDebugCodeScope dont_emit_debug_code_scope(masm_);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- EmitProfilingCounterReset();
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
+
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
@@ -359,34 +381,27 @@ void FullCodeGenerator::EmitReturnSequence() {
} else {
__ bind(&return_label_);
if (FLAG_trace) {
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(rax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- }
- __ pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
}
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ Push(rax);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(rax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -396,18 +411,18 @@ void FullCodeGenerator::EmitReturnSequence() {
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
+ // have just generated at least 7 bytes: "movp rsp, rbp; pop rbp; ret k"
+ // (3 + 1 + 3) for x64 and at least 6 (2 + 1 + 3) bytes for x32.
+ const int kPadding = Assembler::kJSReturnSequenceLength -
+ kPointerSize == kInt64Size ? 7 : 6;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
@@ -415,7 +430,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
+
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -435,7 +450,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
+ __ Push(operand);
}
@@ -553,7 +568,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
ASSERT(count > 0);
if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
+ __ movp(Operand(rsp, 0), reg);
}
@@ -644,8 +659,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
- __ testq(result_register(), result_register());
+ CallIC(ic, condition->test_id());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -696,7 +711,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
ASSERT(var->IsContextSlot() || var->IsStackAllocated());
MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
+ __ movp(dest, location);
}
@@ -709,7 +724,7 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch0.is(scratch1));
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
+ __ movp(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
@@ -744,7 +759,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
__ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
@@ -761,7 +776,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -776,7 +791,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
+ __ movp(StackOperand(variable), kScratchRegister);
}
break;
@@ -785,7 +800,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
+ __ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -793,7 +808,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
@@ -809,7 +824,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -835,7 +850,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
+ __ movp(StackOperand(variable), result_register());
break;
}
@@ -843,7 +858,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
+ __ movp(ContextOperand(rsi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(rsi,
@@ -859,11 +874,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
__ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -880,11 +895,11 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
// Load instance object.
__ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
+ __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
// Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
+ __ movp(ContextOperand(rsi, variable->index()), rax);
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(rsi,
Context::SlotOffset(variable->index()),
@@ -930,10 +945,10 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -941,7 +956,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -978,16 +993,16 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ movp(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
+ __ movp(rcx, rdx);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -997,10 +1012,19 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
- __ testq(rax, rax);
+ Label skip;
+ __ jmp(&skip, Label::kNear);
+ PrepareForBailout(clause, TOS_REG);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &next_test);
+ __ Drop(1);
+ __ jmp(clause->body_target());
+ __ bind(&skip);
+
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -1032,6 +1056,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1045,7 +1070,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
@@ -1056,10 +1081,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
- __ push(rax);
+ __ Push(rax);
// Check for proxies.
Label call_runtime;
@@ -1076,12 +1101,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
+ __ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a map from the runtime call, we can do a fast
@@ -1102,69 +1127,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
+ __ Push(rax); // Map.
+ __ Push(rcx); // Enumeration cache.
+ __ Push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
-
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
+ __ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
__ j(above, &non_proxy);
__ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
+ __ Push(rbx); // Smi
+ __ Push(rax); // Array
+ __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movp(rbx, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
+ __ movp(rbx, FieldOperand(rbx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movp(rdx, Operand(rsp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, Operand(rsp, 4 * kPointerSize));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
@@ -1175,17 +1196,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
+ __ Push(rcx); // Enumerable.
+ __ Push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
+ __ movp(rbx, rax);
// Update the 'each' property or variable from the possibly filtered
// entry in register rbx.
__ bind(&update_each);
- __ movq(result_register(), rbx);
+ __ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
@@ -1204,7 +1225,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
+ __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1220,24 +1241,17 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Iteration loop_statement(this, stmt);
increment_loop_depth();
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
+ // var iterable = subject
+ VisitForAccumulatorValue(stmt->assign_iterable());
- // As with for-in, skip the loop if the iterator is null or undefined.
+ // As with for-in, skip the loop if the iterable is null or undefined.
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.break_label());
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, loop_statement.break_label());
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
+ // var iterator = iterable[Symbol.iterator]();
+ VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
@@ -1284,16 +1298,18 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(isolate(),
+ info->strict_mode(),
+ info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(info);
__ Push(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(rax);
}
@@ -1314,21 +1330,21 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1337,32 +1353,31 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// safe to use raw labels here.
Label next, fast;
if (!context.is(temp)) {
- __ movq(temp, context);
+ __ movp(temp, context);
}
// Load map for comparison into register, outside loop.
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, GlobalObjectOperand());
+ __ movp(rax, GlobalObjectOperand());
__ Move(rcx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+ ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
}
@@ -1374,19 +1389,19 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
@@ -1410,17 +1425,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1437,13 +1451,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ movp(rax, GlobalObjectOperand());
+ CallLoadIC(CONTEXTUAL);
context()->Plug(rax);
break;
}
@@ -1451,7 +1464,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1483,7 +1497,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1493,14 +1507,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
// Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1513,15 +1527,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
+ __ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1538,22 +1552,22 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->pattern());
__ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -1562,23 +1576,23 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
context()->Plug(rax);
}
@@ -1605,23 +1619,22 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
+ FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1642,7 +1655,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(rax); // Save result on the stack
+ __ Push(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1656,18 +1669,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
- __ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
+ __ movp(rdx, Operand(rsp, 0));
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1678,7 +1688,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
@@ -1700,7 +1710,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
@@ -1710,7 +1720,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
ASSERT(result_saved);
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ CallRuntime(Runtime::kToFastProperties, 1);
}
@@ -1741,54 +1751,26 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
- __ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
@@ -1803,7 +1785,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax); // array literal
+ __ Push(rax); // array literal
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1813,10 +1795,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
+ __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
// Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
+ __ movp(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
__ RecordWriteField(rbx, offset, result_register(), rcx,
kDontSaveFPRegs,
@@ -1825,7 +1807,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
// Store the subexpression value in the array's elements.
__ Move(rcx, Smi::FromInt(i));
- StoreArrayLiteralElementStub stub;
+ StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1833,7 +1815,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- __ addq(rsp, Immediate(kPointerSize)); // literal index
+ __ addp(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1842,13 +1824,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1870,7 +1848,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ __ Push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1879,8 +1857,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
- __ push(rax);
+ __ movp(rdx, Operand(rsp, 0));
+ __ Push(rax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1910,7 +1888,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
+ __ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1964,7 +1942,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::SUSPEND:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
- __ push(result_register());
+ __ Push(result_register());
// Fall through.
case Yield::INITIAL: {
Label suspend, continuation, post_runtime, resume;
@@ -1979,20 +1957,20 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
- __ push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ __ Pop(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -2029,37 +2007,37 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // exception
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // exception
__ jmp(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(rax); // result
+ __ Pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result
+ __ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
- __ movq(rax, Operand(rsp, generator_object_depth));
- __ push(rax); // g
+ __ movp(rax, Operand(rsp, generator_object_depth));
+ __ Push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
- __ movq(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movq(rcx, rsi);
+ __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
+ __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movq(context_register(),
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ pop(rax); // result
+ __ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
@@ -2067,33 +2045,38 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // received
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // received
// result = receiver[f](arg);
__ bind(&l_call);
- Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(1);
- CallIC(ic);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The key is still on the stack; drop it.
+ __ movp(rdx, Operand(rsp, kPointerSize));
+ __ movp(rax, Operand(rsp, 2 * kPointerSize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ movp(rdi, rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rdi);
+ CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
__ bind(&l_loop);
- __ push(rax); // save result
+ __ Push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
- Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(done_ic); // result.done in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result
+ __ Pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
- Handle<Code> value_ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(value_ic); // result.value in rax
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
}
@@ -2105,38 +2088,39 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. rbx
- // will hold the generator object until the activation has been resumed.
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+ // is read to throw the value when the resumed generator is already closed.
+ // rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(rbx);
+ __ Pop(rbx);
// Check generator state.
- Label wrong_state, done;
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
+ Label wrong_state, closed_state, done;
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(0));
- __ j(less_equal, &wrong_state);
+ __ j(equal, &closed_state);
+ __ j(less, &wrong_state);
// Load suspended function and context.
- __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
- __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
- __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rdx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
@@ -2146,26 +2130,26 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
// Load the operand stack size.
- __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
__ SmiToInteger32(rdx, rdx);
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
@@ -2176,22 +2160,36 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
- __ push(rbx);
- __ push(result_register());
+ __ Push(rbx);
+ __ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
+ // Reach here when generator is closed.
+ __ bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(rax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ jmp(&done);
+
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(rbx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
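
The reordered state check above relies on the continuation encoding spelled out by the two STATIC_ASSERTs: a negative continuation means the generator is currently executing, zero means it is closed, and a positive value is the offset at which to resume. A small self-contained C++ restatement of that dispatch (names are illustrative, not V8's):

    #include <cassert>

    enum class GeneratorState { kExecuting, kClosed, kSuspended };

    // Mirrors: j(equal, &closed_state); j(less, &wrong_state); else resume.
    GeneratorState Classify(int continuation) {
      if (continuation == 0) return GeneratorState::kClosed;     // closed_state
      if (continuation < 0) return GeneratorState::kExecuting;   // wrong_state
      return GeneratorState::kSuspended;                         // resume at offset
    }

    int main() {
      assert(Classify(0) == GeneratorState::kClosed);
      assert(Classify(-1) == GeneratorState::kExecuting);
      assert(Classify(42) == GeneratorState::kSuspended);
      return 0;
    }
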
@@ -2202,30 +2200,30 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->generator_result_map());
+ Handle<Map> map(isolate()->native_context()->iterator_result_map());
__ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movq(context_register(),
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
- __ pop(rcx);
+ __ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ Move(FieldOperand(rax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
rcx);
- __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ __ movp(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
rdx);
// Only the value field needs a write barrier, as the other values are in the
@@ -2239,15 +2237,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->value());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2260,17 +2257,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ pop(rdx);
- __ movq(rcx, rax);
- __ or_(rax, rdx);
+ __ Pop(rdx);
+ __ movp(rcx, rax);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
- __ movq(rax, rcx);
- BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ __ movp(rax, rcx);
+ BinaryOpICStub stub(isolate(), op, mode);
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2280,7 +2276,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ SmiShiftArithmeticRight(rax, rdx, rcx);
break;
case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx);
+ __ SmiShiftLeft(rax, rdx, rcx, &stub_call);
break;
case Token::SHR:
__ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
@@ -2316,23 +2312,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
- __ pop(rdx);
- BinaryOpICStub stub(op, mode);
+ __ Pop(rdx);
+ BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2353,25 +2343,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
- __ pop(rax); // Restore value.
+ __ movp(rdx, rax);
+ __ Pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ __ movp(rcx, rax);
+ __ Pop(rdx);
+ __ Pop(rax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2382,90 +2369,86 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Push(rax); // Value.
+ __ Push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(strict_mode));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (op == Token::INIT_CONST) {
+ __ movp(rdx, GlobalObjectOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
Label skip;
- __ movq(rdx, StackOperand(var));
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
- __ movq(rdx, location);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2476,16 +2459,13 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2495,14 +2475,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ pop(rcx);
- __ pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2521,7 +2501,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
+ __ Pop(rdx);
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2529,73 +2509,67 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ call(code, rmode, ast_id);
+ __ call(code, RelocInfo::CODE_TARGET, ast_id);
}
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ CallIC::CallType call_type = callee->IsVariableProxy()
+ ? CallIC::FUNCTION
+ : CallIC::METHOD;
+ // Get the target function.
+ if (call_type == CallIC::FUNCTION) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
}
- __ Move(rcx, name);
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rax, Operand(rsp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
}
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
+
+ EmitCall(expr, call_type);
}
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
+// Common code for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
+ Expression* callee = expr->expression();
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ movp(rdx, Operand(rsp, 0));
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ EmitCall(expr, CallIC::METHOD);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+ // Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2603,23 +2577,21 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
VisitForStackValue(args->at(i));
}
}
- // Record source position for debugger.
+
+ // Record source position of the IC call.
SetSourcePosition(expr->position());
+ Handle<Code> ic = CallIC::initialize_stub(
+ isolate(), arg_count, call_type);
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
@@ -2628,23 +2600,23 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
+ __ Push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ push(args.GetReceiverOperand());
+ __ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
+ __ Push(Smi::FromInt(strict_mode()));
  // Push the start position of the scope the call resides in.
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2657,12 +2629,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
+ Call::CallType call_type = expr->GetCallType(isolate());
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2677,30 +2648,29 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ __ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, rax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Call to a global variable. Push global object as receiver for the
- // call IC lookup.
- __ push(GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithLoadIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2711,11 +2681,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ push(context_register());
+ __ Push(context_register());
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2724,38 +2694,35 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
- __ push(rax);
+ __ Push(rax);
// The receiver is implicitly the global receiver. Indicate this by
      // passing undefined to the call function stub.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ bind(&call);
}
// The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
+ // LoadContextSlot.
+ EmitCall(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->value(),
- RelocInfo::CODE_TARGET);
+ EmitCallWithLoadIC(expr);
} else {
- EmitKeyedCallWithIC(expr, property->key());
+ EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
+ ASSERT(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ EmitCall(expr);
}
#ifdef DEBUG
@@ -2789,17 +2756,20 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
+ __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ if (FLAG_pretenuring_call_new) {
+ EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
@@ -2863,15 +2833,15 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2915,7 +2885,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -2943,14 +2913,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Check whether this map has already been checked to be safe for default
// valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
@@ -2961,49 +2931,46 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
- SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
- Operand(
- r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+ __ leap(rcx, Operand(r8, rcx, times_8, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+ __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
  // Loop through all the keys in the descriptor array. If one of these is
  // the internalized string "valueOf", the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(r8, 0));
+ __ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
  // If a valueOf property is not found on the object, check that its
  // prototype is the unmodified String prototype. If not, the result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3050,8 +3017,8 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(rax, map, if_false, DO_SMI_CHECK);
__ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- __ j(not_equal, if_false);
+ Immediate(0x1));
+ __ j(no_overflow, if_false);
__ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
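
The rewritten exponent check in EmitIsMinusZero above exploits the bit pattern of -0.0: its upper 32 bits (the word read via HeapNumber::kExponentOffset) are exactly 0x80000000, and `cmp reg, 1` sets the x86 overflow flag only when reg equals INT32_MIN, so a single j(no_overflow, if_false) rejects every other value. A portable C++ sketch of the same predicate (the field layout is assumed from the code above):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // "exponent" word (little-endian)
      uint32_t lo = static_cast<uint32_t>(bits);        // "mantissa" word
      return hi == 0x80000000u && lo == 0u;             // only -0.0 matches
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));
      assert(!IsMinusZero(-1.0));
      assert(!IsMinusZero(-5e-324));  // smallest negative denormal is not -0
      return 0;
    }
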
@@ -3117,14 +3084,14 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -3152,8 +3119,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ pop(rbx);
- __ cmpq(rax, rbx);
+ __ Pop(rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3168,9 +3135,9 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// ArgumentsAccessStub expects the key in rdx and the formal
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3184,14 +3151,14 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
__ AssertSmi(rax);
@@ -3229,14 +3196,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+ __ movp(rax, FieldOperand(rax, Map::kConstructorOffset));
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &non_function_constructor);
// rax now contains the constructor function. Grab the
// instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
__ jmp(&done);
// Functions have class 'Function'.
@@ -3260,30 +3227,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- SubStringStub stub;
+ SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3296,7 +3242,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
+ RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3320,7 +3266,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
// If the object is not a value type, return the object.
__ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
__ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+ __ movp(rax, FieldOperand(rax, JSValue::kValueOffset));
__ bind(&done);
context()->Plug(rax);
@@ -3345,30 +3291,30 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ j(not_equal, &not_date_object);
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
__ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ movp(scratch, stamp_operand);
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(rax);
}
@@ -3385,12 +3331,12 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
- __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
- __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiValue);
}
__ SmiToInteger32(value, value);
@@ -3418,12 +3364,12 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
- __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
- __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
+ __ Check(__ CheckSmi(value), kNonSmiValue);
+ __ Check(__ CheckSmi(index), kNonSmiValue);
}
__ SmiToInteger32(value, value);
@@ -3446,7 +3392,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3458,7 +3404,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
+ __ Pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -3469,10 +3415,10 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
__ j(not_equal, &done);
// Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+ __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
@@ -3487,7 +3433,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into rax and call the stub.
VisitForAccumulatorValue(args->at(0));
- NumberToStringStub stub;
+ NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3523,7 +3469,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3570,7 +3516,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3609,21 +3555,12 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- if (FLAG_new_string_add) {
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(rdx);
- NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- } else {
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
- }
+ __ Pop(rdx);
+ StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
context()->Plug(rax);
}
@@ -3635,34 +3572,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
+ StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() >= 2);
@@ -3680,15 +3595,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ j(not_equal, &runtime);
// InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
+ __ movp(rdi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
__ bind(&runtime);
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3697,12 +3611,14 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
+ RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(rbx);
+ __ Pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3729,26 +3645,26 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = rax;
Register cache = rbx;
Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
+ __ movp(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movp(cache,
FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
+ __ movp(cache,
ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
+ __ movp(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
- // tmp now holds finger offset as a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ __ movp(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
+ __ movp(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
@@ -3756,50 +3672,11 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
+ __ Push(cache);
+ __ Push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok, Label::kNear);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail, Label::kNear);
- __ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail, Label::kNear);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
__ bind(&done);
-
context()->Plug(rax);
}
@@ -3873,7 +3750,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
+ __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3885,7 +3762,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Array has fast elements, so its length must be a smi.
// If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
__ LoadRoot(rax, Heap::kempty_stringRootIndex);
@@ -3899,7 +3776,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Save the FixedArray containing array's elements.
// End of array's live range.
elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
@@ -3911,16 +3788,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
- __ movq(string, FieldOperand(elements,
+ __ movp(string, FieldOperand(elements,
index,
times_pointer_size,
FixedArray::kHeaderSize));
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -3942,7 +3819,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// If array_length is 1, return elements[0], a string.
__ cmpl(array_length, Immediate(1));
__ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
__ jmp(&return_result);
__ bind(&not_size_one_array);
@@ -3957,9 +3834,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// index: Array length.
// Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
@@ -3986,10 +3863,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements: FixedArray of strings.
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
- __ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ movp(result_operand, result_pos);
+ __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
@@ -4010,12 +3887,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch: array length.
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4050,16 +3927,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
+ __ movp(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4075,18 +3952,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain
// a loop limit.
__ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
+ __ movp(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -4101,35 +3978,35 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator_operand (rsp[0x10]): Address of first char of separator.
// Copy the separator to the result.
- __ movq(string, separator_operand);
+ __ movp(string, separator_operand);
__ movl(string_length, scratch);
__ CopyBytes(result_pos, string, string_length, 2);
__ bind(&loop_3_entry);
// Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
__ bind(&done);
- __ movq(rax, result_operand);
+ __ movp(rax, result_operand);
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ addp(rsp, Immediate(3 * kPointerSize));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4137,32 +4014,47 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
+ // Push the builtins object as receiver.
+ __ movp(rax, GlobalObjectOperand());
+ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+
+ // Load the function from the receiver.
+ __ movp(rax, Operand(rsp, 0));
__ Move(rcx, expr->name());
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Push(Operand(rsp, 0));
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub);
+
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, rax);
+
} else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime.
__ CallRuntime(expr->function(), arg_count);
+ context()->Plug(rax);
}
- context()->Plug(rax);
}
@@ -4176,20 +4068,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
+ __ Push(Smi::FromInt(strict_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ push(GlobalObjectOperand());
+ __ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
+ __ Push(Smi::FromInt(SLOPPY));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4200,9 +4090,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ __ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(rax);
}
} else {
@@ -4283,16 +4173,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidReferenceExpression());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4317,13 +4202,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
+ __ Push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
+ __ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
+ __ Push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4351,13 +4236,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
@@ -4375,7 +4260,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&slow);
}
- ToNumberStub convert_stub;
+ ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4386,13 +4271,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
+ __ movp(Operand(rsp, kPointerSize), rax);
break;
case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
}
}
@@ -4403,12 +4288,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
__ bind(&stub_call);
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
+ BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
+ CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4438,11 +4321,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4454,12 +4334,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rcx);
+ __ Pop(rdx);
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4480,16 +4360,16 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
// error.
- CallIC(ic);
+ CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4497,9 +4377,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ push(rsi);
+ __ Push(rsi);
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4526,12 +4406,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
+ } else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
@@ -4539,36 +4420,36 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ } else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, SYMBOL_TYPE, rdx);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ } else if (String::Equals(check, factory->boolean_string())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
+ String::Equals(check, factory->null_string())) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ } else if (String::Equals(check, factory->undefined_string())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
+ } else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(rax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
__ j(equal, if_true);
__ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
+ } else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -4619,10 +4500,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4631,16 +4512,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
+ __ Pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
+ __ movp(rcx, rdx);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4648,11 +4529,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4683,8 +4564,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4692,7 +4573,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
context()->Plug(rax);
}
@@ -4709,12 +4590,12 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
+ __ movp(Operand(rbp, frame_offset), value);
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
+ __ movp(dst, ContextOperand(rsi, context_index));
}
@@ -4731,10 +4612,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4749,29 +4630,29 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
+ __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
// Store result register while executing finally block.
- __ push(result_register());
+ __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Load(rdx, pending_message_script);
- __ push(rdx);
+ __ Push(rdx);
}
@@ -4779,30 +4660,30 @@ void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore pending message from stack.
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Store(pending_message_script, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
// Restore result register from stack.
- __ pop(result_register());
+ __ Pop(result_register());
// Uncook return address.
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ jmp(rdx);
}
@@ -4823,8 +4704,8 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ movp(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
}
__ PopTryHandler();
__ call(finally_entry_);
@@ -4839,10 +4720,11 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4875,6 +4757,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4892,20 +4775,23 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address,
+ unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
diff --git a/chromium/v8/src/x64/ic-x64.cc b/chromium/v8/src/x64/ic-x64.cc
index 9448d3771a7..0cda1df16bd 100644
--- a/chromium/v8/src/x64/ic-x64.cc
+++ b/chromium/v8/src/x64/ic-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
+#include "src/codegen.h"
+#include "src/ic-inl.h"
+#include "src/runtime.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -74,7 +51,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
@@ -90,7 +67,7 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
(1 << Map::kHasNamedInterceptor)));
__ j(not_zero, miss);
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss);
@@ -150,7 +127,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
+ __ movp(result,
Operand(elements, r1, times_pointer_size,
kValueOffset - kHeapObjectTag));
}
@@ -212,14 +189,14 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
+ __ leap(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
+ __ movp(Operand(scratch1, 0), value);
// Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
+ __ movp(scratch0, value);
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
@@ -284,7 +261,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
//
// scratch - used to hold elements of the receiver and the loaded value.
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
@@ -299,7 +276,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
__ j(above_equal, out_of_range);
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
+ __ movp(scratch, FieldOperand(elements,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -308,7 +285,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
if (!result.is(scratch)) {
- __ movq(result, scratch);
+ __ movp(result, scratch);
}
}
@@ -384,7 +361,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver
@@ -412,21 +389,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ shrl(rdi, Immediate(String::kHashShift));
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
@@ -438,21 +415,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ movp(rdi, rcx);
+ __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
@@ -467,8 +444,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -477,16 +454,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rcx, rdi);
+ __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
__ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
+ __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -498,7 +475,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// rax: key
// rbx: elements
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
__ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
@@ -560,7 +537,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpUnlessNonNegativeSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
@@ -571,8 +548,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ PopReturnAddressTo(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
+ __ Push(rdx); // receiver
+ __ Push(rax); // key
__ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
@@ -605,7 +582,7 @@ static void KeyedStoreGenerateGenericHelper(
// rdx: receiver (a JSArray)
// r9: map of receiver
if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
@@ -614,7 +591,7 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
- __ movq(kScratchRegister, FieldOperand(rbx,
+ __ movp(kScratchRegister, FieldOperand(rbx,
rcx,
times_pointer_size,
FixedArray::kHeaderSize));
@@ -633,7 +610,7 @@ static void KeyedStoreGenerateGenericHelper(
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ ret(0);
@@ -648,9 +625,9 @@ static void KeyedStoreGenerateGenericHelper(
__ leal(rdi, Operand(rcx, 1));
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
}
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
+ __ movp(rdx, rax); // Preserve the value which is returned.
__ RecordWriteArray(
rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
@@ -683,10 +660,10 @@ static void KeyedStoreGenerateGenericHelper(
__ ret(0);
__ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
@@ -700,7 +677,7 @@ static void KeyedStoreGenerateGenericHelper(
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
@@ -713,14 +690,14 @@ static void KeyedStoreGenerateGenericHelper(
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
@@ -728,13 +705,13 @@ static void KeyedStoreGenerateGenericHelper(
slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -748,7 +725,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
@@ -768,7 +745,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: JSObject
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check array bounds.
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
@@ -796,7 +773,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
__ j(below_equal, &slow);
// Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_if_double_array);
__ jmp(&fast_object_grow);
@@ -814,7 +791,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rdx: receiver (a JSArray)
// rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
@@ -828,347 +805,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, rdx, rcx, rbx, no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, args.GetReceiverOperand());
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_name, lookup_monomorphic_cache;
- Label index_smi, index_name;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_name);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // ebx: untagged index
- __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, rcx, rax, rbx, &index_name, &slow_call);
-
- // The key is known to be a unique name.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor a unique name,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is really a name.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectNameType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -1193,20 +829,20 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
__ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
+ __ movp(scratch2, FieldOperand(scratch1,
scratch3,
times_pointer_size,
kHeaderSize));
@@ -1216,7 +852,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
__ SmiToInteger64(scratch3, scratch2);
return FieldOperand(scratch1,
scratch3,
@@ -1236,11 +872,11 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
@@ -1250,7 +886,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
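
For orientation, the layout that both lookup helpers assume for a sloppy-arguments elements array, as far as it can be read off the offsets above, is sketched in the comment below; it is illustrative only, not a V8 declaration.

// Sloppy-arguments elements (a FixedArray), as used by the helpers above:
//   slot 0      : context holding the aliased (mapped) parameters
//   slot 1      : backing-store FixedArray with the unmapped values
//   slot 2 + i  : for parameter i, a Smi index into the context, or the hole
//                 if that parameter is unmapped
// This is why the mapped lookup skips kHeaderSize + 2 * kPointerSize and
// bounds-checks the key against (length - 2), while the unmapped lookup reads
// the backing store from kHeaderSize + kPointerSize.
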
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -1260,7 +896,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location =
GenerateMappedArgumentsLookup(
masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
+ __ movp(rax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in rbx.
@@ -1268,14 +904,14 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
__ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
__ j(equal, &slow);
- __ movq(rax, unmapped_location);
+ __ movp(rax, unmapped_location);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1285,9 +921,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Label slow, notin;
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
- __ lea(r9, mapped_location);
- __ movq(r8, rax);
+ __ movp(mapped_location, rax);
+ __ leap(r9, mapped_location);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1299,9 +935,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// The unmapped lookup expects that the parameter map is in rbx.
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
- __ lea(r9, unmapped_location);
- __ movq(r8, rax);
+ __ movp(unmapped_location, rax);
+ __ leap(r9, unmapped_location);
+ __ movp(r8, rax);
__ RecordWrite(rbx,
r9,
r8,
@@ -1314,37 +950,6 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label slow, notin;
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rdi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -1353,9 +958,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1369,15 +972,19 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
- Label miss;
+ Label miss, slow;
GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
// rdx: elements
// Search the dictionary placing the result in rax.
- GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
+ GenerateDictionaryLoad(masm, &slow, rdx, rcx, rbx, rdi, rax);
__ ret(0);
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -1395,8 +1002,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1414,8 +1021,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1434,8 +1041,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1453,8 +1060,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1462,8 +1069,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1472,9 +1078,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1492,9 +1096,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // name
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1528,7 +1132,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1536,9 +1140,9 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rcx);
+ __ Push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
__ PushReturnAddressFrom(rbx);
@@ -1549,7 +1153,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1558,9 +1162,9 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ PushReturnAddressFrom(rbx);
@@ -1579,9 +1183,9 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1599,9 +1203,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1619,9 +1223,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1680,7 +1284,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.cc b/chromium/v8/src/x64/lithium-codegen-x64.cc
index 80024e78e17..81e8e9b99a1 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.cc
+++ b/chromium/v8/src/x64/lithium-codegen-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "x64/lithium-codegen-x64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
+#include "src/x64/lithium-codegen-x64.h"
+#include "src/code-stubs.h"
+#include "src/stub-cache.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -50,9 +27,7 @@ class SafepointGenerator V8_FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const V8_OVERRIDE {
- codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
- }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
@@ -89,15 +64,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
}
@@ -105,7 +73,7 @@ void LChunkBuilder::Abort(BailoutReason reason) {
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
+ __ movp(Operand(rsp, offset), rax);
}
}
#endif
@@ -156,17 +124,23 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
+ // Sloppy mode functions need to replace the receiver with the global proxy
+ // when called as functions (without an explicit receiver object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetReceiverOperand(), kScratchRegister);
+ __ movp(rcx, args.GetReceiverOperand());
+
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+
+ __ movp(args.GetReceiverOperand(), rcx);
+
__ bind(&ok);
}
}
@@ -175,7 +149,11 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ if (info()->IsStub()) {
+ __ StubPrologue();
+ } else {
+ __ Prologue(info()->IsCodePreAgingActive());
+ }
info()->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -183,22 +161,22 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(rax);
+ __ Push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
- __ pop(rax);
+ __ Pop(rax);
} else {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
@@ -213,18 +191,22 @@ bool LCodeGen::GeneratePrologue() {
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
+ FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
} else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ // Context is returned in rax. It replaces the context passed to us.
+ // It's saved in the stack and kept live in rsi.
+ __ movp(rsi, rax);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
@@ -234,12 +216,19 @@ bool LCodeGen::GeneratePrologue() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
+ __ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
+ __ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
}
}
Comment(";;; End allocate local context");
@@ -264,17 +253,47 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
+ instr->hydrogen_value()->representation().IsInteger32() &&
+ instr->result()->IsRegister()) {
+ __ AssertZeroExtended(ToRegister(instr->result()));
+ }
+
+ if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+ // We sign extend the dehoisted key at the definition point when the pointer
+ // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
+ // points and MustSignExtendResult is always false. We can't use
+ // STATIC_ASSERT here as the pointer size is 32-bit for x32.
+ ASSERT(kPointerSize == kInt64Size);
+ if (instr->result()->IsRegister()) {
+ Register result_reg = ToRegister(instr->result());
+ __ movsxlq(result_reg, result_reg);
+ } else {
+ // Sign extend the 32bit result in the stack slots.
+ ASSERT(instr->result()->IsStackSlot());
+ Operand src = ToOperand(instr->result());
+ __ movsxlq(kScratchRegister, src);
+ __ movq(src, kScratchRegister);
+ }
+ }
+}
+
+
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -297,17 +316,17 @@ bool LCodeGen::GenerateJumpTable() {
__ jmp(&needs_frame);
} else {
__ bind(&needs_frame);
- __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
+ __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
+ __ Push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ Push(rsi);
+ __ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
} else {
@@ -330,7 +349,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -344,10 +364,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp); // Caller's frame pointer.
+ __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
@@ -356,8 +376,8 @@ bool LCodeGen::GenerateDeferredCode() {
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
}
__ jmp(code->exit());
}
@@ -400,26 +420,33 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmi();
+ chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}
@@ -572,10 +599,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -603,7 +626,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
int argc) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -640,10 +662,10 @@ void LCodeGen::CallRuntime(const Runtime::Function* function,
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(rsi)) {
- __ movq(rsi, ToRegister(context));
+ __ movp(rsi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
- __ movq(rsi, ToOperand(context));
+ __ movp(rsi, ToOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -669,6 +691,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
@@ -721,7 +744,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
- __ push(rax);
+ __ Push(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
@@ -729,13 +752,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
ASSERT(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
}
@@ -784,46 +807,24 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<JSObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else if (it.rinfo()->target_object()->IsJSObject()) {
- Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded objects after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
- }
-}
-
-
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
+ DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -985,30 +986,19 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ RegExpExecStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ SubStringStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ StringCompareStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1022,281 +1012,376 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->HasPowerOf2Divisor()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ testl(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ negl(dividend);
+ __ andl(dividend, Immediate(mask));
+ __ negl(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(rdx));
+ __ bind(&dividend_is_not_negative);
+ __ andl(dividend, Immediate(mask));
+ __ bind(&done);
+}
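
As a rough standalone sketch of the branching strategy the comment above describes (positive dividends take the fast path; negative ones are negated, masked, and negated back), something like the following C++ captures the idea; ModByPowerOf2 and mask are illustrative names, not V8 API.

#include <cstdint>

int32_t ModByPowerOf2(int32_t dividend, int32_t mask) {  // mask == |divisor| - 1
  if (dividend < 0) {
    // Negate, mask, negate back; the generated code additionally deoptimizes
    // when the result is 0 and a -0 result must be distinguished.
    int64_t negated = -static_cast<int64_t>(dividend);  // safe even for kMinInt
    return static_cast<int32_t>(-(negated & mask));
  }
  return dividend & mask;
}
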
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imull(rdx, rdx, Immediate(Abs(divisor)));
+ __ movl(rax, dividend);
+ __ subl(rax, rdx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmpl(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
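
Functionally, the sequence above amounts to the portable sketch below; the real code derives the quotient with the multiply-based TruncatingDiv helper rather than a division instruction, and ModByConst is an illustrative name.

#include <cstdint>
#include <cstdlib>

int32_t ModByConst(int32_t dividend, int32_t divisor) {  // divisor != 0, not kMinInt
  int32_t quotient = dividend / std::abs(divisor);   // truncating division
  return dividend - quotient * std::abs(divisor);    // equals dividend % divisor
}
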
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(rax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
- __ bind(&done);
+ __ testl(result_reg, result_reg);
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idivl(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sarl(dividend, Immediate(shift));
return;
+ }
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- return;
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr->environment());
}
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ Set(reg2, multiplier);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
+ // If the negation could not overflow, simply shifting is OK.
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ sarl(dividend, Immediate(shift));
+ return;
+ }
+
+ Label not_kmin_int, done;
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
+ __ bind(&not_kmin_int);
+ __ sarl(dividend, Immediate(shift));
+ __ bind(&done);
+}
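
A portable sketch of the two main cases above (the divisor == -1 path, which is plain negation with a deoptimization on kMinInt, is left out); FlooringDivByPowerOf2 is an illustrative name, and arithmetic right shifts on signed values are assumed, as on the targets this code runs on.

#include <cstdint>

int32_t FlooringDivByPowerOf2(int32_t n, int32_t d, int k) {  // |d| == 1 << k, k >= 1
  if (d > 0) return n >> k;                  // arithmetic shift already floors
  if (n == INT32_MIN) return INT32_MIN / d;  // negating kMinInt would overflow
  return -n >> k;                            // floor(n / -2^k) == floor(-n / 2^k)
}
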
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+ Label needs_adjustment, done;
+ __ cmpl(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ decl(rdx);
+ __ bind(&done);
+}
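
The "adjust before and after" trick in the general case corresponds roughly to the sketch below; again the real code uses TruncatingDiv plus a negation for negative divisors, and FlooringDivByConst is only an illustrative name.

#include <cstdint>

int32_t FlooringDivByConst(int32_t n, int32_t d) {  // d != 0, kMinInt / -1 excluded
  if (n == 0 || (n < 0) == (d < 0)) {
    return n / d;  // truncation already equals flooring
  }
  // Bias the dividend one step toward the divisor's sign, truncate, then
  // subtract one, mirroring the leal / TruncatingDiv / decl sequence above.
  return (n + (d > 0 ? 1 : -1)) / d - 1;
}
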
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
+ DeoptimizeIf(zero, instr->environment());
}
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&dividend_not_min_int);
+ }
+
+ // Sign extend to rdx (= remainder).
+ __ cdq();
+ __ idivl(divisor);
+
+ Label done;
+ __ testl(remainder, remainder);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
+ __ bind(&done);
}
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ testl(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
+}
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
- }
- if (divisor < 0) __ negl(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
- Register left_reg = rax;
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ movl(rax, rdx);
+ __ imull(rax, rax, Immediate(divisor));
+ __ subl(rax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
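
In the same spirit, the constant-divisor division above reduces to the sketch below; the exactness check mirrors the imull / subl / DeoptimizeIf(not_equal) sequence, and DivByConst and its deopt flag are illustrative names (divisors of 0, +/-1 and kMinInt are handled elsewhere).

#include <cstdint>
#include <cstdlib>

int32_t DivByConst(int32_t n, int32_t d, bool all_uses_truncate, bool* deopt) {
  int32_t q = n / std::abs(d);           // TruncatingDiv in the real code
  if (d < 0) q = -q;
  if (!all_uses_truncate && q * d != n) *deopt = true;  // remainder would be lost
  return q;
}
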
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register divisor = ToRegister(instr->divisor());
+ Register remainder = ToRegister(instr->temp());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to rdx.
+ // Sign extend to rdx (= remainder).
__ cdq();
- __ idivl(right_reg);
+ __ idivl(divisor);
- if (instr->is_flooring()) {
- Label done;
- __ testl(rdx, rdx);
- __ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
- __ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
@@ -1308,7 +1393,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ movq(kScratchRegister, left);
+ __ movp(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
@@ -1360,14 +1445,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToOperand(right));
+ __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToRegister(right));
+ __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
@@ -1381,14 +1466,17 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
- // Constant can't be represented as Smi due to immediate size limit.
- ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+ // Constant can't be represented as 32-bit Smi due to immediate size
+ // limit.
+ ASSERT(SmiValuesAre32Bits()
+ ? !instr->hydrogen_value()->representation().IsSmi()
+ : SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1397,7 +1485,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1405,7 +1493,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
@@ -1423,7 +1511,9 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ andl(ToRegister(left), Immediate(right_operand));
@@ -1445,13 +1535,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToOperand(right));
+ } else {
+ __ andp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToOperand(right));
+ } else {
+ __ orp(ToRegister(left), ToOperand(right));
+ }
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToOperand(right));
+ } else {
+ __ xorp(ToRegister(left), ToOperand(right));
+ }
break;
default:
UNREACHABLE();
@@ -1461,13 +1563,25 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ andl(ToRegister(left), ToRegister(right));
+ } else {
+ __ andp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ orl(ToRegister(left), ToRegister(right));
+ } else {
+ __ orp(ToRegister(left), ToRegister(right));
+ }
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ if (instr->IsInteger32()) {
+ __ xorl(ToRegister(left), ToRegister(right));
+ } else {
+ __ xorp(ToRegister(left), ToRegister(right));
+ }
break;
default:
UNREACHABLE();
@@ -1521,17 +1635,30 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
+ if (shift_count != 0) {
+ __ shrl(ToRegister(left), Immediate(shift_count));
+ } else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr->environment());
- } else {
- __ shrl(ToRegister(left), Immediate(shift_count));
}
break;
case Token::SHL:
if (shift_count != 0) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ shl(ToRegister(left), Immediate(shift_count));
+ if (SmiValuesAre32Bits()) {
+ __ shlp(ToRegister(left), Immediate(shift_count));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ if (instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ shll(ToRegister(left), Immediate(shift_count - 1));
+ }
+ __ Integer32ToSmi(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
+ }
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1551,17 +1678,19 @@ void LCodeGen::DoSubI(LSubI* instr) {
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ subl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ __ subl(ToRegister(left), Immediate(right_operand));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToRegister(right));
+ __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToOperand(right));
+ __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
@@ -1574,7 +1703,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), instr->value());
+ Register dst = ToRegister(instr->result());
+ if (instr->value() == 0) {
+ __ xorl(dst, dst);
+ } else {
+ __ movl(dst, Immediate(instr->value()));
+ }
}
@@ -1606,8 +1740,9 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
- __ Move(ToRegister(instr->result()), value);
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
}
@@ -1618,40 +1753,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte.
- __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Immediate(Map::kElementsKindMask));
- __ shr(result, Immediate(Map::kElementsKindShift));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Label done;
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
- }
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
@@ -1666,23 +1767,23 @@ void LCodeGen::DoDateField(LDateField* instr) {
DeoptimizeIf(not_equal, instr->environment());
if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
- __ movq(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ movp(kScratchRegister, stamp_operand);
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
+ __ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movp(arg_reg_1, object);
+ __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
@@ -1713,17 +1814,17 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- __ push(string);
- __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
+ __ Push(string);
+ __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(string);
+ __ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
@@ -1772,61 +1873,56 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- ASSERT(ToRegister(instr->context()).is(rsi));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
- bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+ bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
- int32_t offset = ToInteger32(LConstantOperand::cast(right));
- if (is_q) {
- __ lea(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ // No support for smi-immediates for 32-bit SMI.
+ ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+ int32_t offset =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ if (is_p) {
+ __ leap(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_q) {
- __ lea(ToRegister(instr->result()), address);
+ if (is_p) {
+ __ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
- if (is_q) {
- __ addq(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ // No support for smi-immediates for 32-bit SMI.
+ ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation());
+ if (is_p) {
+ __ addp(ToRegister(left), Immediate(right_operand));
} else {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ __ addl(ToRegister(left), Immediate(right_operand));
}
} else if (right->IsRegister()) {
- if (is_q) {
- __ addq(ToRegister(left), ToRegister(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (is_q) {
- __ addq(ToRegister(left), ToOperand(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
@@ -1850,30 +1946,33 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
: greater_equal;
Register left_reg = ToRegister(left);
if (right->IsConstantOperand()) {
- Immediate right_imm =
- Immediate(ToInteger32(LConstantOperand::cast(right)));
- ASSERT(!instr->hydrogen_value()->representation().IsSmi());
+ Immediate right_imm = Immediate(
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->right()->representation()));
+ ASSERT(SmiValuesAre32Bits()
+ ? !instr->hydrogen()->representation().IsSmi()
+ : SmiValuesAre31Bits());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
+ __ movp(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
+ __ movp(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
__ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
+ __ movp(left_reg, right_op);
}
__ bind(&return_left);
} else {
@@ -1941,7 +2040,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
+ ExternalReference::mod_two_doubles_operation(isolate()), 2);
__ movaps(result, xmm_scratch);
break;
}
@@ -1958,8 +2057,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2007,7 +2106,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
@@ -2039,7 +2138,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
@@ -2078,7 +2177,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
const Register map = kScratchRegister;
if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
@@ -2099,7 +2198,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2181,7 +2280,11 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ bool is_unsigned =
+ instr->is_double() ||
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cc = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
@@ -2218,13 +2321,13 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
} else {
__ cmpl(ToOperand(right), Immediate(value));
}
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
+ // We commuted the operands, so commute the condition.
+ cc = CommuteCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
@@ -2247,7 +2350,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
@@ -2265,9 +2368,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
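
An aside on the hole check above: the hole is a specific NaN bit pattern, so once the parity check has established that the value is NaN it is enough to compare the upper 32 bits of the double against the sentinel (kHoleNanUpper32 in V8). A small C++ sketch of that comparison; the helper name is made up for illustration:

// Sketch: mirror the movsd-to-stack + cmpl-of-upper-half sequence above.
#include <cstdint>
#include <cstring>

static bool UpperHalfEquals(double d, uint32_t sentinel_upper32) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));            // reinterpret the bits
  return static_cast<uint32_t>(bits >> 32) == sentinel_upper32;
}
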
@@ -2293,8 +2396,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
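
A note on the Immediate(0x1) / overflow pattern introduced here (the same pattern replaces the 0x80000000 comparisons in DoMathFloor and DoMathRound further down): cmpl against 1 sets the overflow flag exactly when the operand is 0x80000000, because that is the only 32-bit value for which the signed subtraction of 1 overflows, so branching on overflow is equivalent to the old equality test while using a one-byte immediate. A minimal C++ check of that equivalence, for illustration only:

// Sketch: signed x - 1 leaves the int32 range only for x == INT32_MIN,
// i.e. the bit pattern 0x80000000 tested by cmpl(..., Immediate(0x1)).
#include <cassert>
#include <cstdint>

static bool SubOneOverflows(int32_t x) {
  int64_t wide = static_cast<int64_t>(x) - 1;      // exact result
  return wide < INT32_MIN || wide > INT32_MAX;     // outside int32 range
}

int main() {
  assert(SubOneOverflows(INT32_MIN));
  assert(!SubOneOverflows(0) && !SubOneOverflows(-1) && !SubOneOverflows(INT32_MAX));
  return 0;
}
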
@@ -2312,7 +2415,7 @@ Condition LCodeGen::EmitIsObject(Register input,
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2356,7 +2459,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register temp = ToRegister(instr->temp());
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond = EmitIsString(
@@ -2383,10 +2486,10 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
@@ -2401,7 +2504,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
@@ -2430,7 +2533,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
@@ -2492,17 +2595,17 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -2514,8 +2617,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
+ __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
@@ -2552,12 +2655,12 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ __ Push(ToRegister(instr->left()));
+ __ Push(ToRegister(instr->right()));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2599,11 +2702,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -2638,22 +2741,22 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
+ InstanceofStub stub(isolate(), flags);
- __ push(ToRegister(instr->value()));
+ __ Push(ToRegister(instr->value()));
__ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
ASSERT(delta >= 0);
- __ push_imm32(delta);
+ __ PushImm32(delta);
// We are pushing three values on the stack but recording a
// safepoint with two arguments because stub is going to
// remove the third argument from the stack before jumping
// to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
+ CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
@@ -2663,9 +2766,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
@@ -2686,7 +2789,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2702,8 +2805,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
- __ push(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(rax);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
@@ -2711,8 +2814,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
- __ pop(rbp);
+ __ movp(rsp, rbp);
+ __ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
@@ -2724,8 +2827,8 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
- __ shl(reg, Immediate(kPointerSizeLog2));
- __ addq(rsp, reg);
+ __ shlp(reg, Immediate(kPointerSizeLog2));
+ __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
@@ -2750,10 +2853,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2769,37 +2871,24 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// We have a temp because CompareRoot might clobber kScratchRegister.
Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::CELL);
+ __ Move(cell, cell_handle, RelocInfo::CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
// Store the value.
- __ movq(Operand(cell, 0), value);
+ __ movp(Operand(cell, 0), value);
} else {
// Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
- __ movq(Operand(kScratchRegister, 0), value);
+ __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
+ __ movp(Operand(kScratchRegister, 0), value);
}
// Cells are always rescanned, so no write barrier here.
}
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
+ __ movp(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2829,11 +2918,11 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ j(not_equal, &skip_assignment);
}
}
- __ movq(target, value);
+ __ movp(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->temp());
@@ -2867,8 +2956,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register object = ToRegister(instr->object());
- if (FLAG_track_double_fields &&
- instr->hydrogen()->representation().IsDouble()) {
+ if (instr->hydrogen()->representation().IsDouble()) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movsd(result, FieldOperand(object, offset));
return;
@@ -2876,10 +2964,26 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
- __ Load(result, FieldOperand(object, offset), access.representation());
+
+ Representation representation = access.representation();
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+ }
+
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+ __ Load(result, FieldOperand(object, offset), representation);
}
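
An aside on the direct upper-half read added above: with 32-bit smis on x64 the payload lives in the high 32 bits of the tagged word (kSmiTagSize + kSmiShiftSize == 32), so on a little-endian target the untagged integer can be loaded from offset + kPointerSize / 2. A minimal C++ sketch under those assumptions; everything below is illustrative rather than V8 code:

// Sketch: read the int32 payload of a 32-bit smi by loading only the upper
// four bytes of the 64-bit tagged word, as the adjusted offset does above.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int32_t value = -42;
  uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
  int32_t loaded;
  std::memcpy(&loaded, reinterpret_cast<const char*>(&tagged) + 4, 4);
  assert(loaded == value);        // little-endian: upper half sits at +4
  return 0;
}
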
@@ -2889,7 +2993,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2909,7 +3013,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- __ movq(result,
+ __ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
@@ -2922,13 +3026,13 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+ __ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
@@ -2941,15 +3045,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
@@ -2958,9 +3053,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(const_index));
+ if (const_index >= 0 && const_index < const_length) {
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(result, args.GetArgumentOperand(const_index));
+ } else if (FLAG_debug_code) {
+ __ int3();
+ }
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2972,7 +3071,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
StackArgumentsAccessor args(arguments, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(result, args.GetArgumentOperand(0));
+ __ movp(result, args.GetArgumentOperand(0));
}
}
@@ -2980,14 +3079,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -2996,44 +3094,55 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ movsxbl(result, operand);
break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
+ case EXTERNAL_UINT8_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ movzxbl(result, operand);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ movsxwl(result, operand);
break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ movzxwl(result, operand);
break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ movl(result, operand);
break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr->environment());
}
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -3041,7 +3150,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3052,28 +3161,19 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
}
-
if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
+ instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
@@ -3081,41 +3181,58 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(result, double_load_operand);
}
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ Representation representation = hinstr->representation();
+ int offset = instr->base_offset();
+
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
+ }
+ if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ ASSERT(!requires_hole_check);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ offset),
+ Representation::Smi());
+ __ AssertSmi(scratch);
}
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
}
- // Load the result.
- __ movq(result,
+ __ Load(result,
BuildFastArrayOperand(instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ offset),
+ representation);
// Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr->environment());
} else {
@@ -3127,7 +3244,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3140,9 +3257,9 @@ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@@ -3151,14 +3268,18 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ (constant_value << shift_size) + offset);
} else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsSmi() && (shift_size >= 1)) {
+ ASSERT(SmiValuesAre31Bits());
+ shift_size -= kSmiTagSize;
+ }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << shift_size));
+ offset);
}
}
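
A note on the shift_size -= kSmiTagSize adjustment above: with 31-bit smis the key register holds index << 1, so scaling that tagged key by half the element size addresses the same slot as scaling the untagged index by the full element size. A tiny standalone check of the identity, for illustration:

// Sketch: (index << kSmiTagSize) * 2^(shift_size - kSmiTagSize)
//         == index * 2^shift_size, the invariant behind the smaller
//         ScaleFactor chosen for smi keys in BuildFastArrayOperand.
#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  for (int shift_size = 1; shift_size <= 3; shift_size++) {  // 2/4/8-byte elements
    for (int64_t index = 0; index < 64; index++) {
      int64_t tagged_key = index << kSmiTagSize;
      assert((tagged_key << (shift_size - kSmiTagSize)) == (index << shift_size));
    }
  }
  return 0;
}
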
@@ -3177,22 +3298,22 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+ __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
- __ movq(result, rbp);
+ __ movp(result, rbp);
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -3208,15 +3329,15 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiToInteger32(result,
Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -3236,20 +3357,22 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Label global_object, receiver_ok;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movp(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
+ }
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3262,16 +3385,16 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(is_smi, instr->environment());
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ movq(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
+ __ movp(receiver,
+ Operand(receiver,
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(receiver,
+ FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
__ bind(&receiver_ok);
}
@@ -3288,11 +3411,11 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
- __ push(receiver);
- __ movq(receiver, length);
+ __ Push(receiver);
+ __ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
@@ -3303,7 +3426,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ push(args.GetArgumentOperand(0));
+ __ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3314,8 +3437,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
@@ -3332,14 +3454,14 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in rsi.
ASSERT(result.is(rsi));
@@ -3347,35 +3469,12 @@ void LCodeGen::DoContext(LContext* instr) {
}
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3383,7 +3482,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -3398,7 +3496,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Set rax to arguments count if adaption is not needed. Assumes that rax
// is available to write to at this point.
@@ -3407,11 +3505,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
// Invoke function.
- __ SetCallKind(rcx, call_kind);
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3422,20 +3519,63 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(
- function, expected, count, CALL_FUNCTION, generator, call_kind);
+ __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
}
}
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->function(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_UNINITIALIZED);
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Set(rax, instr->arity());
+ }
+
+ // Change context.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ bool is_self_call = false;
+ if (instr->hydrogen()->function()->IsConstant()) {
+ Handle<JSFunction> jsfun = Handle<JSFunction>::null();
+ HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+ jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
+ is_self_call = jsfun.is_identical_to(info()->closure());
+ }
+
+ if (is_self_call) {
+ __ CallSelf();
+ } else {
+ Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
+ generator.BeforeCall(__ CallSize(target));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
@@ -3466,17 +3606,17 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+ Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) __ movq(tmp, rax);
+ if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
- __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ shlq(tmp2, Immediate(1));
+ __ shrq(tmp2, Immediate(1));
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
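
An aside on the shlq/shrq pair that replaces the MoveDouble sequence above: shifting the raw 64-bit pattern of the heap number's double left by one and then logically right by one clears the sign bit, which is fabs() performed directly on the bits. A self-contained check, not taken from the commit:

// Sketch: clear the sign bit of a double via shift-left-1 / logical-right-1.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double d = -2.5;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bits = (bits << 1) >> 1;        // bit 63 (the sign) is discarded
  std::memcpy(&d, &bits, sizeof(d));
  assert(d == 2.5);
  return 0;
}
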
@@ -3496,10 +3636,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
@@ -3559,8 +3699,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3577,15 +3717,15 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(&done);
__ bind(&positive_sign);
}
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3622,9 +3762,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3639,9 +3779,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3665,9 +3805,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
+ XMMRegister output = ToDoubleRegister(instr->result());
+ if (instr->value()->IsDoubleRegister()) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ __ sqrtsd(output, input);
+ } else {
+ Operand input = ToOperand(instr->value());
+ __ sqrtsd(output, input);
+ }
}
@@ -3717,7 +3862,7 @@ void LCodeGen::DoPower(LPower* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
@@ -3725,14 +3870,14 @@ void LCodeGen::DoPower(LPower* instr) {
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
@@ -3757,7 +3902,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
+ __ j(not_carry, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
Operand nan_operand = masm()->ExternalOperand(nan);
@@ -3771,47 +3916,28 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
-void LCodeGen::DoMathTan(LMathTan* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsrl(result, input);
+ __ j(not_zero, &not_zero_input);
+ __ Set(result, 63); // 63^31 == 32
-void LCodeGen::DoMathSin(LMathSin* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(rsi, 0);
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ __ bind(&not_zero_input);
+ __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
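
Finally, a note on the new DoMathClz32: bsrl returns the index of the highest set bit, and for an index in [0..31] XORing it with 31 yields 31 minus the index, i.e. the leading-zero count; preloading 63 makes the zero input (for which BSR leaves its destination undefined) come out as 63 ^ 31 == 32. A plain C++ rendering of the same scheme; the function name is illustrative:

// Sketch of the BSR/XOR trick used by DoMathClz32 above.
#include <cassert>
#include <cstdint>

static int Clz32(uint32_t x) {
  int msb = 63;                   // zero case: 63 ^ 31 == 32
  if (x != 0) {
    msb = 0;                      // emulate bsrl: highest set bit index
    while (x >>= 1) msb++;
  }
  return msb ^ 31;                // for msb in [0..31], 31 ^ msb == 31 - msb
}

int main() {
  assert(Clz32(0) == 32 && Clz32(1) == 31 && Clz32(0x80000000u) == 0);
  return 0;
}
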
@@ -3825,79 +3951,25 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(),
instr,
- CALL_AS_METHOD,
RDI_CONTAINS_TARGET);
}
}
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
- __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- } else {
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(rsi));
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->hydrogen()->target(),
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- RDI_UNINITIALIZED);
+ CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3908,10 +3980,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3921,41 +3992,41 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->property_cell());
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
- ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
Label packed_case;
// We might need a change here
// look at the first argument
- __ movq(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ movp(rcx, Operand(rsp, 0));
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
+ ArraySingleArgumentConstructorStub stub(isolate(),
+ holey_kind,
override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
- ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
@@ -3969,8 +4040,8 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+ __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
@@ -3979,26 +4050,26 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
+ __ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
+ __ leap(result, Operand(base, offset, times_1, 0));
}
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
Representation representation = instr->representation();
- HObjectAccess access = instr->hydrogen()->access();
+ HObjectAccess access = hinstr->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -4009,86 +4080,84 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Register object = ToRegister(instr->object());
- Handle<Map> transition = instr->transition();
+ __ AssertNotSmi(object);
- if (FLAG_track_fields && representation.IsSmi()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmiConstant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- } else {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- Register value = ToRegister(instr->value());
- Condition cc = masm()->CheckSmi(value);
- DeoptimizeIf(cc, instr->environment());
- }
- }
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- ASSERT(transition.is_null());
+ ASSERT(!representation.IsSmi() ||
+ !instr->value()->IsConstantOperand() ||
+ IsInteger32Constant(LConstantOperand::cast(instr->value())));
+ if (representation.IsDouble()) {
ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ ASSERT(!hinstr->has_transition());
+ ASSERT(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
- if (!transition.is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
+ if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, transition);
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+ __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- kScratchRegister,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteForMap(object,
+ kScratchRegister,
+ temp,
+ kSaveFPRegs);
}
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
- __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
- if (instr->value()->IsConstantOperand()) {
+ if (representation.IsSmi() && SmiValuesAre32Bits() &&
+ hinstr->value()->representation().IsInteger32()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+ }
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
+ representation = Representation::Integer32();
+ }
+
+ Operand operand = FieldOperand(write_register, offset);
+
+ if (instr->value()->IsRegister()) {
+ Register value = ToRegister(instr->value());
+ __ Store(operand, value, representation);
+ } else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (operand_value->IsRegister()) {
- Register value = ToRegister(operand_value);
- __ Store(FieldOperand(write_register, offset), value, representation);
- } else if (representation.IsInteger32()) {
+ if (IsInteger32Constant(operand_value)) {
+ ASSERT(!hinstr->NeedsWriteBarrier());
int32_t value = ToInteger32(operand_value);
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ movl(FieldOperand(write_register, offset), Immediate(value));
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+
+ } else {
+ __ movl(operand, Immediate(value));
+ }
+
} else {
Handle<Object> handle_value = ToHandle(operand_value);
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ Move(FieldOperand(write_register, offset), handle_value);
+ ASSERT(!hinstr->NeedsWriteBarrier());
+ __ Move(operand, handle_value);
}
- } else {
- Register value = ToRegister(instr->value());
- __ Store(FieldOperand(write_register, offset), value, representation);
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
@@ -4098,7 +4167,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
}
}
@@ -4109,86 +4179,82 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- HBoundsCheck* hinstr = instr->hydrogen();
- if (hinstr->skip_check()) return;
-
- Representation representation = hinstr->length()->representation();
- ASSERT(representation.Equals(hinstr->index()->representation()));
+ Representation representation = instr->hydrogen()->length()->representation();
+ ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
ASSERT(representation.IsSmiOrInteger32());
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
-
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ Cmp(index, Smi::FromInt(length));
+ } else {
+ __ cmpl(index, Immediate(length));
+ }
+ cc = CommuteCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, Immediate(constant_index));
+ __ cmpl(length, Immediate(index));
}
} else {
- Register reg2 = ToRegister(instr->index());
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ Cmp(length, Smi::FromInt(index));
} else {
- __ cmpl(reg, reg2);
+ __ cmpl(length, Immediate(index));
}
}
} else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int32_t constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
+ Register index = ToRegister(instr->index());
+ if (instr->length()->IsRegister()) {
+ Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(constant_index));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, Immediate(constant_index));
+ __ cmpl(length, index);
}
} else {
+ Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, index);
} else {
- __ cmpl(length, ToRegister(instr->index()));
+ __ cmpl(length, index);
}
}
}
- Condition condition = hinstr->allow_equality() ? below : below_equal;
- ApplyCheckIf(condition, instr);
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr->environment());
+ }
}
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
@@ -4197,34 +4263,45 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
__ movb(operand, value);
break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
__ movw(operand, value);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
__ movl(operand, value);
break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -4232,7 +4309,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4243,20 +4320,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
}
-
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -4273,72 +4342,93 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(double_store_operand, value);
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register elements = ToRegister(instr->elements());
+ HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
+ int offset = instr->base_offset();
+ Representation representation = hinstr->value()->representation();
+
+ if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+ instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(ToRegister(key), ToRegister(key));
+ }
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+ ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+ if (FLAG_debug_code) {
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ offset),
+ Representation::Smi());
+ __ AssertSmi(scratch);
}
+ // Store int value directly to upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+ offset += kPointerSize / 2;
}
Operand operand =
BuildFastArrayOperand(instr->elements(),
key,
+ instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ offset);
if (instr->value()->IsRegister()) {
- __ movq(operand, ToRegister(instr->value()));
+ __ Store(operand, ToRegister(instr->value()), representation);
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (IsInteger32Constant(operand_value)) {
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ Move(operand, smi_value);
+ int32_t value = ToInteger32(operand_value);
+ if (representation.IsSmi()) {
+ __ Move(operand, Smi::FromInt(value));
+
+ } else {
+ __ movl(operand, Immediate(value));
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
__ Move(operand, handle_value);
}
}
- if (instr->hydrogen()->NeedsWriteBarrier()) {
+ if (hinstr->NeedsWriteBarrier()) {
+ Register elements = ToRegister(instr->elements());
ASSERT(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
- ASSERT(!instr->key()->IsConstantOperand());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
+ ASSERT(!key->IsConstantOperand());
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
+ __ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
- check_needed);
+ check_needed,
+ hinstr->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
+ if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -4354,7 +4444,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4374,23 +4464,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+ __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+ __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp()), kDontSaveFPRegs);
+ __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
+ kDontSaveFPRegs);
} else {
+ ASSERT(object_reg.is(rax));
ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
- }
__ Move(rbx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
__ bind(&not_applicable);
}
@@ -4408,18 +4495,12 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- if (FLAG_new_string_add) {
- ASSERT(ToRegister(instr->left()).is(rdx));
- ASSERT(ToRegister(instr->right()).is(rax));
- NewStringAddStub stub(instr->hydrogen()->flags(),
- isolate()->heap()->GetPretenureMode());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
+ StringAddStub stub(isolate(),
+ instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4458,7 +4539,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ push(string);
+ __ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
@@ -4468,10 +4549,10 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
- __ push(index);
+ __ Push(index);
}
CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAt, 2, instr, instr->context());
+ Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4503,7 +4584,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ j(above, deferred->entry());
__ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
+ __ movp(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
@@ -4523,7 +4604,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
+ __ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4542,51 +4623,41 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- __ testl(ToRegister(input), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+ __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), SIGNED_INT32);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LNumberTagI* instr_;
+ };
+
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- __ Integer32ToSmi(reg, reg);
+ if (SmiValuesAre32Bits()) {
+ __ Integer32ToSmi(reg, reg);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+ __ Integer32ToSmi(reg, reg);
+ __ j(overflow, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
@@ -4596,7 +4667,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagU(instr_);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4615,21 +4687,31 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
- Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
- XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register reg = ToRegister(value);
+ Register tmp = ToRegister(temp1);
+ XMMRegister temp_xmm = ToDoubleRegister(temp2);
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
// Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
- XMMRegister xmm_scratch = double_scratch0();
- __ LoadUint32(temp_xmm, reg, xmm_scratch);
+ if (signedness == SIGNED_INT32) {
+ ASSERT(SmiValuesAre31Bits());
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ __ SmiToInteger32(reg, reg);
+ __ xorl(reg, Immediate(0x80000000));
+ __ cvtlsi2sd(temp_xmm, reg);
+ } else {
+ ASSERT(signedness == UNSIGNED_INT32);
+ __ LoadUint32(temp_xmm, reg);
+ }
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4638,29 +4720,31 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains
+ // an integer value.
+ __ Set(reg, 0);
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- // NumberTagU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
- if (!reg.is(rax)) __ movq(reg, rax);
+ // NumberTagIU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, rax);
+ }
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4703,24 +4787,33 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ movq(kScratchRegister, rax);
+ __ movp(kScratchRegister, rax);
}
- __ movq(reg, kScratchRegister);
+ __ movp(reg, kScratchRegister);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
+ HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
+ DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ }
+ __ Integer32ToSmi(output, input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
@@ -4950,7 +5043,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
@@ -4961,7 +5054,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
@@ -5014,13 +5107,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
- __ push(object);
+ __ Push(object);
__ Set(rsi, 0);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
@@ -5044,29 +5137,35 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register object_;
};
- if (instr->hydrogen()->CanOmitMapChecks()) return;
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
- for (int i = 0; i < map_set.size() - 1; i++) {
- Handle<Map> map = map_set.at(i).handle();
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
- if (instr->hydrogen()->has_migration_target()) {
+ if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr->environment());
@@ -5109,7 +5208,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
+ __ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
@@ -5127,6 +5226,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ movq(result_reg, value_reg);
+ __ shrq(result_reg, Immediate(32));
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5180,7 +5303,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ movl(temp, Immediate((size / kPointerSize) - 1));
} else {
temp = ToRegister(instr->size());
- __ sar(temp, Immediate(kPointerSizeLog2));
+ __ sarp(temp, Immediate(kPointerSizeLog2));
__ decl(temp);
}
Label loop;
@@ -5206,7 +5329,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ Integer32ToSmi(size, size);
- __ push(size);
+ __ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
@@ -5226,14 +5349,14 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -5248,18 +5371,18 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
@@ -5268,23 +5391,23 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ __ movp(rdx, FieldOperand(rbx, i));
+ __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movp(FieldOperand(rax, i), rdx);
+ __ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
}
@@ -5295,16 +5418,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(isolate(),
+ instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5322,9 +5446,9 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
+ __ Push(ToRegister(operand));
} else {
- __ push(ToOperand(operand));
+ __ Push(ToOperand(operand));
}
}
@@ -5351,14 +5475,15 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_string())) {
+ } else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
@@ -5366,32 +5491,33 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->symbol_string())) {
+ } else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->boolean_string())) {
+ } else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ } else if (FLAG_harmony_typeof &&
+ String::Equals(type_name, factory->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_string())) {
+ } else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_string())) {
+ } else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5399,7 +5525,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_string())) {
+ } else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
@@ -5432,14 +5558,14 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -5449,19 +5575,20 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
+ last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5497,8 +5624,8 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5534,11 +5661,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5547,7 +5670,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5580,7 +5702,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
@@ -5593,12 +5715,12 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
@@ -5619,9 +5741,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ movq(result,
+ __ movp(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
+ __ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
@@ -5631,38 +5753,98 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object);
+ __ Push(index);
+ __ xorp(rsi, rsi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
Label out_of_object, done;
+ __ Move(kScratchRegister, Smi::FromInt(1));
+ __ testp(index, kScratchRegister);
+ __ j(not_zero, deferred->entry());
+
+ __ sarp(index, Immediate(1));
+
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ negl(index);
// Index is now equal to out of object property index plus 1.
- __ movq(object, FieldOperand(object,
+ __ movp(object, FieldOperand(object,
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ Push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.h b/chromium/v8/src/x64/lithium-codegen-x64.h
index 63bfe187f14..5621a3d367b 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.h
+++ b/chromium/v8/src/x64/lithium-codegen-x64.h
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
#define V8_X64_LITHIUM_CODEGEN_X64_H_
-#include "x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"
-#include "checks.h"
-#include "deoptimizer.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
-#include "x64/lithium-gap-resolver-x64.h"
+#include "src/checks.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+#include "src/x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
@@ -86,12 +63,13 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -106,7 +84,14 @@ class LCodeGen: public LCodeGenBase {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagU(LNumberTagU* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
+
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@@ -116,6 +101,9 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -130,9 +118,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -149,8 +135,6 @@ class LCodeGen: public LCodeGenBase {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
-
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -160,6 +144,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@@ -214,7 +199,6 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count,
int arity,
LInstruction* instr,
- CallKind call_kind,
RDIState rdi_state);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -226,7 +210,6 @@ class LCodeGen: public LCodeGenBase {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -239,7 +222,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -250,9 +232,9 @@ class LCodeGen: public LCodeGenBase {
Operand BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
+ Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
index 6059c50b726..93c1512625a 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "x64/lithium-gap-resolver-x64.h"
-#include "x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-gap-resolver-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -172,23 +149,23 @@ void LGapResolver::EmitMove(int index) {
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
+ __ movp(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
+ __ movp(kScratchRegister, src);
+ __ movp(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
@@ -198,7 +175,14 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ int32_t constant = cgen_->ToInteger32(constant_source);
+      // Do sign extension only for a constant used as a de-hoisted array key.
+ // Others only need zero extension, which saves 2 bytes.
+ if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+ __ Set(dst, constant);
+ } else {
+ __ Set(dst, static_cast<uint32_t>(constant));
+ }
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
@@ -218,12 +202,11 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
+      // Do sign extension to 64 bits when stored into a stack slot.
+ __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
+ __ movp(dst, kScratchRegister);
}
}
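[Illustrative sketch, not part of the diff: the constant moves above treat a de-hoisted array key differently because widening an int32 to 64 bits by sign extension preserves a negative offset, while the zero-extending form (e.g. a movl immediate, 2 bytes shorter to encode) does not. A minimal standalone C++ illustration, with an assumed example key value:]

#include <cstdint>
#include <cstdio>

int main() {
  int32_t key = -8;  // assumed example: a negative de-hoisted key offset
  // Sign extension keeps the value usable as a negative displacement.
  uint64_t sign_extended = static_cast<uint64_t>(static_cast<int64_t>(key));
  // Zero extension clears the upper 32 bits, which would be wrong here.
  uint64_t zero_extended = static_cast<uint64_t>(static_cast<uint32_t>(key));
  std::printf("sign: %016llx  zero: %016llx\n",
              static_cast<unsigned long long>(sign_extended),
              static_cast<unsigned long long>(zero_extended));
  return 0;
}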
@@ -271,9 +254,9 @@ void LGapResolver::EmitSwap(int index) {
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
+ __ movp(kScratchRegister, mem);
+ __ movp(mem, reg);
+ __ movp(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
@@ -281,9 +264,9 @@ void LGapResolver::EmitSwap(int index) {
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
+ __ movp(kScratchRegister, dst);
__ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
+ __ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.h b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
index f218455b675..fd4b91ab348 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
@@ -1,36 +1,13 @@
// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "lithium.h"
+#include "src/lithium.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/x64/lithium-x64.cc b/chromium/v8/src/x64/lithium-x64.cc
index 449eb2b6a11..325f2c0da30 100644
--- a/chromium/v8/src/x64/lithium-x64.cc
+++ b/chromium/v8/src/x64/lithium-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "lithium-allocator-inl.h"
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-#include "hydrogen-osr.h"
+#include "src/lithium-allocator-inl.h"
+#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-codegen-x64.h"
+#include "src/hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -175,6 +152,16 @@ bool LGoto::HasInterestingComment(LCodeGen* gen) const {
}
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+ LPlatformChunk* chunk) const {
+ HValue* hvalue = this->hydrogen_value();
+ return hvalue != NULL &&
+ hvalue->representation().IsInteger32() &&
+ chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -259,7 +246,7 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
+ hydrogen()->type_literal()->ToCString().get(),
true_block_id(), false_block_id());
}
@@ -280,7 +267,18 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
stream->Add("#%d / ", arity());
}
@@ -305,28 +303,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -384,7 +360,7 @@ void LStoreNamedField::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(String::cast(*name())->ToCString().get());
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -395,7 +371,7 @@ void LLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
@@ -407,7 +383,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
@@ -463,7 +439,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -508,6 +484,13 @@ LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
}
+LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseTempRegister(value);
+}
+
+
LOperand* LChunkBuilder::Use(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
@@ -569,8 +552,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
LUnallocated* result) {
result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
@@ -578,41 +560,36 @@ LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
int index) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
+ LTemplateResultInstruction<1>* instr) {
return Define(instr,
new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
Register reg) {
return Define(instr, ToUnallocated(reg));
}
-template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
+ LTemplateResultInstruction<1>* instr,
XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
@@ -649,6 +626,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
!hinstr->HasObservableSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
}
return instr;
@@ -720,17 +699,23 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
+ bool does_deopt = false;
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
+ if (SmiValuesAre31Bits() && instr->representation().IsSmi() &&
+ constant_value > 0) {
+ // Left shift can deoptimize if we shift by > 0 and the result
+ // cannot be truncated to smi.
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
right = UseFixed(right_value, rcx);
}
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
- bool does_deopt = false;
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
@@ -858,169 +843,102 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (current->OperandCount() == 0) {
instr = DefineAsRegister(new(zone()) LDummy());
} else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
instr = DefineAsRegister(new(zone())
LDummyUse(UseAny(current->OperandAt(0))));
}
for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
LInstruction* dummy =
new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
dummy->set_hydrogen_value(current);
chunk_->AddInstruction(dummy, current_block_);
}
} else {
- instr = current->CompileToLithium(this);
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new(zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
}
argument_count_ += current->argument_delta();
ASSERT(argument_count_ >= 0);
if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- if (instr->IsCall()) {
- HValue* hydrogen_value_for_lazy_bailout = current;
- LInstruction* instruction_needing_environment = NULL;
- if (current->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(current->next());
- instruction_needing_environment = instr;
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
- }
+ AddInstruction(instr, current);
}
+
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
}
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
}
+ ASSERT(fixed == 0 || used_at_start == 0);
}
+#endif
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
}
+ chunk_->AddInstruction(instr, current_block_);
- return result;
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ LInstruction* instruction_needing_environment = NULL;
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
@@ -1035,22 +953,21 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- ToBooleanStub::Types expected = instr->expected_input_types();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1113,9 +1030,13 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
}
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = UseOrConstant(instr->argument());
- return new(zone()) LPushArgument(argument);
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = UseOrConstant(instr->argument(i));
+ AddInstruction(new(zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
}
@@ -1154,33 +1075,38 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+ descriptor, ops, zone());
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1198,12 +1124,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
- case kMathSin: return DoMathSin(instr);
- case kMathCos: return DoMathCos(instr);
- case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1229,8 +1153,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1238,29 +1166,14 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathLog* result = new(zone()) LMathLog(input);
- return DefineSameAsFirst(result);
+ return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
}
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathSin* result = new(zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathCos* result = new(zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LMathTan* result = new(zone()) LMathTan(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
}
@@ -1276,9 +1189,8 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineSameAsFirst(result);
+ LOperand* input = UseAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
}
@@ -1289,34 +1201,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* key = UseFixed(instr->key(), rcx);
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallGlobal* result = new(zone()) LCallGlobal(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
@@ -1337,9 +1221,7 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
LCallFunction* call = new(zone()) LCallFunction(context, function);
- LInstruction* result = DefineFixed(call, rax);
- if (instr->IsTailCall()) return result;
- return MarkAsCall(result, instr);
+ return MarkAsCall(DefineFixed(call, rax), instr);
}
@@ -1385,24 +1267,71 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
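[Illustrative sketch, not part of the diff: the new LDivByPowerOf2I path exists because signed division by a power-of-two constant can be strength-reduced to a shift, with a bias added for negative dividends so the result truncates toward zero. A minimal C++ version of that reduction, assuming an arithmetic right shift of negative values (as on x64) and ignoring the minus-zero and overflow cases the Lithium code deoptimizes on:]

#include <cstdint>

// Truncating signed division by (1 << shift), for shift in [1, 30].
// Adding (divisor - 1) to a negative dividend makes the arithmetic shift
// round toward zero instead of toward negative infinity.
int32_t DivByPowerOf2(int32_t dividend, int shift) {
  int32_t bias = (dividend >> 31) & ((1 << shift) - 1);
  return (dividend + bias) >> shift;
}

// Example: DivByPowerOf2(-7, 1) == -3, matching -7 / 2.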
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1411,93 +1340,132 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
}
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
}
- return NULL;
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), rax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), rdx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().Equals(instr->representation()));
- ASSERT(right->representation().Equals(instr->representation()));
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, rax),
- UseRegister(right),
- FixedTemp(rdx));
- LInstruction* result = DefineFixed(mod, rdx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1558,14 +1526,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
+ LOperand* right;
+ if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+ // We cannot add a tagged immediate to a tagged value,
+ // so we request it in a register.
+ right = UseRegisterAtStart(right_candidate);
+ } else {
+ right = use_lea ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ }
LAddI* add = new(zone()) LAddI(left, right);
bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
+ LInstruction* result = use_lea ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
if (can_overflow) {
result = AssignEnvironment(result);
}
@@ -1670,8 +1643,6 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1687,8 +1658,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
LOperand* value = UseRegister(instr->value());
return new(zone()) LCompareMinusZeroAndBranch(value);
}
@@ -1778,19 +1747,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), rax);
LDateField* result = new(zone()) LDateField(object, instr->index());
@@ -1824,9 +1780,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = Use(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseOrConstantAtStart(instr->length())
+ : UseAtStart(instr->length());
+ LInstruction* result = new(zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1844,13 +1807,6 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
}
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
@@ -1867,23 +1823,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ HValue* val = instr->value();
if (from.IsSmi()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This makes simple, non-allocating conversion not have to force
- // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1891,77 +1845,70 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
+ LOperand* value = UseRegister(val);
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegister(val);
return AssignEnvironment(
DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value)));
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegister(val);
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister();
+ LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1);
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
- HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = NULL;
- if (val->CheckFlag(HInstruction::kUint32)) {
- result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange() &&
- val->range()->upper() != kMaxInt) {
- return result;
- }
- } else {
- result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
- }
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
+ ASSERT(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
} else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
+ LOperand* value = Use(val);
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
@@ -1973,7 +1920,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+ LInstruction* result = new(zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1997,15 +1948,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
+ if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
}
return result;
}
@@ -2030,6 +1978,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2085,21 +2047,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2116,7 +2071,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2157,40 +2115,69 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+ // We sign-extend the dehoisted key at the definition point when the
+ // pointer size is 64-bit. For the x32 port, we sign-extend the dehoisted
+ // key at the use points and should not invoke this function. We can't use
+ // STATIC_ASSERT here as the pointer size is 32-bit for x32.
+ ASSERT(kPointerSize == kInt64Size);
+ BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+ if (dehoisted_key_ids->Contains(candidate->id())) return;
+ dehoisted_key_ids->Add(candidate->id());
+ if (!candidate->IsPhi()) return;
+ for (int i = 0; i < candidate->OperandCount(); ++i) {
+ FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+ }
}
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT((kPointerSize == kInt64Size &&
+ instr->key()->representation().IsInteger32()) ||
+ (kPointerSize == kInt32Size &&
+ instr->key()->representation().IsSmiOrInteger32()));
ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LOperand* key = NULL;
+ LInstruction* result = NULL;
- if (!instr->is_external()) {
+ if (kPointerSize == kInt64Size) {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
+ if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ !(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
+ (IsDoubleOrFloatElementsKind(elements_kind))));
+ LOperand* backing_store = UseRegister(instr->elements());
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
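
A brief aside on the dehoisted-key bookkeeping used above (FindDehoistedKeyDefinitions and the kPointerSize == kInt64Size guards). When an access such as a[i + 1] is dehoisted, the constant moves into base_offset and the remaining 32-bit key may legitimately be negative (key -1 with a folded offset of +1 still addresses element 0), so on 64-bit-pointer builds the key has to be sign-extended at its definition before it is scaled into an address. The helper and values below are illustrative, not from the patch.

    #include <stdint.h>
    // Illustrative only: why a dehoisted key must be sign- rather than
    // zero-extended before it enters 64-bit address arithmetic.
    int64_t ScaleKeyBytes(int32_t key, int64_t element_size) {
      return static_cast<int64_t>(key) * element_size;  // sign-extend, then scale
    }
    // ScaleKeyBytes(-1, 8) == -8, which the folded base_offset then compensates
    // for; zero-extending -1 instead would give an offset near 2^32 and a wild
    // load or store.
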
@@ -2208,24 +2195,31 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- if (!instr->is_external()) {
+ if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
+ if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
LOperand* key = NULL;
LOperand* val = NULL;
- if (instr->value()->representation().IsDouble()) {
+ Representation value_representation = instr->value()->representation();
+ if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
- ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
+ ASSERT(value_representation.IsSmiOrTagged() ||
+ value_representation.IsInteger32());
if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
+ object = UseRegisterAtStart(instr->elements());
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
@@ -2235,21 +2229,32 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* key = NULL;
+ if (kPointerSize == kInt64Size) {
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+ LOperand* backing_store = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(backing_store, key, val);
}
@@ -2271,7 +2276,6 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
@@ -2280,10 +2284,11 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
object, NULL, new_map_reg, temp_reg);
return result;
} else {
+ LOperand* object = UseFixed(instr->object(), rax);
LOperand* context = UseFixed(instr->context(), rsi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
+ return MarkAsCall(result, instr);
}
}
@@ -2324,7 +2329,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (needs_write_barrier) {
@@ -2333,10 +2338,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
- val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsSmi()) {
+ val = UseRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2347,14 +2351,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- }
- return result;
+ return new(zone()) LStoreNamedField(obj, val, temp);
}
@@ -2371,12 +2368,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left = FLAG_new_string_add
- ? UseFixed(instr->left(), rdx)
- : UseOrConstantAtStart(instr->left());
- LOperand* right = FLAG_new_string_add
- ? UseFixed(instr->right(), rax)
- : UseOrConstantAtStart(instr->right());
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
return MarkAsCall(
DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
}
@@ -2388,7 +2381,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
@@ -2443,7 +2436,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ info()->code_stub()->GetInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
@@ -2525,9 +2518,6 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2560,13 +2550,13 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
+ instr->inlining_kind());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2622,7 +2612,25 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+ LOperand* context = UseRegisterAtStart(instr->context());
+ return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+ HAllocateBlockContext* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LAllocateBlockContext* result =
+ new(zone()) LAllocateBlockContext(context, function);
+ return MarkAsCall(DefineFixed(result, rsi), instr);
}
diff --git a/chromium/v8/src/x64/lithium-x64.h b/chromium/v8/src/x64/lithium-x64.h
index dc15c97c44c..9609cfc9dce 100644
--- a/chromium/v8/src/x64/lithium-x64.h
+++ b/chromium/v8/src/x64/lithium-x64.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_LITHIUM_X64_H_
#define V8_X64_LITHIUM_X64_H_
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
+#include "src/hydrogen.h"
+#include "src/lithium-allocator.h"
+#include "src/lithium.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -44,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
+ V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -52,12 +30,9 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallConstantFunction) \
+ V(CallJSFunction) \
+ V(CallWithDescriptor) \
V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
@@ -83,24 +58,28 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
- V(ElementsKind) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
@@ -109,7 +88,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -119,7 +97,6 @@ class LCodeGen;
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadExternalArrayPointer) \
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
@@ -131,17 +108,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
- V(MathCos) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
- V(MathSin) \
V(MathSqrt) \
- V(MathTan) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -149,7 +125,6 @@ class LCodeGen;
V(NumberTagU) \
V(NumberUntagD) \
V(OsrEntry) \
- V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
@@ -163,8 +138,8 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
+ V(StoreFrameContext) \
V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -176,16 +151,13 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
- V(ValueOf) \
V(WrapReceiver)
@@ -264,7 +236,9 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
@@ -279,6 +253,10 @@ class LInstruction : public ZoneObject {
virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+ virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+ return false;
+ }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -303,10 +281,8 @@ class LInstruction : public ZoneObject {
// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LInstruction {
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
@@ -316,8 +292,20 @@ class LTemplateInstruction : public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
+ virtual bool MustSignExtendResult(
+ LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+
protected:
EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
@@ -442,6 +430,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -492,10 +481,6 @@ class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
};
@@ -563,6 +548,7 @@ class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
};
@@ -624,6 +610,49 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -641,40 +670,126 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
@@ -772,39 +887,15 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathSin(LOperand* value) {
+ explicit LMathClz32(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathTan(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
@@ -1137,6 +1228,9 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return hydrogen()->op(); }
+ bool IsInteger32() const {
+ return hydrogen()->representation().IsInteger32();
+ }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
DECLARE_HYDROGEN_ACCESSOR(Bitwise)
@@ -1282,32 +1376,6 @@ class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
@@ -1361,20 +1429,6 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1554,18 +1608,20 @@ class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadExternalArrayPointer V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
+ // Operations that require the key to be divided by two to be converted into
+ // an index cannot fold the scale operation into a load and need an extra
+ // temp register to do the work.
+ return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
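
To make the scale-folding argument in ExternalArrayOpRequiresTemp concrete, here is a short worked example, assuming the 31-bit Smi layout in which a tagged key is the untagged index shifted left by one (the variable names are illustrative):

    #include <stdint.h>
    // Illustrative only. For a Smi key k == index << 1 on x64, whose SIB
    // addressing supports scales of 1, 2, 4 and 8:
    //   4-byte elements: byte offset = index * 4 == k * 2  -> scale 2, foldable
    //   2-byte elements: byte offset = index * 2 == k * 1  -> scale 1, foldable
    //   1-byte elements: byte offset = index * 1 == k / 2  -> no scale of 1/2,
    //                    so the key is first untagged in a temp register.
    int32_t smi_key = 7 << 1;                   // tagged key for index 7
    int32_t byte_element_index = smi_key >> 1;  // explicit untag -> needs temp
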
class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
@@ -1581,10 +1637,16 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
bool is_external() const {
return hydrogen()->is_external();
}
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1646,28 +1708,6 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1731,15 +1771,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
- temps_[0] = code_object;
+ inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return temps_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@@ -1778,18 +1818,6 @@ class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
@@ -1803,94 +1831,69 @@ class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LGlobalObject(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-};
-
-
-class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
}
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
+ LOperand* function() { return inputs_[0]; }
-class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ const ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
}
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
+ LOperand* target() const { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
-};
+ ZoneList<LOperand*> inputs_;
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
};
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNamed(LOperand* context) {
+ LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@@ -1911,35 +1914,6 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallGlobal(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<String> name() const {return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
@@ -1989,7 +1963,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@@ -2011,67 +1985,45 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LInteger32ToSmi(LOperand* value) {
+ explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2148,6 +2100,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2198,7 +2151,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2223,7 +2175,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2236,6 +2188,12 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
bool is_external() const { return hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -2246,7 +2204,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@@ -2272,7 +2230,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2401,7 +2359,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMaps(LOperand* value = NULL) {
inputs_[0] = value;
}
@@ -2476,6 +2434,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2664,53 +2649,94 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
+class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStoreFrameContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAllocateBlockContext(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
+ : LChunk(info, graph),
+ dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
+ BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+ bool IsDehoistedKey(HValue* value) {
+ return dehoisted_key_ids_.Contains(value->id());
+ }
+
+ private:
+ BitVector dehoisted_key_ids_;
};
-class LChunkBuilder V8_FINAL BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
info_(info),
graph_(graph),
- zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- argument_count_(0),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2723,7 +2749,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2756,6 +2781,9 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+ // An input operand in a register that may be trashed or a constant operand.
+ MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
+
// An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
@@ -2773,7 +2801,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2782,22 +2810,16 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ XMMRegister reg);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
@@ -2815,11 +2837,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
-
void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
@@ -2827,16 +2846,15 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
+ void FindDehoistedKeyDefinitions(HValue* candidate);
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
- int argument_count_;
LAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index 6c3f50163ef..39acf800340 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -1,43 +1,20 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-#include "isolate-inl.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+#include "src/serialize.h"
+#include "src/debug.h"
+#include "src/heap.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -54,10 +31,10 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-static const int kInvalidRootRegisterDelta = -1;
+static const int64_t kInvalidRootRegisterDelta = -1;
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
if (predictable_code_size() &&
(other.address() < reinterpret_cast<Address>(isolate()) ||
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
@@ -65,17 +42,27 @@ intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
}
Address roots_register_value = kRootRegisterBias +
reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
+
+ int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
+ if (kPointerSize == kInt64Size) {
+ delta = other.address() - roots_register_value;
+ } else {
+ // For x32, zero extend the address to 64-bit and calculate the delta.
+ uint64_t o = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(other.address()));
+ uint64_t r = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(roots_register_value));
+ delta = o - r;
+ }
return delta;
}
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
@@ -85,11 +72,10 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -98,17 +84,16 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
load_rax(source);
} else {
Move(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
+ movp(destination, Operand(kScratchRegister, 0));
}
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
@@ -117,18 +102,17 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
store_rax(destination);
} else {
Move(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
+ movp(Operand(kScratchRegister, 0), source);
}
}
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -138,14 +122,13 @@ void MacroAssembler::LoadAddress(Register destination,
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source);
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -154,28 +137,28 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
return size;
}
}
- // Size of movq(destination, src);
+ // Size of movp(destination, src);
return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
+ if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
+ Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
+ movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -184,7 +167,7 @@ void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
ASSERT(root_array_available_);
- movq(destination,
+ movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
@@ -193,20 +176,20 @@ void MacroAssembler::LoadRootIndexed(Register destination,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -216,7 +199,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -234,15 +217,15 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
+ movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
@@ -253,7 +236,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
@@ -269,33 +252,35 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (serializer_enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
+ ASSERT(kPointerSize == kInt64Size
+ ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+ : kPointerSize == kInt32Size);
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
- RelocInfo::NONE64);
+ Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
@@ -309,7 +294,8 @@ void MacroAssembler::RecordWriteField(
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -323,7 +309,7 @@ void MacroAssembler::RecordWriteField(
// of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -332,26 +318,28 @@ void MacroAssembler::RecordWriteField(
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, kZapValue, RelocInfo::NONE64);
- movq(dst, kZapValue, RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -363,29 +351,102 @@ void MacroAssembler::RecordWriteArray(Register object,
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, kZapValue, RelocInfo::NONE64);
- movq(index, kZapValue, RelocInfo::NONE64);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(index, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(!object.is(kScratchRegister));
+ ASSERT(!object.is(map));
+ ASSERT(!object.is(dst));
+ ASSERT(!map.is(dst));
+ AssertNotSmi(object);
+
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ CompareMap(map, isolate()->factory()->meta_map());
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // Compute the address.
+ leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ // A single check of the map's pages interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(map, kZapValue, Assembler::RelocInfoNone());
+ }
+}
+
+
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -398,7 +459,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -417,12 +478,14 @@ void MacroAssembler::RecordWrite(Register object,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
@@ -431,7 +494,8 @@ void MacroAssembler::RecordWrite(Register object,
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
@@ -439,8 +503,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, kZapValue, RelocInfo::NONE64);
- movq(value, kZapValue, RelocInfo::NONE64);
+ Move(address, kZapValue, Assembler::RelocInfoNone());
+ Move(value, kZapValue, Assembler::RelocInfoNone());
}
}
@@ -483,7 +547,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -505,17 +569,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -527,20 +582,17 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(rax);
- movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
- push(kScratchRegister);
- movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
- RelocInfo::NONE64);
- push(kScratchRegister);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
+ Assembler::RelocInfoNone());
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
@@ -549,12 +601,12 @@ void MacroAssembler::Abort(BailoutReason reason) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -569,30 +621,16 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it does not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
+ if (!hash.is(index)) {
+ movl(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
@@ -602,10 +640,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -613,7 +648,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size, save_doubles);
+ CEntryStub ces(isolate(), f->result_size, save_doubles);
CallStub(&ces);
}
@@ -623,7 +658,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
Set(rax, num_arguments);
LoadAddress(rbx, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -670,8 +705,8 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
void MacroAssembler::CallApiFunctionAndReturn(
- Address function_address,
- Address thunk_address,
+ Register function_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -696,13 +731,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
+ ASSERT(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
Move(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
if (FLAG_log_timer_events) {
@@ -717,22 +753,18 @@ void MacroAssembler::CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, ExternalReference::is_profiling_address(isolate()));
cmpb(Operand(rax, 0), Immediate(0));
j(zero, &profiler_disabled);
// Third parameter is the address of the actual getter function.
- movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
- movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ Move(thunk_last_arg, function_address);
+ Move(rax, thunk_ref);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<Address>(function_address),
- RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, function_address);
bind(&end_profiler_check);
@@ -749,14 +781,14 @@ void MacroAssembler::CallApiFunctionAndReturn(
}
// Load the value from ReturnValue
- movq(rax, return_value_operand);
+ movp(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -773,7 +805,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
Register map = rcx;
JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
CmpInstanceType(map, FIRST_NONSTRING_TYPE);
j(below, &ok, Label::kNear);
@@ -803,7 +835,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- movq(rsi, *context_restore_operand);
+ movp(rsi, *context_restore_operand);
}
LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
@@ -811,19 +843,19 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
+ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ movp(prev_limit_reg, rax);
LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
- movq(rax, prev_limit_reg);
+ movp(rax, prev_limit_reg);
jmp(&leave_exit_frame);
}
@@ -832,8 +864,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), result_size);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -848,16 +880,16 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
+ movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movp(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
@@ -866,7 +898,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -892,12 +924,12 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -915,12 +947,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
@@ -951,7 +983,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -965,7 +997,12 @@ void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
- movq(dst, src);
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ movp(dst, src);
}
}
@@ -983,12 +1020,16 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -1004,11 +1045,18 @@ bool MacroAssembler::IsUnsafeInt(const int32_t x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(dst, kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ movp(dst, Immediate(value ^ jit_cookie()));
+ xorp(dst, Immediate(jit_cookie()));
+ }
} else {
Move(dst, src);
}
@@ -1016,11 +1064,18 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(Operand(rsp, 0), kScratchRegister);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ Push(Immediate(value ^ jit_cookie()));
+ xorp(Operand(rsp, 0), Immediate(jit_cookie()));
+ }
} else {
Push(src);
}
@@ -1043,7 +1098,8 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
+ Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
cmpq(dst, kSmiConstantRegister);
Assert(equal, kUninitializedKSmiConstantRegister);
}
@@ -1057,37 +1113,41 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
- movq(dst, kSmiConstantRegister);
+ movp(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
return;
default:
- movq(dst, source, RelocInfo::NONE64);
+ Move(dst, source, Assembler::RelocInfoNone());
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -1097,7 +1157,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
if (!dst.is(src)) {
movl(dst, src);
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
@@ -1109,8 +1169,15 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+
+ if (SmiValuesAre32Bits()) {
+ ASSERT(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ Integer32ToSmi(kScratchRegister, src);
+ movp(dst, kScratchRegister);
+ }
}
@@ -1122,48 +1189,70 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
} else {
leal(dst, Operand(src, constant));
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
+ }
+
+ if (SmiValuesAre32Bits()) {
+ shrp(dst, Immediate(kSmiShift));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ sarl(dst, Immediate(kSmiShift));
}
- shr(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(dst, src);
+ sarl(dst, Immediate(kSmiShift));
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
+ }
+ sarp(dst, Immediate(kSmiShift));
+ if (kPointerSize == kInt32Size) {
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
}
- sar(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movp(dst, src);
+ SmiToInteger64(dst, dst);
+ }
}
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
@@ -1176,10 +1265,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
@@ -1187,20 +1276,25 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(dst, Immediate(src));
+ }
}
@@ -1208,12 +1302,18 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, dst);
+ cmpl(kScratchRegister, src);
+ }
}
@@ -1227,12 +1327,12 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
return;
}
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
+ sarp(dst, Immediate(kSmiShift - power));
} else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
+ shlp(dst, Immediate(power - kSmiShift));
}
}
@@ -1242,7 +1342,7 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
+ shrp(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -1255,13 +1355,13 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
if (dst.is(src1) || dst.is(src2)) {
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
} else {
- movq(dst, src1);
- or_(dst, src2);
+ movp(dst, src1);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1284,8 +1384,8 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, src);
+ rolp(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
return zero;
}
@@ -1296,8 +1396,15 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
return CheckSmi(first);
}
STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
+ if (SmiValuesAre32Bits()) {
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ }
return zero;
}
@@ -1307,9 +1414,9 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
if (first.is(second)) {
return CheckNonNegativeSmi(first);
}
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, first);
+ orp(kScratchRegister, second);
+ rolp(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
}
@@ -1337,22 +1444,34 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
+ if (SmiValuesAre32Bits()) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ cmpl(src, Immediate(0xc0000000));
+ return positive;
+ }
}
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
+ if (SmiValuesAre32Bits()) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testl(src, src);
+ return positive;
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(0xc0000000));
+ return zero;
+ }
}
@@ -1377,6 +1496,14 @@ void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
}
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1385,6 +1512,14 @@ void MacroAssembler::JumpIfNotValidSmiValue(Register src,
}
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1447,46 +1582,46 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1495,7 +1630,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+ if (SmiValuesAre32Bits()) {
+ addl(Operand(dst, kSmiShift / kBitsPerByte),
+ Immediate(constant->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ addp(dst, Immediate(constant));
+ }
}
}
@@ -1508,21 +1649,21 @@ void MacroAssembler::SmiAddConstant(Register dst,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1536,7 +1677,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1545,22 +1686,22 @@ void MacroAssembler::SmiAddConstant(Register dst,
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1574,21 +1715,21 @@ void MacroAssembler::SmiSubConstant(Register dst,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1603,14 +1744,14 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
ASSERT(!dst.is(kScratchRegister));
- movq(dst, src);
+ movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1623,16 +1764,16 @@ void MacroAssembler::SmiNeg(Register dst,
Label::Distance near_jump) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ movp(kScratchRegister, src);
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
+ movp(src, kScratchRegister);
} else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
+ movp(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
@@ -1648,15 +1789,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
- masm->addq(dst, src2);
+ masm->movp(dst, src1);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1691,13 +1832,13 @@ void MacroAssembler::SmiAdd(Register dst,
// overflowing is impossible.
if (!dst.is(src1)) {
if (emit_debug_code()) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1712,15 +1853,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
- masm->movq(dst, src1);
- masm->subq(dst, src2);
+ masm->movp(dst, src1);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1756,9 +1897,9 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- masm->movq(dst, src1);
+ masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1788,24 +1929,24 @@ void MacroAssembler::SmiMul(Register dst,
if (dst.is(src1)) {
Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
+ movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
- movq(dst, kScratchRegister);
- xor_(dst, src2);
+ movp(dst, kScratchRegister);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&zero_correct_result);
@@ -1814,17 +1955,17 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero, so check whether the other is
// negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1844,11 +1985,11 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
// We need to rule out dividing Smi::kMinValue by -1, since that would
@@ -1859,12 +2000,12 @@ void MacroAssembler::SmiDiv(Register dst,
// We overshoot a little and go to slow case if we divide min-value
// by any negative value, not just -1.
Label safe_div;
- testl(rax, Immediate(0x7fffffff));
+ testl(rax, Immediate(~Smi::kMinValue));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
} else {
j(negative, on_not_smi_result, near_jump);
@@ -1881,14 +2022,14 @@ void MacroAssembler::SmiDiv(Register dst,
if (src1.is(rax)) {
Label smi_result;
j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&smi_result);
} else {
j(not_zero, on_not_smi_result, near_jump);
}
if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
Integer32ToSmi(dst, rax);
}
@@ -1907,11 +2048,11 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
SmiToInteger32(src2, src2);
@@ -1925,7 +2066,7 @@ void MacroAssembler::SmiMod(Register dst,
// Retag inputs and go slow case.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
jmp(on_not_smi_result, near_jump);
bind(&safe_div);
@@ -1936,14 +2077,14 @@ void MacroAssembler::SmiMod(Register dst,
// Restore smi tags on inputs.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, go slow to return a floating point negative zero.
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1953,23 +2094,29 @@ void MacroAssembler::SmiMod(Register dst,
void MacroAssembler::SmiNot(Register dst, Register src) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
+ if (SmiValuesAre32Bits()) {
+ // Set tag and padding bits before negating, so that they are zero
+ // afterwards.
+ movl(kScratchRegister, Immediate(~0));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ movl(kScratchRegister, Immediate(1));
+ }
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1979,10 +2126,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1990,9 +2137,9 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -2000,10 +2147,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2011,9 +2158,9 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
ASSERT(!src1.is(src2));
- movq(dst, src1);
+ movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2021,10 +2168,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2035,8 +2182,8 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
ASSERT(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -2046,12 +2193,27 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift_value > 0) {
+ // Shift amount specified by lower 5 bits, not six as the shl opcode.
+ shlq(dst, Immediate(shift_value & 0x1f));
+ }
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ SmiToInteger32(dst, src);
+ shll(dst, Immediate(shift_value));
+ JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
@@ -2063,29 +2225,73 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movq(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(src, src);
j(negative, on_not_smi_result, near_jump);
}
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ if (SmiValuesAre32Bits()) {
+ movp(dst, src);
+ shrp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(dst, src);
+ shrp(dst, Immediate(shift_value));
+ JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ ASSERT(!dst.is(rcx));
+ if (!dst.is(src1)) {
+ movp(dst, src1);
+ }
+ // Untag shift amount.
+ SmiToInteger32(rcx, src2);
+ // The shift amount is restricted to the lower 5 bits, not the 6 bits a 64-bit shl would use.
+ andp(rcx, Immediate(0x1f));
+ shlq_cl(dst);
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(rcx));
+
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll_cl(dst);
+ JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
+ }
}
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
}
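// Note on the rcx handling in the 31-bit branch above (an interpretation, not
// asserted by the patch): variable-count shifts take their count in cl, so
// when either input lives in rcx its original value is parked in
// kScratchRegister and restored only on the bailout path, where the caller
// may still need it; on the success path rcx is left clobbered.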
@@ -2097,33 +2303,31 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src2));
ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
}
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
} else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shrl_cl(dst);
+ JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
}
}
@@ -2135,23 +2339,14 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
+
SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
+ if (!dst.is(src1)) {
+ movp(dst, src1);
}
+ SmiToInteger32(dst, dst);
+ sarl_cl(dst);
+ Integer32ToSmi(dst, dst);
}
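// Why this path needs no bailout label (sketch of the reasoning, assuming the
// usual smi encoding): an arithmetic right shift of an untagged smi value can
// only move it toward zero, e.g. for 31-bit smis
//   -2^30 >> 1 == -2^29   and   (2^30 - 1) >> 1 == 2^29 - 1,
// both of which are still valid smi values, so re-tagging cannot fail.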
@@ -2173,7 +2368,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then neither operand is a smi.
j(not_zero, on_not_smis, near_jump);
@@ -2181,13 +2376,13 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ movp(dst, src1);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
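// The branch-free select above, restated as a host-side sketch (hypothetical
// helper, not generated code): mask is all ones when src1 is a smi and all
// zeros otherwise, so the result is src2 in the first case and src1 in the
// second.
static inline uint64_t SelectNonSmiSketch(uint64_t src1, uint64_t src2,
                                          uint64_t mask) {
  return ((src1 ^ src2) & mask) ^ src1;
}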
@@ -2195,81 +2390,125 @@ void MacroAssembler::SelectNonSmi(Register dst,
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ ASSERT(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ // We have to sign extend the index register to 64-bit as the SMI might
+ // be negative.
+ movsxlq(dst, dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
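// Worked example for the 31-bit branch above (an interpretation of the code):
// a tagged 31-bit smi is already value << 1, so for shift == 2 (a times_4
// index) the tag bit supplies one factor of two and
//   (value << 1) * 2 == value * 4,
// which is why the returned SmiIndex simply drops one scale step instead of
// untagging and re-scaling.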
+
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ // Register src holds a positive smi.
+ ASSERT(is_uint6(shift));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negp(dst);
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negq(dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, src);
+ addl(dst, kScratchRegister);
+ }
}
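// Worked offsets for the fast path above (assuming the usual 64-bit layout):
// with 32-bit smis kSmiShift == 32 and kBitsPerByte == 8, so the untagged
// integer is exactly the aligned 32-bit word at byte offset 32 / 8 == 4 of
// the tagged field, and addl can consume it without untagging first.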
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
-void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
- movq(scratch, src);
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ ASSERT(!src.is(scratch));
+ movp(scratch, src);
// High bits.
- shr(src, Immediate(64 - kSmiShift));
- shl(src, Immediate(kSmiShift));
- push(src);
+ shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ shlp(src, Immediate(kSmiShift));
+ Push(src);
// Low bits.
- shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ shlp(scratch, Immediate(kSmiShift));
+ Push(scratch);
}
-void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ ASSERT(!dst.is(scratch));
+ Pop(scratch);
// Low bits.
- shr(scratch, Immediate(kSmiShift));
- pop(dst);
- shr(dst, Immediate(kSmiShift));
+ shrp(scratch, Immediate(kSmiShift));
+ Pop(dst);
+ shrp(dst, Immediate(kSmiShift));
// High bits.
- shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ orp(dst, scratch);
}
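// Host-side round trip of the split performed by the two functions above
// (illustrative sketch with a hypothetical helper; kSmiShift is 32 for
// 32-bit smis):
static inline uint64_t SplitAndRejoin(uint64_t word) {
  const int kShift = 32;
  uint64_t high = (word >> (64 - kShift)) << kShift;   // high half, smi-tagged
  uint64_t low = word << kShift;                        // low half, smi-tagged
  // Pop side: untag both halves and reassemble the original word.
  return (low >> kShift) | ((high >> kShift) << (64 - kShift));
}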
void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
+ if (SmiValuesAre32Bits()) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+ } else {
+ ASSERT(SmiValuesAre31Bits());
+ testl(src, Immediate(source));
+ }
}
@@ -2294,7 +2533,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2310,17 +2549,17 @@ void MacroAssembler::LookupNumberStringCache(Register object,
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
Register index = scratch;
Register probe = mask;
- movq(probe,
+ movp(probe,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2334,15 +2573,15 @@ void MacroAssembler::LookupNumberStringCache(Register object,
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2351,7 +2590,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
// Get the result from the cache.
bind(&load_result_from_cache);
- movq(result,
+ movp(result,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2383,8 +2622,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
j(either_smi, on_fail, near_jump);
// Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
@@ -2399,7 +2638,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2432,8 +2671,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
+ movp(scratch1, first_object_instance_type);
+ movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
@@ -2446,7 +2685,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2486,7 +2725,7 @@ void MacroAssembler::JumpIfNotUniqueName(Register reg,
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
}
@@ -2507,7 +2746,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
Move(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
}
}
@@ -2518,7 +2757,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2529,7 +2768,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2540,7 +2779,7 @@ void MacroAssembler::Push(Handle<Object> source) {
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
@@ -2551,10 +2790,10 @@ void MacroAssembler::MoveHeapObject(Register result,
ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(result, cell, RelocInfo::CELL);
- movq(result, Operand(result, 0));
+ Move(result, cell, RelocInfo::CELL);
+ movp(result, Operand(result, 0));
} else {
- movq(result, object, RelocInfo::EMBEDDED_OBJECT);
+ Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
@@ -2564,23 +2803,155 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
AllowDeferredHandleDereference embedding_raw_address;
load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::CELL);
- movq(dst, Operand(dst, 0));
+ Move(dst, cell, RelocInfo::CELL);
+ movp(dst, Operand(dst, 0));
}
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::DropUnderReturnAddress(int stack_elements,
+ Register scratch) {
+ ASSERT(stack_elements > 0);
+ if (kPointerSize == kInt64Size && stack_elements == 1) {
+ popq(MemOperand(rsp, 0));
+ return;
+ }
+
+ PopReturnAddressTo(scratch);
+ Drop(stack_elements);
+ PushReturnAddressFrom(scratch);
+}
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ // x32 uses 64-bit push for rbp in the prologue.
+ ASSERT(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::PushQuad(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ pushq(kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
}
}
-void MacroAssembler::TestBit(const Operand& src, int bits) {
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ ASSERT(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
+ }
+}
+
+
+void MacroAssembler::PopQuad(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ popq(kScratchRegister);
+ movp(dst, kScratchRegister);
+ }
+}
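// Size assumptions behind the Push/Pop family above (a reading of the code,
// not spelled out in the patch): on x64 kPointerSize == kInt64Size, so the
// hardware pushq/popq already move exactly one pointer; on x32 a pointer is
// 4 bytes while pushq/popq still move 8, so the code adjusts rsp by 4
// explicitly and uses movp, keeping stack slots pointer-sized. PushQuad and
// PopQuad cover the slots that must stay 8 bytes wide on both variants.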
+
+
+void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt64Size) {
+ movsxlq(dst, FieldOperand(base, offset));
+ } else {
+ movp(dst, FieldOperand(base, offset));
+ SmiToInteger32(dst, dst);
+ }
+}
+
+
+void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bits) {
+ ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt32Size) {
+ // On x32, this field is represented as a smi.
+ bits += kSmiShift;
+ }
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+ testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
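// Worked example of the bit addressing above (assuming kBitsPerByte == 8):
// testing bit 11 of the field reads the byte at offset + 11 / 8 == offset + 1
// and masks it with 1 << (11 & 7) == 1 << 3. On x32 the added kSmiShift keeps
// the index pointing at the same logical flag inside the smi-encoded field.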
@@ -2590,8 +2961,18 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
@@ -2621,11 +3002,21 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
+ int end_position = pc_offset() + CallSize(destination);
#endif
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
#ifdef DEBUG
CHECK_EQ(pc_offset(), end_position);
@@ -2649,26 +3040,26 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2676,23 +3067,23 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2721,17 +3112,17 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
+ movp(SafepointRegisterSlot(dst), imm);
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
+ movp(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
+ movp(dst, SafepointRegisterSlot(src));
}
@@ -2757,33 +3148,33 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
+ movp(ExternalOperand(handler_address), rsp);
}
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2791,12 +3182,12 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx,
+ movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ shrp(rdx, Immediate(StackHandler::kKindWidth));
+ movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
@@ -2813,29 +3204,29 @@ void MacroAssembler::Throw(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
+ movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
JumpToHandlerEntry();
@@ -2854,7 +3245,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2864,7 +3255,7 @@ void MacroAssembler::ThrowUncatchable(Register value) {
Label fetch_next, check_kind;
jmp(&check_kind, Label::kNear);
bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+ movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
@@ -2873,15 +3264,15 @@ void MacroAssembler::ThrowUncatchable(Register value) {
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2897,7 +3288,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -2913,7 +3304,7 @@ void MacroAssembler::FCmp() {
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
@@ -3057,10 +3448,10 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
@@ -3074,8 +3465,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
+ Register src) {
if (FLAG_debug_code) {
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
@@ -3087,8 +3477,8 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -3097,21 +3487,22 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
@@ -3119,16 +3510,17 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
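// Why cmpq(result_reg, Immediate(1)) detects the failure case (an
// interpretation of the change above): cvttsd2siq returns the sentinel
// 0x8000000000000000 (INT64_MIN) for NaN and out-of-range inputs, and
// INT64_MIN is the only value for which subtracting 1 overflows a signed
// 64-bit register, so j(no_overflow, &done) skips the slow path exactly when
// the conversion succeeded.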
@@ -3193,56 +3585,23 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-void MacroAssembler::Throw(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- push(rax);
- Push(Smi::FromInt(reason));
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // Control will not return here.
- int3();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- j(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+ movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ andl(dst, Immediate(Map::EnumLengthBits::kMask));
+ Integer32ToSmi(dst, dst);
}
@@ -3313,10 +3672,10 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
@@ -3326,22 +3685,35 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
@@ -3351,7 +3723,7 @@ void MacroAssembler::AssertRootValue(Register src,
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
@@ -3362,7 +3734,7 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object,
Condition MacroAssembler::IsObjectNameType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
return below_equal;
@@ -3382,13 +3754,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
if (miss_on_bound_function) {
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
+ TestBitSharedFunctionInfoSpecialField(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
}
@@ -3399,7 +3771,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
- movq(result,
+ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
@@ -3414,13 +3786,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ movp(result, FieldOperand(result, Map::kPrototypeOffset));
jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
bind(&done);
@@ -3461,28 +3833,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -3490,8 +3846,7 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -3505,17 +3860,14 @@ void MacroAssembler::InvokeCode(Register code,
&definitely_mismatches,
flag,
Label::kNear,
- call_wrapper,
- call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
jmp(code);
}
bind(&done);
@@ -3523,64 +3875,24 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
@@ -3588,18 +3900,17 @@ void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function.is(rdi));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
@@ -3607,10 +3918,9 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper, call_kind);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper);
}
@@ -3622,8 +3932,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
@@ -3650,14 +3959,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
@@ -3667,22 +3976,20 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
+ movp(rdx, code_register);
}
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, near_jump);
}
} else {
- SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -3690,42 +3997,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
- push(rsi); // Callee's context.
+void MacroAssembler::StubPrologue() {
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
- } else {
- push(rbp); // Caller's frame pointer.
- movq(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
- }
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
+ pushq(rbp);
+ movp(rbp, rsp);
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister);
if (emit_debug_code()) {
- movq(kScratchRegister,
+ Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3734,11 +4042,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
- movq(rsp, rbp);
- pop(rbp);
+ movp(rsp, rbp);
+ popq(rbp);
}
@@ -3749,18 +4057,18 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
kFPOnStackSize + kPCOnStackSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
+ pushq(rbp);
+ movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ Push(Immediate(0)); // Saved entry sp, patched before call.
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ movp(r14, rax); // Backup rax in callee-save register.
}
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
@@ -3777,15 +4085,15 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
+ arg_stack_space * kRegisterSize;
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3793,11 +4101,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
@@ -3807,7 +4115,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3830,12 +4138,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
}
}
// Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
@@ -3844,8 +4152,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
- movq(rsp, rbp);
- pop(rbp);
+ movp(rsp, rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
@@ -3856,17 +4164,17 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
if (restore_context) {
- movq(rsi, context_operand);
+ movp(rsi, context_operand);
}
#ifdef DEBUG
- movq(context_operand, Immediate(0));
+ movp(context_operand, Immediate(0));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ movp(c_entry_fp_operand, Immediate(0));
}
@@ -3878,18 +4186,18 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ASSERT(!holder_reg.is(scratch));
ASSERT(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, offset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3899,7 +4207,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3910,25 +4218,25 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg,
+ Push(holder_reg);
+ movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map(),
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+ movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
- movq(kScratchRegister,
+ movp(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ movp(scratch, FieldOperand(scratch, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -4014,19 +4322,19 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
+ movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
@@ -4049,7 +4357,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -4066,7 +4374,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
@@ -4076,18 +4384,53 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
LoadAddress(scratch, allocation_top);
- movq(result, Operand(scratch, 0));
+ movp(result, Operand(scratch, 0));
} else {
Load(result, allocation_top);
}
}
+void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (kPointerSize == kDoubleSize) {
+ if (FLAG_debug_code) {
+ testl(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+ } else {
+ // Align the next allocation. Storing the filler map without checking top
+ // is safe in new-space because the limit of the heap is aligned there.
+ ASSERT(kPointerSize * 2 == kDoubleSize);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ // Make sure scratch is not clobbered by this function as it might be
+ // used in UpdateAllocationTopHelper later.
+ ASSERT(!scratch.is(kScratchRegister));
+ Label aligned;
+ testl(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ cmpp(result, ExternalOperand(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+ movp(Operand(result, 0), kScratchRegister);
+ addp(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+}
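// Worked example for the filler path above (the x32 case, where
// kPointerSize == 4 and kDoubleSize == 8): an allocation top of 0x1000c is
// pointer-aligned but not double-aligned, so a one-pointer filler map is
// stored at 0x1000c and top moves to 0x10010, letting the object that
// follows start on an 8-byte boundary.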
+
+
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
@@ -4097,7 +4440,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
+ movp(Operand(scratch, 0), result_end);
} else {
Store(allocation_top, result_end);
}
@@ -4111,7 +4454,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4131,11 +4474,8 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
@@ -4145,12 +4485,12 @@ void MacroAssembler::Allocate(int object_size,
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
- movq(top_reg, result);
+ movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4159,14 +4499,14 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
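The fast path above is a plain bump-pointer allocation: load the top, add the fixed object size, bail out to gc_required if the addition carries or crosses the limit, publish the new top, and tag the result. A hedged model in C++ (the std::optional return and the names are illustrative only):

#include <cstddef>
#include <cstdint>
#include <optional>

constexpr std::uintptr_t kHeapObjectTag = 1;

std::optional<std::uintptr_t> TryAllocate(std::uintptr_t* top, std::uintptr_t limit,
                                          std::size_t object_size) {
  std::uintptr_t result = *top;
  std::uintptr_t new_top = result + object_size;
  if (new_top < result || new_top > limit) {  // j(carry) / cmpp + j(above) -> gc_required
    return std::nullopt;
  }
  *top = new_top;                  // UpdateAllocationTopHelper
  return result + kHeapObjectTag;  // TAG_OBJECT
}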
@@ -4180,7 +4520,7 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4210,23 +4550,20 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
- movq(result_end, object_size);
+ movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4234,7 +4571,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4244,13 +4581,13 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
- movq(top_operand, object);
+ movp(top_operand, object);
}
@@ -4262,7 +4599,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4278,11 +4615,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4297,10 +4634,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4317,10 +4654,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
@@ -4335,10 +4672,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -4353,7 +4690,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4361,37 +4698,16 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
-
- Load(scratch1, high_promotion_mode);
- testb(scratch1, Immediate(1));
- j(zero, &allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4405,7 +4721,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4419,7 +4735,7 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -4464,30 +4780,30 @@ void MacroAssembler::CopyBytes(Register destination,
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
- movq(scratch, length);
+ movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
- movq(length, Operand(source, scratch, times_1, -kPointerSize));
- movq(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ movp(length, Operand(source, scratch, times_1, -kPointerSize));
+ movp(Operand(destination, scratch, times_1, -kPointerSize), length);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
bind(&len24);
- movq(scratch, Operand(source, 2 * kPointerSize));
- movq(Operand(destination, 2 * kPointerSize), scratch);
+ movp(scratch, Operand(source, 2 * kPointerSize));
+ movp(Operand(destination, 2 * kPointerSize), scratch);
bind(&len16);
- movq(scratch, Operand(source, kPointerSize));
- movq(Operand(destination, kPointerSize), scratch);
+ movp(scratch, Operand(source, kPointerSize));
+ movp(Operand(destination, kPointerSize), scratch);
bind(&len8);
- movq(scratch, Operand(source, 0));
- movq(Operand(destination, 0), scratch);
+ movp(scratch, Operand(source, 0));
+ movp(Operand(destination, 0), scratch);
// Move remaining bytes of length.
- movq(scratch, Operand(source, length, times_1, -kPointerSize));
- movq(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ movp(scratch, Operand(source, length, times_1, -kPointerSize));
+ movp(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4499,8 +4815,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
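CopyBytes moves whole words with rep movs and then covers the 1..7 leftover bytes with a single overlapping word copy taken from the end of the range; only inputs shorter than one word go through the byte loop. A rough C++ equivalent, assuming non-overlapping buffers:

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t length) {
  const size_t kWord = sizeof(std::uintptr_t);
  size_t words = length / kWord;
  for (size_t i = 0; i < words; ++i) {  // repmovsp over length >> kPointerSizeLog2
    std::memcpy(dst + i * kWord, src + i * kWord, kWord);
  }
  if (length >= kWord) {                // overlapping tail store from offset -kPointerSize
    std::memcpy(dst + length - kWord, src + length - kWord, kWord);
  } else {                              // short-string byte loop
    std::memcpy(dst, src, length);
  }
}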
@@ -4515,10 +4831,10 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Label loop, entry;
jmp(&entry);
bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ movp(Operand(start_offset, 0), filler);
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
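InitializeFieldsWithFiller is just a store loop over the pointer-sized slots in [start_offset, end_offset); the same thing with raw pointers standing in for the registers:

#include <cstdint>

void FillFields(std::uintptr_t* start, std::uintptr_t* end, std::uintptr_t filler) {
  for (std::uintptr_t* p = start; p < end; ++p) {  // cmpp(start_offset, end_offset); j(less)
    *p = filler;                                   // movp(Operand(start_offset, 0), filler)
  }
}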
@@ -4526,15 +4842,15 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
+ movp(dst, rsi);
}
// We should not have found a with context by walking the context
@@ -4556,50 +4872,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch,
+ movp(scratch,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
+ movp(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out, FieldOperand(scratch, offset));
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movq(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4608,28 +4900,19 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function,
+ movp(function,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ movp(function, Operand(function, Context::SlotOffset(index)));
}
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
@@ -4666,27 +4949,27 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
uint32_t encoding_mask) {
Label is_object;
JumpIfNotSmi(string, &is_object);
- Throw(kNonObject);
+ Abort(kNonObject);
bind(&is_object);
- push(value);
- movq(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ Push(value);
+ movp(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
- ThrowIf(not_equal, kUnexpectedStringType);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
+ Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
// string length without using a temp register, it is restored at the end of
// this function.
Integer32ToSmi(index, index);
SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- ThrowIf(greater_equal, kIndexIsTooLarge);
+ Check(less, kIndexIsTooLarge);
SmiCompare(index, Smi::FromInt(0));
- ThrowIf(less, kIndexIsNegative);
+ Check(greater_equal, kIndexIsNegative);
// Restore the index
SmiToInteger32(index, index);
@@ -4699,13 +4982,13 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
+ movp(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
+ movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
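PrepareCallCFunction reserves the argument slots plus one extra slot, rounds rsp down to the ABI frame alignment, and stores the incoming rsp in that extra slot so CallCFunction can restore it; the switch from kPointerSize to kRegisterSize presumably keeps the slots register-sized even when pointers are narrower. The address arithmetic, modeled on integers:

#include <cstdint>

constexpr std::uintptr_t kRegisterSize = 8;

std::uintptr_t AlignedStackForCCall(std::uintptr_t rsp, int argument_slots,
                                    std::uintptr_t frame_alignment /* power of two */) {
  std::uintptr_t new_rsp = rsp - (argument_slots + 1) * kRegisterSize;
  new_rsp &= ~(frame_alignment - 1);  // andp(rsp, Immediate(-frame_alignment))
  // The incoming rsp is then stored at new_rsp + argument_slots * kRegisterSize,
  // which is where CallCFunction reloads it from after the call.
  return new_rsp;
}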
@@ -4728,7 +5011,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
ASSERT(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+ movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
@@ -4773,10 +5056,10 @@ void MacroAssembler::CheckPageFlag(
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ movp(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4793,9 +5076,8 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
- SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ andl(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4812,13 +5094,13 @@ void MacroAssembler::JumpIfBlack(Register object,
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
+ movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
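JumpIfBlack relies on the two-bit-per-object mark encoding: an object is black when the first mark bit is set and the second is clear, so ANDing the bitmap cell with a two-bit mask must give back exactly the one-bit mask. A small model of that test:

#include <cstdint>

// first_bit_mask has exactly one bit set: the first of the object's two mark bits.
bool IsBlack(uint32_t bitmap_cell, uint32_t first_bit_mask) {
  uint32_t both_bits = first_bit_mask * 3;             // leap: mask | (mask << 1)
  return (bitmap_cell & both_bits) == first_bit_mask;  // cmpp(mask_scratch, rcx); j(equal)
}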
@@ -4832,7 +5114,7 @@ void MacroAssembler::JumpIfDataObject(
Label* not_data_object,
Label::Distance not_data_object_distance) {
Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
@@ -4850,23 +5132,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
+ movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
+ addp(bitmap_reg, rcx);
+ movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
+ shlp_cl(mask_reg);
}
@@ -4889,20 +5171,20 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
// shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4913,10 +5195,10 @@ void MacroAssembler::EnsureNotWhite(
Label is_data_object;
// Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
+ movp(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_heap_number);
@@ -4939,27 +5221,27 @@ void MacroAssembler::EnsureNotWhite(
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
+ movp(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
+ shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
@@ -4970,11 +5252,11 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
+ movp(rcx, rax);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
@@ -4984,7 +5266,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&next);
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(rdx, rbx);
@@ -4995,12 +5277,19 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
+ Label no_elements;
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
+ j(equal, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ bind(&no_elements);
+ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
@@ -5013,12 +5302,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
@@ -5035,22 +5324,36 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register current = scratch0;
Label loop_again;
- movq(current, object);
+ movp(current, object);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
- movq(current, FieldOperand(current, HeapObject::kMapOffset));
- movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
- shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
- movq(current, FieldOperand(current, Map::kPrototypeOffset));
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(not_equal, &loop_again);
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(rax));
+ ASSERT(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
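TruncatingDiv is the standard multiply-and-shift replacement for a signed 32-bit division by a constant: take the high half of dividend * multiplier, correct by the dividend when multiplier and divisor have opposite signs, shift, and add the sign bit so the result truncates toward zero. A hedged model for divisor 7, using the classic magic pair from Hacker's Delight (multiplier 0x92492493, shift 2) as an assumed stand-in for what MultiplierAndShift would compute; it relies on arithmetic right shifts of negative values, as on x64:

#include <cstdint>

int32_t TruncatingDivBy7(int32_t dividend) {
  const int32_t multiplier = -1840700269;  // 0x92492493, assumed magic multiplier for 7
  const int shift = 2;                     // assumed shift for 7
  int32_t high = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);  // imull: high half in rdx
  high += dividend;  // divisor > 0 && multiplier < 0: addl(rdx, dividend)
  high >>= shift;    // sarl(rdx, Immediate(shift))
  high += static_cast<uint32_t>(dividend) >> 31;  // add the sign bit: truncate toward zero
  return high;       // quotient ends up in rdx
}

// For example, TruncatingDivBy7(20) == 2 and TruncatingDivBy7(-20) == -2.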
diff --git a/chromium/v8/src/x64/macro-assembler-x64.h b/chromium/v8/src/x64/macro-assembler-x64.h
index 98808a86722..8a0ffa61540 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/x64/macro-assembler-x64.h
@@ -1,36 +1,13 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -52,6 +29,10 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+ kPointersToHereMaybeInteresting,
+ kPointersToHereAreAlwaysInteresting
+};
enum SmiOperationConstraint {
PRESERVE_SOURCE_REGISTER,
@@ -243,7 +224,9 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
@@ -254,14 +237,17 @@ class MacroAssembler: public Assembler {
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
save_fp,
remembered_set_action,
- smi_check);
+ smi_check,
+ pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a pointer into a fixed array.
@@ -276,7 +262,15 @@ class MacroAssembler: public Assembler {
Register index,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ void RecordWriteForMap(
+ Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode save_fp);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -289,17 +283,18 @@ class MacroAssembler: public Assembler {
Register value,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
-#endif
// Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
+ void StubPrologue();
+ void Prologue(bool code_pre_aging);
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
@@ -336,54 +331,37 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
+ addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Set up call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -407,8 +385,8 @@ class MacroAssembler: public Assembler {
void SafePush(Smi* src);
void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
- RelocInfo::NONE64);
+ Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
}
// Conversions between tagged smi values and non-tagged integer values.
@@ -511,10 +489,18 @@ class MacroAssembler: public Assembler {
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
+ // Jump if the value can be represented by a smi.
+ void JumpIfValidSmiValue(Register src, Label* on_valid,
+ Label::Distance near_jump = Label::kFar);
+
// Jump if the value cannot be represented by a smi.
void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);
+ // Jump if the unsigned integer value can be represented by a smi.
+ void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
+ Label::Distance near_jump = Label::kFar);
+
// Jump if the unsigned integer value cannot be represented by a smi.
void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);
@@ -672,12 +658,14 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value);
+ int shift_value,
+ Label* on_not_smi_result = NULL,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -686,7 +674,9 @@ class MacroAssembler: public Assembler {
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
Register src1,
- Register src2);
+ Register src2,
+ Label* on_not_smi_result = NULL,
+ Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
@@ -738,17 +728,17 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Smi* source) {
Register constant = GetSmiConstant(source);
- movq(dst, constant);
+ movp(dst, constant);
}
void Push(Smi* smi);
- // Save away a 64-bit integer on the stack as two 32-bit integers
+ // Save away a raw integer with pointer size on the stack as two integers
// masquerading as smis so that the garbage collector skips visiting them.
- void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
- // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
- // smis on the top of stack.
- void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+ void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a raw integer with pointer size from two integers masquerading
+ // as smis on the top of stack.
+ void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
void Test(const Operand& dst, Smi* source);
@@ -819,7 +809,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -830,8 +820,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Bit-field support.
- void TestBit(const Operand& dst, int bit_index);
+ // TestBit and Load SharedFunctionInfo special field.
+ void TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bit_index);
+ void LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset);
// Handle support
void Move(Register dst, Handle<Object> source);
@@ -852,32 +847,59 @@ class MacroAssembler: public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
+ // Emit code to discard a positive number of pointer-sized elements
+ // from the stack under the return address which remains on the top,
+ // clobbering the rsp register.
+ void DropUnderReturnAddress(int stack_elements,
+ Register scratch = kScratchRegister);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
- void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
- void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
+ void Push(Register src);
+ void Push(const Operand& src);
+ void PushQuad(const Operand& src);
+ void Push(Immediate value);
+ void PushImm32(int32_t imm32);
+ void Pop(Register dst);
+ void Pop(const Operand& dst);
+ void PopQuad(const Operand& dst);
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
- movq(dst, reinterpret_cast<Address>(ext.address()),
+ movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
+ // Loads a pointer into a register with a relocation mode.
+ void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+ movp(dst, ptr, rmode);
+ }
+
+ void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
+ AllowDeferredHandleDereference using_raw_address;
+ ASSERT(!RelocInfo::IsNone(rmode));
+ ASSERT(value->IsHeapObject());
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
+ movp(dst, reinterpret_cast<void*>(value.location()), rmode);
+ }
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
+ int CallSize(Address destination) {
return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
@@ -1013,7 +1035,7 @@ class MacroAssembler: public Assembler {
MinusZeroMode minus_zero_mode, Label* lost_precision,
Label::Distance dst = Label::kFar);
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32(XMMRegister dst, Register src);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
@@ -1021,11 +1043,32 @@ class MacroAssembler: public Assembler {
template<typename Field>
void DecodeField(Register reg) {
- static const int shift = Field::kShift + kSmiShift;
+ static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
- shl(reg, Immediate(kSmiShift));
+ if (shift != 0) {
+ shrp(reg, Immediate(shift));
+ }
+ andp(reg, Immediate(mask));
+ }
+
+ template<typename Field>
+ void DecodeFieldToSmi(Register reg) {
+ if (SmiValuesAre32Bits()) {
+ andp(reg, Immediate(Field::kMask));
+ shlp(reg, Immediate(kSmiShift - Field::kShift));
+ } else {
+ static const int shift = Field::kShift;
+ static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
+ ASSERT(SmiValuesAre31Bits());
+ ASSERT(kSmiShift == kSmiTagSize);
+ ASSERT((mask & 0x80000000u) == 0);
+ if (shift < kSmiShift) {
+ shlp(reg, Immediate(kSmiShift - shift));
+ } else if (shift > kSmiShift) {
+ sarp(reg, Immediate(shift - kSmiShift));
+ }
+ andp(reg, Immediate(mask));
+ }
}
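DecodeField is the usual shift-then-mask extraction of a BitField, now working on the raw 32-bit word instead of a smi-shifted value. A sketch with a hypothetical three-bit field (the struct is illustrative, not a real V8 BitField):

#include <cstdint>

struct HypotheticalField {
  static constexpr int kShift = 4;
  static constexpr uint32_t kMask = 0x7u << kShift;  // three bits at position 4
};

template <typename Field>
uint32_t Decode(uint32_t word) {
  if (Field::kShift != 0) word >>= Field::kShift;  // shrp(reg, Immediate(shift))
  return word & (Field::kMask >> Field::kShift);   // andp(reg, Immediate(mask))
}

// Decode<HypotheticalField>(0xAB) == 2, since bits 4..6 of 0xAB are 010.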
// Abort execution if argument is not a number, enabled via --debug-code.
@@ -1048,6 +1091,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1070,12 +1117,6 @@ class MacroAssembler: public Assembler {
// Propagate an uncatchable exception out of the current JS stack.
void ThrowUncatchable(Register value);
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason);
-
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1211,10 +1252,6 @@ class MacroAssembler: public Assembler {
Label* miss,
bool miss_on_bound_function = false);
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -1235,15 +1272,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1309,8 +1339,8 @@ class MacroAssembler: public Assembler {
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
@@ -1371,6 +1401,10 @@ class MacroAssembler: public Assembler {
Register filler);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in rdx, and rax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1456,7 +1490,7 @@ class MacroAssembler: public Assembler {
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
- intptr_t RootRegisterDelta(ExternalReference other);
+ int64_t RootRegisterDelta(ExternalReference other);
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
@@ -1473,8 +1507,7 @@ class MacroAssembler: public Assembler {
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper = NullCallWrapper());
void EnterExitFramePrologue(bool save_rax);
@@ -1492,19 +1525,17 @@ class MacroAssembler: public Assembler {
Register scratch,
AllocationFlags flags);
+ void MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
// Update allocation top with value in result_end register.
// If scratch is valid, it contains the address of the allocation top.
void UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags);
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
@@ -1609,9 +1640,9 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
masm->pushfq(); \
masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
+ masm->Pop(rax); \
masm->Popad(); \
masm->popfq(); \
} \
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
index 3e65a68b831..a8c1cb47a7f 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,42 +1,19 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "cpu-profiler.h"
-#include "serialize.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "x64/regexp-macro-assembler-x64.h"
+#include "src/cpu-profiler.h"
+#include "src/serialize.h"
+#include "src/unicode.h"
+#include "src/log.h"
+#include "src/regexp-stack.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/x64/regexp-macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -166,7 +143,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
+ __ addp(register_location(reg), Immediate(by));
}
}
@@ -175,7 +152,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
- __ addq(rbx, code_object_pointer());
+ __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -203,8 +180,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
@@ -215,8 +192,8 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -241,9 +218,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
int start_reg,
Label* on_no_match) {
Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
+ __ subp(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -273,9 +250,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
+ __ leap(r9, Operand(rsi, rdx, times_1, 0));
+ __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -293,8 +270,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
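The fallback path above uses the ASCII case-folding trick: OR-ing with 0x20 maps 'A'..'Z' onto 'a'..'z', so after the exact-match check two bytes compare equal case-insensitively when their folded forms agree and land inside 'a'..'z'. A compact model:

#include <cstdint>

bool AsciiEqualIgnoreCase(uint8_t a, uint8_t b) {
  if (a == b) return true;     // exact match already handled by the earlier cmpb/j(equal)
  uint8_t la = a | 0x20;       // orp(rax, Immediate(0x20))
  uint8_t lb = b | 0x20;       // orp(rdx, Immediate(0x20))
  if (la != lb) return false;  // definitely not equal
  uint8_t offset = la - 'a';   // subb(rax, Immediate('a'))
  return offset <= 'z' - 'a';  // equal only if both are letters
}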
@@ -308,24 +285,24 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
+ __ addp(r11, Immediate(1));
+ __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
- __ movq(rdi, r11);
+ __ movp(rdi, r11);
__ subq(rdi, rsi);
} else {
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
- __ push(backtrack_stackpointer());
+ __ pushq(backtrack_stackpointer());
static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
@@ -337,22 +314,22 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
- __ movq(r8, rbx);
+ __ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ movq(rsi, rax);
+ __ movp(rsi, rax);
// Set byte_length.
- __ movq(rdx, rbx);
+ __ movp(rdx, rbx);
// Isolate.
__ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
#endif
@@ -367,14 +344,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
+ __ popq(backtrack_stackpointer());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -390,9 +367,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
Label fallthrough;
// Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
+ ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
+ ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
+ __ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
@@ -412,9 +389,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addp(rdx, rsi); // Start of capture.
+ __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -433,15 +410,15 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
+ __ addp(rbx, Immediate(char_size()));
+ __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
// Set current character position to position after match.
- __ movq(rdi, rbx);
+ __ movp(rdi, rbx);
__ subq(rdi, rsi);
__ bind(&fallthrough);
@@ -462,7 +439,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +453,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -489,8 +466,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ leap(rax, Operand(current_character(), -minus));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
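
The three small checks above all reduce to integer identities on the current character; a minimal restatement of the predicates they branch on (function names are descriptive, not V8 API):

#include <cstdint>

// CheckCharacterAfterAnd / CheckNotCharacterAfterAnd branch on this predicate:
bool EqualsAfterAnd(uint32_t ch, uint32_t c, uint32_t mask) {
  return (ch & mask) == c;
}

// CheckNotCharacterAfterMinusAnd (leap with -minus, then andp, then cmpl)
// backtracks to on_not_equal exactly when this is false:
bool EqualsAfterMinusAnd(uint32_t ch, uint32_t c, uint32_t minus, uint32_t mask) {
  return ((ch - minus) & mask) == c;
}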
@@ -522,8 +499,8 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
__ Move(rax, table);
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ movp(rbx, current_character());
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
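
CheckBitInTable is a plain byte-table lookup: the character is masked down to the table size unless one-byte characters already fit, then the byte at that index decides the branch. A host-side sketch, assuming kTableMask is the table size minus one:

#include <cstdint>

// 'table' is the ByteArray payload, one byte per bucket; a non-zero byte means
// the character class contains that bucket.
bool CharacterIsInTable(uint16_t ch, const uint8_t* table, uint32_t table_mask) {
  return table[ch & table_mask] != 0;
}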
@@ -536,7 +513,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
switch (type) {
case 's':
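
The single-comparison range check described in the comment above relies on unsigned wrap-around: (c - min) underflows to a huge value whenever c < min, so one unsigned comparison covers both bounds. A self-contained check of the equivalence:

#include <cassert>
#include <cstdint>

// Unsigned (c - min) <= (max - min) holds exactly when min <= c && c <= max.
bool InRange(uint32_t c, uint32_t min, uint32_t max) {
  return (c - min) <= (max - min);
}

int main() {
  for (uint32_t c = 0; c < 256; c++) {
    assert(InRange(c, '0', '9') == ('0' <= c && c <= '9'));      // the 'd' case
    assert(InRange(c, '\t', '\r') == ('\t' <= c && c <= '\r'));  // part of 's'
  }
}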
@@ -547,7 +524,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
+ __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
@@ -562,20 +539,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +570,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
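
Both the '.' and 'n' cases lean on the same bit trick: '\n' (0x0a) and '\r' (0x0d) differ from 0x0b and 0x0c only in the lowest bit, so xor-ing the character with 1 folds the two ASCII line terminators into a contiguous two-value range that the usual unsigned range check covers (the 0x2028/0x2029 pair is handled by the UC16 continuation of these hunks). A runnable restatement:

#include <cassert>
#include <cstdint>

bool IsAsciiLineTerminator(uint32_t ch) {
  return ((ch ^ 0x01) - 0x0b) <= (0x0c - 0x0b);
}

int main() {
  assert(IsAsciiLineTerminator('\n'));
  assert(IsAsciiLineTerminator('\r'));
  assert(!IsAsciiLineTerminator(0x0b) && !IsAsciiLineTerminator('a'));
}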
@@ -674,8 +651,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
- __ push(rbp);
- __ movq(rbp, rsp);
+ __ pushq(rbp);
+ __ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_rbx etc.
#ifdef _WIN64
@@ -686,30 +663,30 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
+ __ pushq(rsi);
+ __ pushq(rdi);
+ __ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
+ ASSERT_EQ(kInputString, -1 * kRegisterSize);
+ ASSERT_EQ(kStartIndex, -2 * kRegisterSize);
+ ASSERT_EQ(kInputStart, -3 * kRegisterSize);
+ ASSERT_EQ(kInputEnd, -4 * kRegisterSize);
+ ASSERT_EQ(kRegisterOutput, -5 * kRegisterSize);
+ ASSERT_EQ(kNumOutputRegisters, -6 * kRegisterSize);
+ __ pushq(rdi);
+ __ pushq(rsi);
+ __ pushq(rdx);
+ __ pushq(rcx);
+ __ pushq(r8);
+ __ pushq(r9);
+
+ __ pushq(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ Push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -717,14 +694,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ movq(rcx, rsp);
+ __ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
+ __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
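
The prologue's stack check above computes the headroom between rsp and the VM stack limit and takes one of three exits; restated on plain integers (the enum and function are illustrative only):

#include <cstddef>
#include <cstdint>

enum StackCheckResult { kStackOk, kStackLimitHit, kOutOfStack };

StackCheckResult CheckStackSpace(uintptr_t rsp, uintptr_t stack_limit,
                                 size_t num_registers, size_t pointer_size) {
  if (rsp <= stack_limit) return kStackLimitHit;   // j(below_equal, &stack_limit_hit)
  uintptr_t headroom = rsp - stack_limit;
  if (headroom >= num_registers * pointer_size) return kStackOk;  // j(above_equal, &stack_ok)
  return kOutOfStack;  // Fall through: exit with the stack-overflow result.
}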
@@ -734,32 +711,32 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
+ __ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ movp(rbx, Operand(rbp, kStartIndex));
+ __ negq(rbx);
if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kInputStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
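
The hunk above establishes the position convention used throughout this file: rsi points at the end of the input, rdi holds the current position as a negative byte offset from that end (so rdi == 0 means the end of input has been reached), and kInputStartMinusOne caches the offset of the character just before the start, for clearing capture registers. A small host-side model of the same convention:

#include <cstdint>
#include <cstdio>

struct RegExpPosition {
  const char* input_end;      // what rsi points to
  intptr_t offset_from_end;   // rdi: always <= 0
  char Current() const { return input_end[offset_from_end]; }
  bool AtEnd() const { return offset_from_end == 0; }
};

int main() {
  const char text[] = "abc";
  RegExpPosition pos = { text + 3, -3 };   // positioned at 'a'
  while (!pos.AtEnd()) {
    std::printf("%c", pos.Current());
    pos.offset_from_end += 1;              // char_size() == 1 in ASCII mode
  }
  std::printf("\n");
}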
@@ -769,7 +746,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
+ __ movp(register_location(i), rax); // One write every page.
}
#endif // V8_OS_WIN
@@ -798,20 +775,20 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ movp(Operand(rbp, rcx, times_1, 0), rax);
__ subq(rcx, Immediate(kPointerSize));
__ cmpq(rcx,
Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
+ __ movp(register_location(i), rax);
}
}
}
// Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
__ jmp(&start_label_);
@@ -821,24 +798,24 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ movp(rdx, Operand(rbp, kStartIndex));
+ __ movp(rbx, Operand(rbp, kRegisterOutput));
+ __ movp(rcx, Operand(rbp, kInputEnd));
+ __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addq(rcx, rdx);
+ __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
+ __ movp(rax, register_location(i));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
+ __ movp(rdx, rax);
}
- __ addq(rax, rcx); // Convert to index from start, not end.
+ __ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
+ __ sarp(rax, Immediate(1)); // Convert byte index to character index.
}
__ movl(Operand(rbx, i * kIntSize), rax);
}
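
The success path above turns each stored capture (a byte offset from the end of the input, the same convention rdi uses) into a character index relative to the whole subject string before writing it to the 32-bit output slot. The arithmetic, restated host-side with descriptive parameter names:

#include <cstdint>

int32_t CaptureToCharIndex(intptr_t capture_offset_from_end,  // register value
                           intptr_t input_start, intptr_t input_end,
                           intptr_t start_index, int char_size) {
  intptr_t byte_index = capture_offset_from_end
                        + (input_end - input_start)     // subp(rcx, kInputStart)
                        + start_index * char_size;      // leap times_2 / addp rdx
  return static_cast<int32_t>(byte_index / char_size);  // sarp(rax, 1) for UC16
}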
@@ -847,31 +824,31 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
+ __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
+ __ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
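
For global regexps the block above does three things after every successful match: count it, check whether the caller-supplied output array still has room for another full capture set, and force progress when the match had zero length so the loop cannot spin forever. A compact host-side restatement (the struct and field names are illustrative, not the generated frame slots):

#include <cstdint>

struct GlobalMatchState {
  int successful_captures;          // Operand(rbp, kSuccessfulCaptures)
  int remaining_output_registers;   // Operand(rbp, kNumOutputRegisters)
  int32_t* register_output;         // Operand(rbp, kRegisterOutput)
};

// Returns true when matching should restart at load_char_start_regexp.
bool ContinueAfterMatch(GlobalMatchState* s, int num_saved_registers,
                        bool zero_length_match, bool at_end_of_input) {
  s->successful_captures++;
  s->remaining_output_registers -= num_saved_registers;
  if (s->remaining_output_registers < num_saved_registers) return false;
  s->register_output += num_saved_registers;
  if (zero_length_match && at_end_of_input) return false;
  return true;  // A zero-length match additionally advances rdi by one character.
}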
@@ -883,32 +860,32 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&load_char_start_regexp);
} else {
- __ movq(rax, Immediate(SUCCESS));
+ __ movp(rax, Immediate(SUCCESS));
}
}
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
+ __ movp(rax, Operand(rbp, kSuccessfulCaptures));
}
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
+ __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ popq(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
+ __ movp(rbx, Operand(rbp, kBackup_rbx));
// Skip rsp to rbp.
- __ movq(rsp, rbp);
+ __ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -923,21 +900,21 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
- __ push(rdi);
+ __ pushq(backtrack_stackpointer());
+ __ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
+ __ popq(rdi);
+ __ popq(backtrack_stackpointer());
// String might have moved: Reload rsi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
+ __ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
}
@@ -950,8 +927,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
// Call GrowStack(backtrack_stackpointer())
@@ -960,12 +937,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ movp(rdi, backtrack_stackpointer()); // First argument.
+ __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -973,15 +950,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If it returns NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
+ __ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
SafeReturn();
}
@@ -1015,7 +992,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1023,14 +1000,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1061,7 +1038,7 @@ void RegExpMacroAssemblerX64::PopCurrentPosition() {
void RegExpMacroAssemblerX64::PopRegister(int register_index) {
Pop(rax);
- __ movq(register_location(register_index), rax);
+ __ movp(register_location(register_index), rax);
}
@@ -1078,26 +1055,44 @@ void RegExpMacroAssemblerX64::PushCurrentPosition() {
void RegExpMacroAssemblerX64::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
+ __ movp(rax, register_location(register_index));
Push(rax);
if (check_stack_limit) CheckStackLimit();
}
+STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
+
+
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
+ if (kPointerSize == kInt64Size) {
+ __ movq(rdi, register_location(reg));
+ } else {
+ // Need sign extension for x32 as rdi might be used as an index register.
+ __ movsxlq(rdi, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
+ if (kPointerSize == kInt64Size) {
+ __ movq(dst, register_location(reg));
+ } else {
+ // Need sign extension for x32 as dst might be used as an index register.
+ __ movsxlq(dst, register_location(reg));
+ }
}
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ movp(backtrack_stackpointer(), register_location(reg));
+ __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1110,7 +1105,7 @@ void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
+ __ movp(register_location(register_index), Immediate(to));
}
@@ -1123,27 +1118,27 @@ bool RegExpMacroAssemblerX64::Succeed() {
void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
+ __ movp(register_location(reg), rdi);
} else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
+ __ movp(register_location(reg), rax);
}
}
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
+ __ movp(rax, backtrack_stackpointer());
+ __ subp(rax, Operand(rbp, kStackHighEnd));
+ __ movp(register_location(reg), rax);
}
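
WriteStackPointerToRegister above and ReadStackPointerFromRegister earlier save the backtrack stack pointer as an offset from kStackHighEnd rather than as an absolute address; presumably this keeps saved values usable if GrowStack later reallocates the backing store and the high-end slot in the frame is updated. A host-side model of the pair:

#include <cstdint>

struct BacktrackStack {
  uintptr_t high_end;  // Operand(rbp, kStackHighEnd)
  uintptr_t sp;        // backtrack_stackpointer()
};

intptr_t SaveStackPointer(const BacktrackStack& s) {    // WriteStackPointerToRegister
  return static_cast<intptr_t>(s.sp - s.high_end);      // <= 0: the stack grows down
}

void RestoreStackPointer(BacktrackStack* s, intptr_t saved) {  // ReadStackPointerFromRegister
  s->sp = s->high_end + saved;
}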
@@ -1156,20 +1151,20 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
+ __ movp(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
+ __ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
+ __ movp(rdx, rbp);
// Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
+ __ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
+ __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1188,7 +1183,8 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
@@ -1215,7 +1211,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+ Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code->address();
@@ -1323,12 +1319,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
+ __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
+ __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1336,14 +1332,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1367,7 +1363,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1377,12 +1373,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
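
The backtrack stack holds 32-bit entries (kIntSize) even though host pointers are 64-bit: positions fit in 32 bits, and backtrack code addresses are pushed as offsets relative to the Code object (see SafeCallTarget/SafeReturn and the code-relative fixups above), which also keeps them valid when the GC moves the code. A host-side model of the push/pop pair, assuming a downward-growing stack:

#include <cstdint>

struct BacktrackEntries {
  int32_t* sp;                                            // backtrack_stackpointer()
  void Push(int32_t value) { *--sp = value; }             // subp kIntSize; movl
  int64_t Pop() { return static_cast<int64_t>(*sp++); }   // movsxlq; addp kIntSize
  void Drop() { sp++; }                                   // addp kIntSize
};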
@@ -1392,7 +1388,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1406,7 +1402,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/x64/regexp-macro-assembler-x64.h
index b230ea47fc6..89d8d3b5cc7 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "x64/macro-assembler-x64.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/macro-assembler.h"
+#include "src/code.h"
+#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -135,8 +112,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
+ static const int kReturn_eip = kFramePointer + kRegisterSize;
+ static const int kFrameAlign = kReturn_eip + kRegisterSize;
#ifdef _WIN64
// Parameters (first four passed as registers, but with room on stack).
@@ -145,49 +122,50 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// use this space to store the register passed parameters.
static const int kInputString = kFrameAlign;
// StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
+ static const int kStartIndex = kInputString + kRegisterSize;
+ static const int kInputStart = kStartIndex + kRegisterSize;
+ static const int kInputEnd = kInputStart + kRegisterSize;
+ static const int kRegisterOutput = kInputEnd + kRegisterSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize;
+ static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
+ static const int kInputString = kFramePointer - kRegisterSize;
+ static const int kStartIndex = kInputString - kRegisterSize;
+ static const int kInputStart = kStartIndex - kRegisterSize;
+ static const int kInputEnd = kInputStart - kRegisterSize;
+ static const int kRegisterOutput = kInputEnd - kRegisterSize;
+
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
+ static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;
static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kRegisterSize;
+ static const int kIsolate = kDirectCall + kRegisterSize;
#endif
#ifdef _WIN64
// Microsoft calling convention has three callee-saved registers
// (that we are using). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
+ static const int kBackup_rsi = kFramePointer - kRegisterSize;
+ static const int kBackup_rdi = kBackup_rsi - kRegisterSize;
+ static const int kBackup_rbx = kBackup_rdi - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
+ static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
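
With kRegisterSize == 8, the System V (non-_WIN64) branch of the layout above works out to the offsets in the comments below; this is just the same expressions evaluated numerically (consistent with the ASSERT_EQ checks in GetCode), not new constants:

#include <cstdio>

static const int kRegisterSize = 8;
static const int kFramePointer = 0;
static const int kReturn_eip = kFramePointer + kRegisterSize;            //  +8
static const int kFrameAlign = kReturn_eip + kRegisterSize;              // +16
static const int kInputString = kFramePointer - kRegisterSize;           //  -8
static const int kStartIndex = kInputString - kRegisterSize;             // -16
static const int kInputStart = kStartIndex - kRegisterSize;              // -24
static const int kInputEnd = kInputStart - kRegisterSize;                // -32
static const int kRegisterOutput = kInputEnd - kRegisterSize;            // -40
static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;  // -48
static const int kStackHighEnd = kFrameAlign;                            // +16
static const int kDirectCall = kStackHighEnd + kRegisterSize;            // +24
static const int kIsolate = kDirectCall + kRegisterSize;                 // +32
static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;      // -56

int main() {
  std::printf("kInputString=%d kBackup_rbx=%d\n", kInputString, kBackup_rbx);
}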
@@ -268,6 +246,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ inline void ReadPositionFromRegister(Register dst, int reg);
+
Isolate* isolate() const { return masm_.isolate(); }
MacroAssembler masm_;
diff --git a/chromium/v8/src/x64/simulator-x64.cc b/chromium/v8/src/x64/simulator-x64.cc
index 448b025a6bf..f7f2fb4bb4f 100644
--- a/chromium/v8/src/x64/simulator-x64.cc
+++ b/chromium/v8/src/x64/simulator-x64.cc
@@ -1,26 +1,3 @@
// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
diff --git a/chromium/v8/src/x64/simulator-x64.h b/chromium/v8/src/x64/simulator-x64.h
index 8aba70181f4..35cbdc78884 100644
--- a/chromium/v8/src/x64/simulator-x64.h
+++ b/chromium/v8/src/x64/simulator-x64.h
@@ -1,34 +1,11 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
-#include "allocation.h"
+#include "src/allocation.h"
namespace v8 {
namespace internal {
@@ -47,9 +24,6 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on x64 uses the C stack, we
// just use the C stack limit.
diff --git a/chromium/v8/src/x64/stub-cache-x64.cc b/chromium/v8/src/x64/stub-cache-x64.cc
index c87f00fc4db..422ef2e0626 100644
--- a/chromium/v8/src/x64/stub-cache-x64.cc
+++ b/chromium/v8/src/x64/stub-cache-x64.cc
@@ -1,38 +1,15 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "arguments.h"
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
+#include "src/arguments.h"
+#include "src/ic-inl.h"
+#include "src/codegen.h"
+#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -49,10 +26,12 @@ static void ProbeTable(Isolate* isolate,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
+ // We need to scale up the pointer by 2 when the offset is scaled by less
// than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
+ ASSERT(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == kHeapObjectTagSize + 1
+ : kPointerSizeLog2 == kHeapObjectTagSize);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
@@ -62,7 +41,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
+ __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
@@ -75,19 +54,19 @@ static void ProbeTable(Isolate* isolate,
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
__ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
+ __ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, 0));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -100,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
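
The scaling in ProbeTable is easiest to see with the sizes written out: each entry is three pointers (name, code, map), the incoming offset register already holds the entry index times four (from the masking in GenerateProbe below), the lea multiplies that by three, and the times_2 scale factor brings it to index * 24 == index * sizeof(Entry) on 64-bit. A host-side sketch of the address computation only:

#include <cstdint>

struct Entry { void* name; void* code; void* map; };  // 3 * kPointerSize == 24

Entry* EntryForOffset(Entry* table, uint32_t offset_times_four) {
  uint32_t offset_times_twelve = offset_times_four * 3;  // leap offset, [offset + offset*2]
  char* base = reinterpret_cast<char*>(table);
  return reinterpret_cast<Entry*>(base + offset_times_twelve * 2);  // scale_factor times_2
}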
@@ -119,7 +98,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
@@ -135,7 +114,7 @@ void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
// Load properties array.
Register properties = scratch0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
@@ -193,10 +172,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -204,11 +183,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
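
The primary and secondary probe offsets computed above, restated on 32-bit integers: name_hash is the low word of the name's hash field, map_low and name_low are the low 32 bits of the map and name pointers, and the masks keep only bits that address whole entries (the low kHeapObjectTagSize bits stay clear):

#include <cstdint>

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_low, uint32_t flags,
                       uint32_t primary_mask) {
  return ((name_hash + map_low) ^ flags) & primary_mask;
}

uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low,
                         uint32_t flags, uint32_t secondary_mask) {
  return ((primary_offset - name_low) + flags) & secondary_mask;
}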
@@ -224,18 +203,18 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
- __ movq(prototype,
+ __ movp(prototype,
Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ __ movp(prototype, Operand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
+ __ movp(prototype,
FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -245,18 +224,22 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Register prototype,
Label* miss) {
Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global_object());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- prototype);
- __ j(not_equal, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ movp(scratch, Operand(rsi, offset));
+ __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -272,55 +255,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ j(not_equal, miss_label);
// Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset));
__ ret(0);
}
@@ -331,7 +266,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
+ if (!result.is(rax)) __ movp(rax, result);
__ ret(0);
}
@@ -342,15 +277,15 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset));
src = dst;
}
- __ movq(dst, FieldOperand(src, offset));
+ __ movp(dst, FieldOperand(src, offset));
}
@@ -364,13 +299,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
- __ push(name);
+ __ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
__ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
}
@@ -388,444 +323,85 @@ static void CompileCallLoadPropertyWithInterceptor(
}
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
-
-
-// Reserves space for the extra arguments to API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ movq(args.GetArgumentOperand(i), scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra
- // argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, StackOperandForReturnAddress(0));
- __ movq(StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize),
- scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context);
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- typedef FunctionCallbackArguments FCA;
- StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
-
- // Save calling context.
- int offset = argc + kFastApiCallArguments;
- __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Construct the FunctionCallbackInfo on the stack.
- __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
- } else {
- __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
- }
- __ Move(kScratchRegister,
- ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
- kScratchRegister);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
- kScratchRegister);
- __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
- kScratchRegister);
-
- // Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 7);
- __ lea(rax, Operand(rsp, 1 * kPointerSize));
-
- GenerateFastApiCallBody(masm, optimization, argc, false);
-}
-
-
// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that should will be removed
-// when api call ICs are generated in hydrogen.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
- // Copy return value.
- __ pop(scratch1);
-
+ __ PopReturnAddressTo(scratch_in);
// receiver
- __ push(receiver);
-
+ __ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
- ASSERT(!scratch1.is(arg));
- ASSERT(!scratch2.is(arg));
- ASSERT(!scratch3.is(arg));
- __ push(arg);
+ ASSERT(!scratch_in.is(arg));
+ __ Push(arg);
+ }
+ __ PushReturnAddressFrom(scratch_in);
+ // Stack now matches JSFunction abi.
+
+ // Abi for CallApiFunctionStub.
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register scratch = rdi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+ receiver_map,
+ &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
}
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kArgsLength == 7);
-
- // context save
- __ push(rsi);
-
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ Move(scratch2, function);
- __ push(scratch2);
Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), isolate);
- // Push data from ExecutableAccessorInfo.
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
bool call_data_undefined = false;
- if (isolate->heap()->InNewSpace(*call_data)) {
- __ Move(scratch2, api_call_info);
- __ movq(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
- } else if (call_data->IsUndefined()) {
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(scratch, api_call_info);
+ __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
call_data_undefined = true;
- __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
} else {
- __ Move(scratch3, call_data);
- }
- // call data
- __ push(scratch3);
- if (!call_data_undefined) {
- __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ __ Move(call_data, call_data_obj);
}
- // return value
- __ push(scratch3);
- // return value default
- __ push(scratch3);
- // isolate
- __ Move(scratch3,
- ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch3);
- // holder
- __ push(receiver);
-
- ASSERT(!scratch1.is(rax));
-  // Store receiver address for GenerateFastApiCallBody.
- __ movq(rax, rsp);
-
- // return address
- __ push(scratch1);
-
- GenerateFastApiCallBody(masm, optimization, argc, true);
-}
-
-static void GenerateFastApiCallBody(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- bool restore_context) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
- // : object passing the type check
- // (set by CheckPrototypes)
- // -- rsp[64] : last argument
- // -- ...
- // -- rsp[(argc + 7) * 8] : first argument
- // -- rsp[(argc + 8) * 8] : receiver
- //
- // rax : receiver address
- // -----------------------------------
- typedef FunctionCallbackArguments FCA;
-
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- // Function address is a foreign pointer outside V8's heap.
+ // Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ Move(
+ api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
- __ addq(rax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
- __ movq(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
- __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
- Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
- __ CallApiFunctionAndReturn(
- function_address,
- thunk_address,
- callback_arg,
- argc + kFastApiCallArguments + 1,
- return_value_operand,
- restore_context ? &context_restore_operand : NULL);
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
}
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(CallStubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
- handle(lookup->holder()), scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for API (object which is instanceof for the signature). It's
- // safe to omit it here, as if present, it should be fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- Handle<JSFunction> fun = optimization.constant_function();
- stub_compiler_->GenerateJumpFunction(object, fun);
- }
-
-    // Deferred code for the fast API call case: clean up preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<Name> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(
- IC::CurrentTypeOf(object, masm->isolate()), receiver,
- interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, interceptor_holder,
- IC::kLoadPropertyWithInterceptorForCall);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(receiver);
- __ push(holder);
- __ push(name_);
-
- CompileCallLoadPropertyWithInterceptor(
- masm, receiver, holder, name_, holder_obj,
- IC::kLoadPropertyWithInterceptorOnly);
-
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
- // Leave the internal frame.
- }
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- CallStubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- ExtraICState extra_ic_state_;
-};
-
-
void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name) {
@@ -892,11 +468,26 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -924,9 +515,9 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ PopReturnAddressTo(scratch1);
- __ push(receiver_reg);
+ __ Push(receiver_reg);
__ Push(transition);
- __ push(value_reg);
+ __ Push(value_reg);
__ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -938,7 +529,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Update the map of the object.
__ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+ __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
@@ -969,16 +560,16 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
@@ -988,17 +579,17 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ movq(FieldOperand(scratch1, offset), storage_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ movq(storage_reg, value_reg);
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
}
__ RecordWriteField(
scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
@@ -1027,29 +618,37 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
+ FieldIndex index = lookup->GetFieldIndex();
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapType* field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
// Load the double storage.
- if (index < 0) {
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ if (index.is_inobject()) {
+ __ movp(scratch1, FieldOperand(receiver_reg, index.offset()));
} else {
- __ movq(scratch1,
+ __ movp(scratch1,
FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(scratch1, FieldOperand(scratch1, offset));
+ __ movp(scratch1, FieldOperand(scratch1, index.offset()));
}
// Store the value into the storage.
@@ -1074,32 +673,30 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
+ if (index.is_inobject()) {
// Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ __ movp(FieldOperand(receiver_reg, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs,
+ receiver_reg, index.offset(), name_reg, scratch1, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
} else {
// Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movp(FieldOperand(scratch1, index.offset()), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ __ movp(name_reg, value_reg);
__ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs,
+ scratch1, index.offset(), name_reg, receiver_reg, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, smi_check);
}
}
@@ -1119,20 +716,16 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(Handle<Type> type,
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
Register scratch1,
Register scratch2,
Handle<Name> name,
- int save_at_depth,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1145,17 +738,10 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register reg = object_reg;
int depth = 0;
- StackArgumentsAccessor args(rsp, kFastApiCallArguments,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- const int kHolderIndex = kFastApiCallArguments - 1 -
- FunctionCallbackArguments::kHolderIndex;
-
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
- }
-
Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant()->Value());
+ }
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder->map());
@@ -1178,20 +764,20 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
name = factory()->InternalizeString(Handle<String>::cast(name));
}
ASSERT(current.is_null() ||
- current->property_dictionary()->FindEntry(*name) ==
+ current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
scratch1, scratch2);
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
if (in_new_space) {
// Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
@@ -1212,17 +798,13 @@ Register StubCompiler::CheckPrototypes(Handle<Type> type,
if (in_new_space) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ Move(reg, prototype);
}
}
- if (save_at_depth == depth) {
- __ movq(args.GetArgumentOperand(kHolderIndex), reg);
- }
-
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
@@ -1271,7 +853,7 @@ void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
Register LoadStubCompiler::CallbackHandlerFrontend(
- Handle<Type> type,
+ Handle<HeapType> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
@@ -1287,7 +869,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
// Load the properties dictionary.
Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+ __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
@@ -1307,11 +889,11 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
+ __ movp(scratch2(),
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
- __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
@@ -1322,32 +904,20 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
void LoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex field,
+ FieldIndex field,
Representation representation) {
- if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (!reg.is(receiver())) __ movp(receiver(), reg);
if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
} else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ KeyedLoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
}
}
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver(),
- scratch1(), scratch2(), name(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1361,70 +931,34 @@ void LoadStubCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ push(receiver()); // receiver
+ __ Push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
__ Move(scratch2(), callback);
- __ push(FieldOperand(scratch2(),
+ __ Push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ push(kScratchRegister); // return value
- __ push(kScratchRegister); // return value default
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ push(reg); // holder
- __ push(name()); // name
+ __ Push(reg); // holder
+ __ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
- Address getter_address = v8::ToCData<Address>(callback->getter());
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register getter_arg = r8;
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#else
- Register getter_arg = rdx;
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch4()));
- __ movq(name_arg, rsp);
__ PushReturnAddressFrom(scratch4());
- // v8::Arguments::values_ and handler for name.
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 1 * kPointerSize));
-
- // v8::PropertyAccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ // Abi for CallApiGetter
+ Register api_function_address = r8;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
- // The name handler is counted as an argument.
- StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
- Operand return_value_operand = args.GetArgumentOperand(
- PropertyCallbackArguments::kArgsLength - 1 -
- PropertyCallbackArguments::kReturnValueOffset);
- __ CallApiFunctionAndReturn(getter_address,
- thunk_address,
- getter_arg,
- kStackSpace,
- return_value_operand,
- NULL);
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
}
@@ -1480,10 +1014,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver());
+ __ Push(receiver());
}
- __ push(holder_reg);
- __ push(this->name());
+ __ Push(holder_reg);
+ __ Push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
   // interceptor's holder have been compiled before (see a caller
@@ -1501,10 +1035,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver());
+ __ Pop(receiver());
}
// Leave the internal frame.
@@ -1520,1067 +1054,32 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ PushReturnAddressFrom(scratch2());
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
__ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
-void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, name);
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateFunctionCheck(Register function,
- Register scratch,
- Label* miss) {
- __ JumpIfSmi(function, miss);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, Cell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- GenerateFunctionCheck(rdi, rax, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- } else {
- __ Cmp(rdi, function);
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state());
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<Name> name) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(
- object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
- index.translate(holder), Representation::Tagged());
- GenerateJumpFunction(object, rdi, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->SetElementsKind(GetInitialFastElementsKind());
- Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
- const int argc = arguments().immediate();
- __ movq(rax, Immediate(argc));
- __ Move(rbx, site_feedback_cell);
- __ Move(rdi, function);
-
- ArrayConstructorStub stub(isolate());
- __ TailCallStub(&stub);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &check_double);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movq(rcx, args.GetArgumentOperand(1));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ movq(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, args.GetArgumentOperand(1));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
- // rdx: receiver
- // rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
-      // Restore rdi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(rbx, &call_builtin);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ movq(rbx, args.GetArgumentOperand(1));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, args.GetReceiverOperand());
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not an array or is observed or sealed, bail out to regular
- // call.
- if (!object->IsJSArray() ||
- !cell.is_null() ||
- Handle<JSArray>::cast(object)->map()->is_observed() ||
- !Handle<JSArray>::cast(object)->map()->is_extensible()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
- // If so, call slow-case to also check prototypes for value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- const int argc = arguments().immediate();
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register result = rax;
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state()) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, args.GetReceiverOperand());
- if (argc > 0) {
- __ movq(index, args.GetArgumentOperand(1));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- HandlerFrontendFooter(&name_miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, args.GetArgumentOperand(1));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator generator(code, rax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss, slow;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(rax, xmm0);
-
- // Checks for 0x80000000 which signals a failed conversion.
- Label conversion_failure;
- __ cmpl(rax, Immediate(0x80000000));
- __ j(equal, &conversion_failure);
-
- // Smi tag and return.
- __ Integer32ToSmi(rax, rax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&conversion_failure);
-  int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
- __ movq(rbx, kTwoMantissaBits);
- __ movq(xmm1, rbx);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- int64_t kOne = V8_INT64_C(0x3ff0000000000000);
- __ movq(rbx, kOne);
- __ movq(xmm1, rbx);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
-
- // Return a new heap number.
- __ AllocateHeapNumber(rax, rbx, &slow);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ movq(rax, args.GetArgumentOperand(1));
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name,
- Code::StubType type) {
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
-
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- if (!cell.is_null()) {
- ASSERT(cell->value() == *function);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(1));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
-
- // Branchless abs implementation, refer to below:
- // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
-  // Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ movq(rbx, rax);
- __ sar(rbx, Immediate(kBitsPerPointer - 1));
-
-  // Do bitwise not or do nothing depending on rbx.
- __ xor_(rax, rbx);
-
-  // Add 1 or do nothing depending on rbx.
- __ subq(rax, rbx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- __ bind(&slow);
- // We do not have to patch the receiver because the function makes no use of
- // it.
- GenerateJumpFunctionIgnoreReceiver(function);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(type, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Cell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(IC::CurrentTypeOf(object, isolate()), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax,
- StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
- __ movq(StackOperandForReturnAddress(0), rax);
-
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- HandlerFrontendFooter(&miss_before_stack_reserved);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- __ CompareRoot(object, Heap::kTrueValueRootIndex);
- __ j(equal, &success);
- __ CompareRoot(object, Heap::kFalseValueRootIndex);
- __ j(not_equal, miss);
- __ bind(&success);
-}
-
-
-void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
- if (object->IsGlobalObject()) {
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-}
-
-
-Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* miss) {
- GenerateNameCheck(name, miss);
-
- Register reg = rdx;
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(reg, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(reg, miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
- rbx, rax, rdi, name, miss);
- break;
-
- case STRING_CHECK: {
- // Check that the object is a string.
- __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, miss);
- break;
- }
- case SYMBOL_CHECK: {
- // Check that the object is a symbol.
- __ CmpObjectType(reg, SYMBOL_TYPE, rax);
- __ j(not_equal, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, rax, miss);
- break;
- }
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(reg, &fast);
- __ CmpObjectType(reg, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, miss);
- break;
- }
- case BOOLEAN_CHECK: {
- GenerateBooleanCheck(reg, miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, miss);
- break;
- }
- }
-
- if (check != RECEIVER_MAP_CHECK) {
- Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
- reg = CheckPrototypes(
- IC::CurrentTypeOf(prototype, isolate()),
- rax, holder, rbx, rdx, rdi, name, miss);
- }
-
- return reg;
-}
-
-
-void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
- Register function,
- Label* miss) {
- // Check that the function really is a function.
- GenerateFunctionCheck(function, rbx, miss);
-
- if (!function.is(rdi)) __ movq(rdi, function);
- PatchGlobalProxy(object);
-
- // Invoke the function.
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind());
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name) {
- Label miss;
- GenerateNameCheck(name, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state());
- compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
- &miss);
-
- // Restore receiver.
- __ movq(rdx, args.GetReceiverOperand());
-
- GenerateJumpFunction(object, rax, &miss);
-
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::FAST, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<JSFunction> function,
- Handle<Name> name) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(
- object, holder, cell, function, Handle<String>::cast(name),
- Code::NORMAL);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Potentially loads a closure that matches the shared function info of the
-  // function, rather than the function itself.
- GenerateLoadFunctionFromCell(cell, function, &miss);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- GenerateJumpFunction(object, rdi, function);
- HandlerFrontendFooter(&miss);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
+ __ Push(receiver());
+ __ Push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
- __ push(value());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(),
- scratch2(), this->name(), 1, values);
+ __ TailCallExternalReference(store_callback_property, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -2593,27 +1092,31 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(rax);
+ __ Push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
- __ push(rdx);
- __ push(rax);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ __ Push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2621,10 +1124,10 @@ void StoreStubCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(rax);
+ __ Pop(rax);
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -2638,9 +1141,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(this->name());
- __ push(value());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -2653,6 +1156,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -2660,7 +1177,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
Label miss;
__ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int i = 0; i < receiver_count; ++i) {
// Check map and tail call if there's a match
@@ -2670,7 +1187,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
} else {
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
- __ movq(transition_map(),
+ __ Move(transition_map(),
transitioned_maps->at(i),
RelocInfo::EMBEDDED_OBJECT);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
@@ -2688,7 +1205,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name) {
NonexistentHandlerFrontend(type, last, name);
@@ -2717,33 +1234,22 @@ Register* KeyedLoadStubCompiler::registers() {
}
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
- return registers;
+Register StoreStubCompiler::value() {
+ return rax;
}
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+Register* StoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
-void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
+ return registers;
}
@@ -2752,6 +1258,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
@@ -2764,11 +1271,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(receiver);
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ CALL_FUNCTION, NullCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -2776,7 +1288,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
}
// Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
__ ret(0);
}
@@ -2787,7 +1299,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<Type> type,
+ Handle<HeapType> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
@@ -2800,7 +1312,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// Get the value from the cell.
__ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
+ __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
@@ -2811,13 +1323,13 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &miss);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
+ __ movp(rax, rbx);
__ ret(0);
+ HandlerFrontendFooter(name, &miss);
+
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
@@ -2831,8 +1343,10 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
IcCheckType check) {
Label miss;
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ Cmp(this->name(), name);
+ __ j(not_equal, &miss);
}
Label number_case;
@@ -2840,17 +1354,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
__ JumpIfSmi(receiver(), smi_target);
Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Type> type = types->at(current);
+ Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match
__ Cmp(map_reg, map);
- if (type->Is(Type::Number())) {
+ if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
@@ -2887,7 +1401,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
__ JumpIfNotSmi(rax, &miss);
__ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// rdx: receiver