Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/a64/OWNERS | 1
-rw-r--r--  deps/v8/src/a64/assembler-a64-inl.h | 1200
-rw-r--r--  deps/v8/src/a64/assembler-a64.cc | 2606
-rw-r--r--  deps/v8/src/a64/assembler-a64.h | 2085
-rw-r--r--  deps/v8/src/a64/builtins-a64.cc | 1479
-rw-r--r--  deps/v8/src/a64/code-stubs-a64.cc | 5809
-rw-r--r--  deps/v8/src/a64/code-stubs-a64.h | 469
-rw-r--r--  deps/v8/src/a64/codegen-a64.cc | 616
-rw-r--r--  deps/v8/src/a64/codegen-a64.h | 70
-rw-r--r--  deps/v8/src/a64/constants-a64.h | 1262
-rw-r--r--  deps/v8/src/a64/cpu-a64.cc | 199
-rw-r--r--  deps/v8/src/a64/cpu-a64.h | 107
-rw-r--r--  deps/v8/src/a64/debug-a64.cc | 394
-rw-r--r--  deps/v8/src/a64/debugger-a64.cc | 111
-rw-r--r--  deps/v8/src/a64/debugger-a64.h | 56
-rw-r--r--  deps/v8/src/a64/decoder-a64.cc | 726
-rw-r--r--  deps/v8/src/a64/decoder-a64.h | 202
-rw-r--r--  deps/v8/src/a64/deoptimizer-a64.cc | 376
-rw-r--r--  deps/v8/src/a64/disasm-a64.cc | 1854
-rw-r--r--  deps/v8/src/a64/disasm-a64.h | 115
-rw-r--r--  deps/v8/src/a64/frames-a64.cc | 57
-rw-r--r--  deps/v8/src/a64/frames-a64.h | 131
-rw-r--r--  deps/v8/src/a64/full-codegen-a64.cc | 5010
-rw-r--r--  deps/v8/src/a64/ic-a64.cc | 1413
-rw-r--r--  deps/v8/src/a64/instructions-a64.cc | 334
-rw-r--r--  deps/v8/src/a64/instructions-a64.h | 516
-rw-r--r--  deps/v8/src/a64/instrument-a64.cc | 618
-rw-r--r--  deps/v8/src/a64/instrument-a64.h | 108
-rw-r--r--  deps/v8/src/a64/lithium-a64.cc | 2449
-rw-r--r--  deps/v8/src/a64/lithium-a64.h | 2967
-rw-r--r--  deps/v8/src/a64/lithium-codegen-a64.cc | 5692
-rw-r--r--  deps/v8/src/a64/lithium-codegen-a64.h | 473
-rw-r--r--  deps/v8/src/a64/lithium-gap-resolver-a64.cc | 326
-rw-r--r--  deps/v8/src/a64/lithium-gap-resolver-a64.h | 90
-rw-r--r--  deps/v8/src/a64/macro-assembler-a64-inl.h | 1647
-rw-r--r--  deps/v8/src/a64/macro-assembler-a64.cc | 4975
-rw-r--r--  deps/v8/src/a64/macro-assembler-a64.h | 2238
-rw-r--r--  deps/v8/src/a64/regexp-macro-assembler-a64.cc | 1730
-rw-r--r--  deps/v8/src/a64/regexp-macro-assembler-a64.h | 315
-rw-r--r--  deps/v8/src/a64/simulator-a64.cc | 3414
-rw-r--r--  deps/v8/src/a64/simulator-a64.h | 868
-rw-r--r--  deps/v8/src/a64/stub-cache-a64.cc | 1548
-rw-r--r--  deps/v8/src/a64/utils-a64.cc | 112
-rw-r--r--  deps/v8/src/a64/utils-a64.h | 109
-rw-r--r--  deps/v8/src/allocation-tracker.cc | 1
-rw-r--r--  deps/v8/src/api.cc | 26
-rw-r--r--  deps/v8/src/arm/OWNERS | 1
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 177
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 167
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 14
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 2
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 100
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 21
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 63
-rw-r--r--  deps/v8/src/arraybuffer.js | 11
-rw-r--r--  deps/v8/src/assembler.cc | 7
-rw-r--r--  deps/v8/src/assembler.h | 26
-rw-r--r--  deps/v8/src/ast.cc | 35
-rw-r--r--  deps/v8/src/ast.h | 79
-rw-r--r--  deps/v8/src/atomicops.h | 2
-rw-r--r--  deps/v8/src/atomicops_internals_a64_gcc.h | 416
-rw-r--r--  deps/v8/src/bootstrapper.cc | 7
-rw-r--r--  deps/v8/src/builtins.cc | 8
-rw-r--r--  deps/v8/src/char-predicates.h | 21
-rw-r--r--  deps/v8/src/checks.h | 18
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 89
-rw-r--r--  deps/v8/src/code-stubs.cc | 3
-rw-r--r--  deps/v8/src/code-stubs.h | 59
-rw-r--r--  deps/v8/src/codegen.cc | 2
-rw-r--r--  deps/v8/src/codegen.h | 2
-rw-r--r--  deps/v8/src/compiler.cc | 30
-rw-r--r--  deps/v8/src/compiler.h | 11
-rw-r--r--  deps/v8/src/contexts.h | 2
-rw-r--r--  deps/v8/src/conversions-inl.h | 2
-rw-r--r--  deps/v8/src/dateparser.h | 2
-rw-r--r--  deps/v8/src/debug.cc | 2
-rw-r--r--  deps/v8/src/deoptimizer.cc | 65
-rw-r--r--  deps/v8/src/deoptimizer.h | 20
-rw-r--r--  deps/v8/src/execution.cc | 28
-rw-r--r--  deps/v8/src/execution.h | 9
-rw-r--r--  deps/v8/src/factory.cc | 8
-rw-r--r--  deps/v8/src/factory.h | 4
-rw-r--r--  deps/v8/src/feedback-slots.h | 110
-rw-r--r--  deps/v8/src/flag-definitions.h | 43
-rw-r--r--  deps/v8/src/frames-inl.h | 2
-rw-r--r--  deps/v8/src/frames.h | 4
-rw-r--r--  deps/v8/src/full-codegen.cc | 39
-rw-r--r--  deps/v8/src/full-codegen.h | 36
-rw-r--r--  deps/v8/src/globals.h | 18
-rw-r--r--  deps/v8/src/harmony-math.js | 34
-rw-r--r--  deps/v8/src/heap-inl.h | 22
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 105
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h | 14
-rw-r--r--  deps/v8/src/heap.cc | 152
-rw-r--r--  deps/v8/src/heap.h | 68
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 163
-rw-r--r--  deps/v8/src/hydrogen-check-elimination.cc | 324
-rw-r--r--  deps/v8/src/hydrogen-flow-engine.h | 19
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 454
-rw-r--r--  deps/v8/src/hydrogen-gvn.h | 74
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 130
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 467
-rw-r--r--  deps/v8/src/hydrogen-load-elimination.cc | 96
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 5
-rw-r--r--  deps/v8/src/hydrogen.cc | 755
-rw-r--r--  deps/v8/src/hydrogen.h | 162
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 157
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 12
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 168
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 14
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 93
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 17
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 44
-rw-r--r--  deps/v8/src/ic.cc | 165
-rw-r--r--  deps/v8/src/ic.h | 24
-rw-r--r--  deps/v8/src/incremental-marking.h | 2
-rw-r--r--  deps/v8/src/isolate.cc | 23
-rw-r--r--  deps/v8/src/isolate.h | 146
-rw-r--r--  deps/v8/src/json-stringifier.h | 1
-rw-r--r--  deps/v8/src/json.js | 15
-rw-r--r--  deps/v8/src/jsregexp.cc | 16
-rw-r--r--  deps/v8/src/lithium-allocator-inl.h | 2
-rw-r--r--  deps/v8/src/lithium-allocator.cc | 2
-rw-r--r--  deps/v8/src/lithium-codegen.cc | 11
-rw-r--r--  deps/v8/src/lithium.cc | 3
-rw-r--r--  deps/v8/src/macro-assembler.h | 8
-rw-r--r--  deps/v8/src/mark-compact.cc | 76
-rw-r--r--  deps/v8/src/mark-compact.h | 10
-rw-r--r--  deps/v8/src/messages.cc | 6
-rw-r--r--  deps/v8/src/messages.h | 1
-rw-r--r--  deps/v8/src/messages.js | 14
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 194
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 10
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 169
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 19
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 19
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 1
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 2
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 26
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 4
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 73
-rw-r--r--  deps/v8/src/object-observe.js | 7
-rw-r--r--  deps/v8/src/objects-debug.cc | 3
-rw-r--r--  deps/v8/src/objects-inl.h | 114
-rw-r--r--  deps/v8/src/objects-printer.cc | 6
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 2
-rw-r--r--  deps/v8/src/objects.cc | 48
-rw-r--r--  deps/v8/src/objects.h | 215
-rw-r--r--  deps/v8/src/parser.cc | 1089
-rw-r--r--  deps/v8/src/parser.h | 224
-rw-r--r--  deps/v8/src/platform-linux.cc | 3
-rw-r--r--  deps/v8/src/platform-posix.cc | 2
-rw-r--r--  deps/v8/src/preparser.cc | 534
-rw-r--r--  deps/v8/src/preparser.h | 1078
-rw-r--r--  deps/v8/src/promise.js | 44
-rw-r--r--  deps/v8/src/property-details.h | 18
-rw-r--r--  deps/v8/src/property.h | 87
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 4
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 1
-rw-r--r--  deps/v8/src/runtime.cc | 55
-rw-r--r--  deps/v8/src/runtime.h | 7
-rw-r--r--  deps/v8/src/sampler.cc | 41
-rw-r--r--  deps/v8/src/scanner.cc | 17
-rw-r--r--  deps/v8/src/scanner.h | 7
-rw-r--r--  deps/v8/src/scopes.cc | 2
-rw-r--r--  deps/v8/src/serialize.cc | 4
-rw-r--r--  deps/v8/src/serialize.h | 1
-rw-r--r--  deps/v8/src/simulator.h | 2
-rw-r--r--  deps/v8/src/spaces.cc | 78
-rw-r--r--  deps/v8/src/spaces.h | 78
-rw-r--r--  deps/v8/src/stub-cache.cc | 40
-rw-r--r--  deps/v8/src/stub-cache.h | 15
-rw-r--r--  deps/v8/src/sweeper-thread.cc | 1
-rw-r--r--  deps/v8/src/type-info.cc | 87
-rw-r--r--  deps/v8/src/type-info.h | 18
-rw-r--r--  deps/v8/src/typedarray.js | 8
-rw-r--r--  deps/v8/src/types.cc | 1
-rw-r--r--  deps/v8/src/typing.cc | 7
-rw-r--r--  deps/v8/src/unicode.cc | 40
-rw-r--r--  deps/v8/src/unicode.h | 3
-rw-r--r--  deps/v8/src/utils.h | 71
-rw-r--r--  deps/v8/src/v8.cc | 22
-rw-r--r--  deps/v8/src/v8.h | 2
-rw-r--r--  deps/v8/src/v8natives.js | 24
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/vm-state-inl.h | 3
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 34
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 157
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 14
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 164
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 14
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 183
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 20
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 6
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 57
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 44
203 files changed, 4839 insertions(+), 63261 deletions(-)
diff --git a/deps/v8/src/a64/OWNERS b/deps/v8/src/a64/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/a64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/a64/assembler-a64-inl.h b/deps/v8/src/a64/assembler-a64-inl.h
deleted file mode 100644
index e68dee0738..0000000000
--- a/deps/v8/src/a64/assembler-a64-inl.h
+++ /dev/null
@@ -1,1200 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_ASSEMBLER_A64_INL_H_
-#define V8_A64_ASSEMBLER_A64_INL_H_
-
-#include "a64/assembler-a64.h"
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-void RelocInfo::apply(intptr_t delta) {
- UNIMPLEMENTED();
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-inline unsigned CPURegister::code() const {
- ASSERT(IsValid());
- return reg_code;
-}
-
-
-inline CPURegister::RegisterType CPURegister::type() const {
- ASSERT(IsValidOrNone());
- return reg_type;
-}
-
-
-inline RegList CPURegister::Bit() const {
- ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
- return IsValid() ? 1UL << reg_code : 0;
-}
-
-
-inline unsigned CPURegister::SizeInBits() const {
- ASSERT(IsValid());
- return reg_size;
-}
-
-
-inline int CPURegister::SizeInBytes() const {
- ASSERT(IsValid());
- ASSERT(SizeInBits() % 8 == 0);
- return reg_size / 8;
-}
-
-
-inline bool CPURegister::Is32Bits() const {
- ASSERT(IsValid());
- return reg_size == 32;
-}
-
-
-inline bool CPURegister::Is64Bits() const {
- ASSERT(IsValid());
- return reg_size == 64;
-}
-
-
-inline bool CPURegister::IsValid() const {
- if (IsValidRegister() || IsValidFPRegister()) {
- ASSERT(!IsNone());
- return true;
- } else {
- ASSERT(IsNone());
- return false;
- }
-}
-
-
-inline bool CPURegister::IsValidRegister() const {
- return IsRegister() &&
- ((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
- ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
-}
-
-
-inline bool CPURegister::IsValidFPRegister() const {
- return IsFPRegister() &&
- ((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
- (reg_code < kNumberOfFPRegisters);
-}
-
-
-inline bool CPURegister::IsNone() const {
- // kNoRegister types should always have size 0 and code 0.
- ASSERT((reg_type != kNoRegister) || (reg_code == 0));
- ASSERT((reg_type != kNoRegister) || (reg_size == 0));
-
- return reg_type == kNoRegister;
-}
-
-
-inline bool CPURegister::Is(const CPURegister& other) const {
- ASSERT(IsValidOrNone() && other.IsValidOrNone());
- return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
- (reg_type == other.reg_type);
-}
-
-
-inline bool CPURegister::IsRegister() const {
- return reg_type == kRegister;
-}
-
-
-inline bool CPURegister::IsFPRegister() const {
- return reg_type == kFPRegister;
-}
-
-
-inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
- return (reg_size == other.reg_size) && (reg_type == other.reg_type);
-}
-
-
-inline bool CPURegister::IsValidOrNone() const {
- return IsValid() || IsNone();
-}
-
-
-inline bool CPURegister::IsZero() const {
- ASSERT(IsValid());
- return IsRegister() && (reg_code == kZeroRegCode);
-}
-
-
-inline bool CPURegister::IsSP() const {
- ASSERT(IsValid());
- return IsRegister() && (reg_code == kSPRegInternalCode);
-}
-
-
-inline void CPURegList::Combine(const CPURegList& other) {
- ASSERT(IsValid());
- ASSERT(other.type() == type_);
- ASSERT(other.RegisterSizeInBits() == size_);
- list_ |= other.list();
-}
-
-
-inline void CPURegList::Remove(const CPURegList& other) {
- ASSERT(IsValid());
- ASSERT(other.type() == type_);
- ASSERT(other.RegisterSizeInBits() == size_);
- list_ &= ~other.list();
-}
-
-
-inline void CPURegList::Combine(const CPURegister& other) {
- ASSERT(other.type() == type_);
- ASSERT(other.SizeInBits() == size_);
- Combine(other.code());
-}
-
-
-inline void CPURegList::Remove(const CPURegister& other) {
- ASSERT(other.type() == type_);
- ASSERT(other.SizeInBits() == size_);
- Remove(other.code());
-}
-
-
-inline void CPURegList::Combine(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister::Create(code, size_, type_).IsValid());
- list_ |= (1UL << code);
-}
-
-
-inline void CPURegList::Remove(int code) {
- ASSERT(IsValid());
- ASSERT(CPURegister::Create(code, size_, type_).IsValid());
- list_ &= ~(1UL << code);
-}
-
-
-inline Register Register::XRegFromCode(unsigned code) {
- // This function returns the zero register when code = 31. The stack pointer
- // can not be returned.
- ASSERT(code < kNumberOfRegisters);
- return Register::Create(code, kXRegSize);
-}
-
-
-inline Register Register::WRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfRegisters);
- return Register::Create(code, kWRegSize);
-}
-
-
-inline FPRegister FPRegister::SRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
- return FPRegister::Create(code, kSRegSize);
-}
-
-
-inline FPRegister FPRegister::DRegFromCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
- return FPRegister::Create(code, kDRegSize);
-}
-
-
-inline Register CPURegister::W() const {
- ASSERT(IsValidRegister());
- return Register::WRegFromCode(reg_code);
-}
-
-
-inline Register CPURegister::X() const {
- ASSERT(IsValidRegister());
- return Register::XRegFromCode(reg_code);
-}
-
-
-inline FPRegister CPURegister::S() const {
- ASSERT(IsValidFPRegister());
- return FPRegister::SRegFromCode(reg_code);
-}
-
-
-inline FPRegister CPURegister::D() const {
- ASSERT(IsValidFPRegister());
- return FPRegister::DRegFromCode(reg_code);
-}
-
-
-// Operand.
-template<typename T>
-Operand::Operand(Handle<T> value) : reg_(NoReg) {
- initialize_handle(value);
-}
-
-
-// Default initializer is for int types
-template<typename int_t>
-struct OperandInitializer {
- static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(int_t) {
- return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
- }
- static inline int64_t immediate_for(int_t t) {
- STATIC_ASSERT(sizeof(int_t) <= 8);
- return t;
- }
-};
-
-
-template<>
-struct OperandInitializer<Smi*> {
- static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(Smi* t) {
- return RelocInfo::NONE64;
- }
- static inline int64_t immediate_for(Smi* t) {
- return reinterpret_cast<int64_t>(t);
- }
-};
-
-
-template<>
-struct OperandInitializer<ExternalReference> {
- static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(ExternalReference t) {
- return RelocInfo::EXTERNAL_REFERENCE;
- }
- static inline int64_t immediate_for(ExternalReference t) {
- return reinterpret_cast<int64_t>(t.address());
- }
-};
-
-
-template<typename T>
-Operand::Operand(T t)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
- rmode_(OperandInitializer<T>::rmode_for(t)) {}
-
-
-template<typename T>
-Operand::Operand(T t, RelocInfo::Mode rmode)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
- rmode_(rmode) {
- STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
-}
-
-
-Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
- : reg_(reg),
- shift_(shift),
- extend_(NO_EXTEND),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
- ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
- ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
- ASSERT(!reg.IsSP());
-}
-
-
-Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
- : reg_(reg),
- shift_(NO_SHIFT),
- extend_(extend),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
- ASSERT(reg.IsValid());
- ASSERT(shift_amount <= 4);
- ASSERT(!reg.IsSP());
-
- // Extend modes SXTX and UXTX require a 64-bit register.
- ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
-}
-
-
-bool Operand::IsImmediate() const {
- return reg_.Is(NoReg);
-}
-
-
-bool Operand::IsShiftedRegister() const {
- return reg_.IsValid() && (shift_ != NO_SHIFT);
-}
-
-
-bool Operand::IsExtendedRegister() const {
- return reg_.IsValid() && (extend_ != NO_EXTEND);
-}
-
-
-bool Operand::IsZero() const {
- if (IsImmediate()) {
- return immediate() == 0;
- } else {
- return reg().IsZero();
- }
-}
-
-
-Operand Operand::ToExtendedRegister() const {
- ASSERT(IsShiftedRegister());
- ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
- return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
-}
-
-
-int64_t Operand::immediate() const {
- ASSERT(IsImmediate());
- return immediate_;
-}
-
-
-Register Operand::reg() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
- return reg_;
-}
-
-
-Shift Operand::shift() const {
- ASSERT(IsShiftedRegister());
- return shift_;
-}
-
-
-Extend Operand::extend() const {
- ASSERT(IsExtendedRegister());
- return extend_;
-}
-
-
-unsigned Operand::shift_amount() const {
- ASSERT(IsShiftedRegister() || IsExtendedRegister());
- return shift_amount_;
-}
-
-
-Operand Operand::UntagSmi(Register smi) {
- ASSERT(smi.Is64Bits());
- return Operand(smi, ASR, kSmiShift);
-}
-
-
-Operand Operand::UntagSmiAndScale(Register smi, int scale) {
- ASSERT(smi.Is64Bits());
- ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
- if (scale > kSmiShift) {
- return Operand(smi, LSL, scale - kSmiShift);
- } else if (scale < kSmiShift) {
- return Operand(smi, ASR, kSmiShift - scale);
- }
- return Operand(smi);
-}
-
-
-MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
- : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
- shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
- ASSERT(base.Is64Bits() && !base.IsZero());
-}
-
-
-MemOperand::MemOperand(Register base,
- Register regoffset,
- Extend extend,
- unsigned shift_amount)
- : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
- shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
- ASSERT(base.Is64Bits() && !base.IsZero());
- ASSERT(!regoffset.IsSP());
- ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
-
- // SXTX extend mode requires a 64-bit offset register.
- ASSERT(regoffset.Is64Bits() || (extend != SXTX));
-}
-
-
-MemOperand::MemOperand(Register base,
- Register regoffset,
- Shift shift,
- unsigned shift_amount)
- : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
- shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
- ASSERT(base.Is64Bits() && !base.IsZero());
- ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
- ASSERT(shift == LSL);
-}
-
-
-MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
- : base_(base), addrmode_(addrmode) {
- ASSERT(base.Is64Bits() && !base.IsZero());
-
- if (offset.IsImmediate()) {
- offset_ = offset.immediate();
-
- regoffset_ = NoReg;
- } else if (offset.IsShiftedRegister()) {
- ASSERT(addrmode == Offset);
-
- regoffset_ = offset.reg();
- shift_ = offset.shift();
- shift_amount_ = offset.shift_amount();
-
- extend_ = NO_EXTEND;
- offset_ = 0;
-
- // These assertions match those in the shifted-register constructor.
- ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
- ASSERT(shift_ == LSL);
- } else {
- ASSERT(offset.IsExtendedRegister());
- ASSERT(addrmode == Offset);
-
- regoffset_ = offset.reg();
- extend_ = offset.extend();
- shift_amount_ = offset.shift_amount();
-
- shift_ = NO_SHIFT;
- offset_ = 0;
-
- // These assertions match those in the extended-register constructor.
- ASSERT(!regoffset_.IsSP());
- ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
- ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
- }
-}
-
-bool MemOperand::IsImmediateOffset() const {
- return (addrmode_ == Offset) && regoffset_.Is(NoReg);
-}
-
-
-bool MemOperand::IsRegisterOffset() const {
- return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
-}
-
-
-bool MemOperand::IsPreIndex() const {
- return addrmode_ == PreIndex;
-}
-
-
-bool MemOperand::IsPostIndex() const {
- return addrmode_ == PostIndex;
-}
-
-Operand MemOperand::OffsetAsOperand() const {
- if (IsImmediateOffset()) {
- return offset();
- } else {
- ASSERT(IsRegisterOffset());
- if (extend() == NO_EXTEND) {
- return Operand(regoffset(), shift(), shift_amount());
- } else {
- return Operand(regoffset(), extend(), shift_amount());
- }
- }
-}
-
-
-void Assembler::Unreachable() {
-#ifdef USE_SIMULATOR
- debug("UNREACHABLE", __LINE__, BREAK);
-#else
- // Crash by branching to 0. lr now points near the fault.
- Emit(BLR | Rn(xzr));
-#endif
-}
-
-
-Address Assembler::target_pointer_address_at(Address pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(pc);
- ASSERT(instr->IsLdrLiteralX());
- return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
-}
-
-
-// Read/Modify the code target address in the branch/call instruction at pc.
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_pointer_address_at(pc));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
- // Call sequence on A64 is:
- // ldr ip0, #... @ load from literal pool
- // blr ip0
- Address candidate = pc - 2 * kInstructionSize;
- Instruction* instr = reinterpret_cast<Instruction*>(candidate);
- USE(instr);
- ASSERT(instr->IsLdrLiteralX());
- return candidate;
-}
-
-
-Address Assembler::return_address_from_call_start(Address pc) {
- // The call, generated by MacroAssembler::Call, is one of two possible
- // sequences:
- //
- // Without relocation:
- // movz ip0, #(target & 0x000000000000ffff)
- // movk ip0, #(target & 0x00000000ffff0000)
- // movk ip0, #(target & 0x0000ffff00000000)
- // movk ip0, #(target & 0xffff000000000000)
- // blr ip0
- //
- // With relocation:
- // ldr ip0, =target
- // blr ip0
- //
- // The return address is immediately after the blr instruction in both cases,
- // so it can be found by adding the call size to the address at the start of
- // the call sequence.
- STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
- STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
-
- Instruction* instr = reinterpret_cast<Instruction*>(pc);
- if (instr->IsMovz()) {
- // Verify the instruction sequence.
- ASSERT(instr->following(1)->IsMovk());
- ASSERT(instr->following(2)->IsMovk());
- ASSERT(instr->following(3)->IsMovk());
- ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
- return pc + Assembler::kCallSizeWithoutRelocation;
- } else {
- // Verify the instruction sequence.
- ASSERT(instr->IsLdrLiteralX());
- ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
- return pc + Assembler::kCallSizeWithRelocation;
- }
-}
-
-
-void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_pointer_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to always flush the
- // instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, an instruction is actually patched in the case of
- // embedded constants of the form:
- // ldr ip, [pc, #...]
- // since the instruction accessing this address in the constant pool remains
- // unchanged, a flush is not required.
-}
-
-
-int RelocInfo::target_address_size() {
- return kPointerSize;
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address RelocInfo::target_reference() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_runtime_entry(Assembler* origin) {
- ASSERT(IsRuntimeEntry(rmode_));
- return target_address();
-}
-
-
-void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
- ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
-}
-
-
-Handle<Cell> RelocInfo::target_cell_handle() {
- UNIMPLEMENTED();
- Cell *null_cell = NULL;
- return Handle<Cell>(null_cell);
-}
-
-
-Cell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::CELL);
- return Cell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
- UNIMPLEMENTED();
-}
-
-
-static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
-static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
-
-
-Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
- UNREACHABLE(); // This should never be reached on A64.
- return Handle<Object>();
-}
-
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(!Code::IsYoungSequence(pc_));
- // Read the stub entry point from the code age sequence.
- Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
- return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(!Code::IsYoungSequence(pc_));
- // Overwrite the stub entry point in the code age sequence. This is loaded as
- // a literal so there is no need to call FlushICache here.
- Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
- Memory::Address_at(stub_entry_address) = stub->instruction_start();
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // For the above sequences the Relocinfo points to the load literal loading
- // the call address.
- return Assembler::target_address_at(pc_);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-void RelocInfo::WipeOut() {
- ASSERT(IsEmbeddedObject(rmode_) ||
- IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // The sequence must be:
- // ldr ip0, [pc, #offset]
- // blr ip0
- // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
- Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
- Instruction* i2 = i1->following();
- return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
- i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
- return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::CELL) {
- StaticVisitor::VisitCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (RelocInfo::IsRuntimeEntry(mode)) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
- ASSERT(rt.IsValid());
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDR_x : LDR_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? LDR_d : LDR_s;
- }
-}
-
-
-LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
- const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDP_x : LDP_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? LDP_d : LDP_s;
- }
-}
-
-
-LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
- ASSERT(rt.IsValid());
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? STR_x : STR_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? STR_d : STR_s;
- }
-}
-
-
-LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
- const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? STP_x : STP_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? STP_d : STP_s;
- }
-}
-
-
-LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDNP_x : LDNP_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? LDNP_d : LDNP_s;
- }
-}
-
-
-LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- ASSERT(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? STNP_x : STNP_w;
- } else {
- ASSERT(rt.IsFPRegister());
- return rt.Is64Bits() ? STNP_d : STNP_s;
- }
-}
-
-
-int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
- ASSERT(kStartOfLabelLinkChain == 0);
- int offset = LinkAndGetByteOffsetTo(label);
- ASSERT(IsAligned(offset, kInstructionSize));
- return offset >> kInstructionSizeLog2;
-}
-
-
-Instr Assembler::Flags(FlagsUpdate S) {
- if (S == SetFlags) {
- return 1 << FlagsUpdate_offset;
- } else if (S == LeaveFlags) {
- return 0 << FlagsUpdate_offset;
- }
- UNREACHABLE();
- return 0;
-}
-
-
-Instr Assembler::Cond(Condition cond) {
- return cond << Condition_offset;
-}
-
-
-Instr Assembler::ImmPCRelAddress(int imm21) {
- CHECK(is_int21(imm21));
- Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
- Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
- Instr immlo = imm << ImmPCRelLo_offset;
- return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
-}
-
-
-Instr Assembler::ImmUncondBranch(int imm26) {
- CHECK(is_int26(imm26));
- return truncate_to_int26(imm26) << ImmUncondBranch_offset;
-}
-
-
-Instr Assembler::ImmCondBranch(int imm19) {
- CHECK(is_int19(imm19));
- return truncate_to_int19(imm19) << ImmCondBranch_offset;
-}
-
-
-Instr Assembler::ImmCmpBranch(int imm19) {
- CHECK(is_int19(imm19));
- return truncate_to_int19(imm19) << ImmCmpBranch_offset;
-}
-
-
-Instr Assembler::ImmTestBranch(int imm14) {
- CHECK(is_int14(imm14));
- return truncate_to_int14(imm14) << ImmTestBranch_offset;
-}
-
-
-Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
- ASSERT(is_uint6(bit_pos));
- // Subtract five from the shift offset, as we need bit 5 from bit_pos.
- unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
- unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
- b5 &= ImmTestBranchBit5_mask;
- b40 &= ImmTestBranchBit40_mask;
- return b5 | b40;
-}
-
-
-Instr Assembler::SF(Register rd) {
- return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
-}
-
-
-Instr Assembler::ImmAddSub(int64_t imm) {
- ASSERT(IsImmAddSub(imm));
- if (is_uint12(imm)) { // No shift required.
- return imm << ImmAddSub_offset;
- } else {
- return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
- }
-}
-
-
-Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
- ((reg_size == kWRegSize) && is_uint5(imms)));
- USE(reg_size);
- return imms << ImmS_offset;
-}
-
-
-Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
- ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
- ((reg_size == kWRegSize) && is_uint5(immr)));
- USE(reg_size);
- ASSERT(is_uint6(immr));
- return immr << ImmR_offset;
-}
-
-
-Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT(is_uint6(imms));
- ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
- USE(reg_size);
- return imms << ImmSetBits_offset;
-}
-
-
-Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
- ((reg_size == kWRegSize) && is_uint5(immr)));
- USE(reg_size);
- return immr << ImmRotate_offset;
-}
-
-
-Instr Assembler::ImmLLiteral(int imm19) {
- CHECK(is_int19(imm19));
- return truncate_to_int19(imm19) << ImmLLiteral_offset;
-}
-
-
-Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
- ASSERT((reg_size == kXRegSize) || (bitn == 0));
- USE(reg_size);
- return bitn << BitN_offset;
-}
-
-
-Instr Assembler::ShiftDP(Shift shift) {
- ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
- return shift << ShiftDP_offset;
-}
-
-
-Instr Assembler::ImmDPShift(unsigned amount) {
- ASSERT(is_uint6(amount));
- return amount << ImmDPShift_offset;
-}
-
-
-Instr Assembler::ExtendMode(Extend extend) {
- return extend << ExtendMode_offset;
-}
-
-
-Instr Assembler::ImmExtendShift(unsigned left_shift) {
- ASSERT(left_shift <= 4);
- return left_shift << ImmExtendShift_offset;
-}
-
-
-Instr Assembler::ImmCondCmp(unsigned imm) {
- ASSERT(is_uint5(imm));
- return imm << ImmCondCmp_offset;
-}
-
-
-Instr Assembler::Nzcv(StatusFlags nzcv) {
- return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
-}
-
-
-Instr Assembler::ImmLSUnsigned(int imm12) {
- ASSERT(is_uint12(imm12));
- return imm12 << ImmLSUnsigned_offset;
-}
-
-
-Instr Assembler::ImmLS(int imm9) {
- ASSERT(is_int9(imm9));
- return truncate_to_int9(imm9) << ImmLS_offset;
-}
-
-
-Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
- ASSERT(((imm7 >> size) << size) == imm7);
- int scaled_imm7 = imm7 >> size;
- ASSERT(is_int7(scaled_imm7));
- return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
-}
-
-
-Instr Assembler::ImmShiftLS(unsigned shift_amount) {
- ASSERT(is_uint1(shift_amount));
- return shift_amount << ImmShiftLS_offset;
-}
-
-
-Instr Assembler::ImmException(int imm16) {
- ASSERT(is_uint16(imm16));
- return imm16 << ImmException_offset;
-}
-
-
-Instr Assembler::ImmSystemRegister(int imm15) {
- ASSERT(is_uint15(imm15));
- return imm15 << ImmSystemRegister_offset;
-}
-
-
-Instr Assembler::ImmHint(int imm7) {
- ASSERT(is_uint7(imm7));
- return imm7 << ImmHint_offset;
-}
-
-
-Instr Assembler::ImmBarrierDomain(int imm2) {
- ASSERT(is_uint2(imm2));
- return imm2 << ImmBarrierDomain_offset;
-}
-
-
-Instr Assembler::ImmBarrierType(int imm2) {
- ASSERT(is_uint2(imm2));
- return imm2 << ImmBarrierType_offset;
-}
-
-
-LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
- ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
- return static_cast<LSDataSize>(op >> SizeLS_offset);
-}
-
-
-Instr Assembler::ImmMoveWide(uint64_t imm) {
- ASSERT(is_uint16(imm));
- return imm << ImmMoveWide_offset;
-}
-
-
-Instr Assembler::ShiftMoveWide(int64_t shift) {
- ASSERT(is_uint2(shift));
- return shift << ShiftMoveWide_offset;
-}
-
-
-Instr Assembler::FPType(FPRegister fd) {
- return fd.Is64Bits() ? FP64 : FP32;
-}
-
-
-Instr Assembler::FPScale(unsigned scale) {
- ASSERT(is_uint6(scale));
- return scale << FPScale_offset;
-}
-
-
-const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
- return reg.Is64Bits() ? xzr : wzr;
-}
-
-
-void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
- LoadRelocatedValue(rt, operand, LDR_x_lit);
-}
-
-
-inline void Assembler::CheckBuffer() {
- ASSERT(pc_ < (buffer_ + buffer_size_));
- if (buffer_space() < kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-TypeFeedbackId Assembler::RecordedAstId() {
- ASSERT(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
-}
-
-
-void Assembler::ClearRecordedAstId() {
- recorded_ast_id_ = TypeFeedbackId::None();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_A64_ASSEMBLER_A64_INL_H_
diff --git a/deps/v8/src/a64/assembler-a64.cc b/deps/v8/src/a64/assembler-a64.cc
deleted file mode 100644
index 43b1391605..0000000000
--- a/deps/v8/src/a64/assembler-a64.cc
+++ /dev/null
@@ -1,2606 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#define A64_DEFINE_REG_STATICS
-
-#include "a64/assembler-a64-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// CpuFeatures utilities (for V8 compatibility).
-
-ExternalReference ExternalReference::cpu_features() {
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CPURegList utilities.
-
-CPURegister CPURegList::PopLowestIndex() {
- ASSERT(IsValid());
- if (IsEmpty()) {
- return NoCPUReg;
- }
- int index = CountTrailingZeros(list_, kRegListSizeInBits);
- ASSERT((1 << index) & list_);
- Remove(index);
- return CPURegister::Create(index, size_, type_);
-}
-
-
-CPURegister CPURegList::PopHighestIndex() {
- ASSERT(IsValid());
- if (IsEmpty()) {
- return NoCPUReg;
- }
- int index = CountLeadingZeros(list_, kRegListSizeInBits);
- index = kRegListSizeInBits - 1 - index;
- ASSERT((1 << index) & list_);
- Remove(index);
- return CPURegister::Create(index, size_, type_);
-}
-
-
-void CPURegList::RemoveCalleeSaved() {
- if (type() == CPURegister::kRegister) {
- Remove(GetCalleeSaved(RegisterSizeInBits()));
- } else if (type() == CPURegister::kFPRegister) {
- Remove(GetCalleeSavedFP(RegisterSizeInBits()));
- } else {
- ASSERT(type() == CPURegister::kNoRegister);
- ASSERT(IsEmpty());
- // The list must already be empty, so do nothing.
- }
-}
-
-
-CPURegList CPURegList::GetCalleeSaved(unsigned size) {
- return CPURegList(CPURegister::kRegister, size, 19, 29);
-}
-
-
-CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
- return CPURegList(CPURegister::kFPRegister, size, 8, 15);
-}
-
-
-CPURegList CPURegList::GetCallerSaved(unsigned size) {
- // Registers x0-x18 and lr (x30) are caller-saved.
- CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
- list.Combine(lr);
- return list;
-}
-
-
-CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
- // Registers d0-d7 and d16-d31 are caller-saved.
- CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
- list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
- return list;
-}
-
-
-// This function defines the list of registers which are associated with a
-// safepoint slot. Safepoint register slots are saved contiguously on the stack.
-// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
-// code to index in the safepoint register slots. Any change here can affect
-// this mapping.
-CPURegList CPURegList::GetSafepointSavedRegisters() {
- CPURegList list = CPURegList::GetCalleeSaved();
- list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved));
-
- // Note that unfortunately we can't use symbolic names for registers and have
- // to directly use register codes. This is because this function is used to
- // initialize some static variables and we can't rely on register variables
- // to be initialized due to static initialization order issues in C++.
-
- // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
- // preserved outside of the macro assembler.
- list.Remove(16);
- list.Remove(17);
-
- // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
- // is a caller-saved register according to the procedure call standard.
- list.Combine(18);
-
- // Drop jssp as the stack pointer doesn't need to be included.
- list.Remove(28);
-
- // Add the link register (x30) to the safepoint list.
- list.Combine(30);
-
- return list;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on A64 means that it is a movz/movk sequence. We don't
- // generate those for relocatable pointers.
- return false;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * kInstructionSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- UNIMPLEMENTED();
-}
-
-
-Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
- Register reg3, Register reg4) {
- CPURegList regs(reg1, reg2, reg3, reg4);
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (regs.IncludesAliasOf(candidate)) continue;
- return candidate;
- }
- UNREACHABLE();
- return NoReg;
-}
-
-
-bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
- const CPURegister& reg3, const CPURegister& reg4,
- const CPURegister& reg5, const CPURegister& reg6,
- const CPURegister& reg7, const CPURegister& reg8) {
- int number_of_valid_regs = 0;
- int number_of_valid_fpregs = 0;
-
- RegList unique_regs = 0;
- RegList unique_fpregs = 0;
-
- const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
-
- for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
- if (regs[i].IsRegister()) {
- number_of_valid_regs++;
- unique_regs |= regs[i].Bit();
- } else if (regs[i].IsFPRegister()) {
- number_of_valid_fpregs++;
- unique_fpregs |= regs[i].Bit();
- } else {
- ASSERT(!regs[i].IsValid());
- }
- }
-
- int number_of_unique_regs =
- CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
- int number_of_unique_fpregs =
- CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
-
- ASSERT(number_of_valid_regs >= number_of_unique_regs);
- ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
-
- return (number_of_valid_regs != number_of_unique_regs) ||
- (number_of_valid_fpregs != number_of_unique_fpregs);
-}
-
-
-bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
- const CPURegister& reg3, const CPURegister& reg4,
- const CPURegister& reg5, const CPURegister& reg6,
- const CPURegister& reg7, const CPURegister& reg8) {
- ASSERT(reg1.IsValid());
- bool match = true;
- match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
- match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
- match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
- match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
- match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
- match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
- match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
- return match;
-}
-
-
-void Operand::initialize_handle(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
-
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
- immediate_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
- immediate_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE64;
- }
-}
-
-
-bool Operand::NeedsRelocation() const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- return Serializer::enabled();
- }
-
- return !RelocInfo::IsNone(rmode_);
-}
-
-
-// Assembler
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- unresolved_branches_(),
- positions_recorder_(this) {
- const_pool_blocked_nesting_ = 0;
- Reset();
-}
-
-
-Assembler::~Assembler() {
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(const_pool_blocked_nesting_ == 0);
-}
-
-
-void Assembler::Reset() {
-#ifdef DEBUG
- ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
- ASSERT(const_pool_blocked_nesting_ == 0);
- memset(buffer_, 0, pc_ - buffer_);
-#endif
- pc_ = buffer_;
- reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
- reinterpret_cast<byte*>(pc_));
- num_pending_reloc_info_ = 0;
- next_buffer_check_ = 0;
- no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
- ClearRecordedAstId();
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
-
- // Set up code descriptor.
- if (desc) {
- desc->buffer = reinterpret_cast<byte*>(buffer_);
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
- reloc_info_writer.pos();
- desc->origin = this;
- }
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CheckLabelLinkChain(Label const * label) {
-#ifdef DEBUG
- if (label->is_linked()) {
- int linkoffset = label->pos();
- bool end_of_chain = false;
- while (!end_of_chain) {
- Instruction * link = InstructionAt(linkoffset);
- int linkpcoffset = link->ImmPCOffset();
- int prevlinkoffset = linkoffset + linkpcoffset;
-
- end_of_chain = (linkoffset == prevlinkoffset);
- linkoffset = linkoffset + linkpcoffset;
- }
- }
-#endif
-}
-
-
-void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
- Label* label,
- Instruction* label_veneer) {
- ASSERT(label->is_linked());
-
- CheckLabelLinkChain(label);
-
- Instruction* link = InstructionAt(label->pos());
- Instruction* prev_link = link;
- Instruction* next_link;
- bool end_of_chain = false;
-
- while (link != branch && !end_of_chain) {
- next_link = link->ImmPCOffsetTarget();
- end_of_chain = (link == next_link);
- prev_link = link;
- link = next_link;
- }
-
- ASSERT(branch == link);
- next_link = branch->ImmPCOffsetTarget();
-
- if (branch == prev_link) {
- // The branch is the first instruction in the chain.
- if (branch == next_link) {
- // It is also the last instruction in the chain, so it is the only branch
- // currently referring to this label.
- label->Unuse();
- } else {
- label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
- }
-
- } else if (branch == next_link) {
- // The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
-
- } else {
- // The branch is in the middle of the chain.
- if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(next_link);
- } else if (label_veneer != NULL) {
- // Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
-
- end_of_chain = false;
- link = next_link;
- while (!end_of_chain) {
- next_link = link->ImmPCOffsetTarget();
- end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(label_veneer);
- link = next_link;
- }
- } else {
- // The assert below will fire.
- // Some other work could be attempted to fix up the chain, but it would be
- // rather complicated. If we crash here, we may want to consider using an
- // other mechanism than a chain of branches.
- //
- // Note that this situation currently should not happen, as we always call
- // this function with a veneer to the target label.
- // However this could happen with a MacroAssembler in the following state:
- // [previous code]
- // B(label);
- // [20KB code]
- // Tbz(label); // First tbz. Pointing to unconditional branch.
- // [20KB code]
- // Tbz(label); // Second tbz. Pointing to the first tbz.
- // [more code]
- // and this function is called to remove the first tbz from the label link
- // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
- // the unconditional branch.
- CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
- UNREACHABLE();
- }
- }
-
- CheckLabelLinkChain(label);
-}
-
-
-void Assembler::bind(Label* label) {
- // Bind label to the address at pc_. All instructions (most likely branches)
- // that are linked to this label will be updated to point to the newly-bound
- // label.
-
- ASSERT(!label->is_near_linked());
- ASSERT(!label->is_bound());
-
- // If the label is linked, the link chain looks something like this:
- //
- // |--I----I-------I-------L
- // |---------------------->| pc_offset
- // |-------------->| linkoffset = label->pos()
- // |<------| link->ImmPCOffset()
- // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
- //
- // On each iteration, the last link is updated and then removed from the
- // chain until only one remains. At that point, the label is bound.
- //
- // If the label is not linked, no preparation is required before binding.
- while (label->is_linked()) {
- int linkoffset = label->pos();
- Instruction* link = InstructionAt(linkoffset);
- int prevlinkoffset = linkoffset + link->ImmPCOffset();
-
- CheckLabelLinkChain(label);
-
- ASSERT(linkoffset >= 0);
- ASSERT(linkoffset < pc_offset());
- ASSERT((linkoffset > prevlinkoffset) ||
- (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
- ASSERT(prevlinkoffset >= 0);
-
- // Update the link to point to the label.
- link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
-
- // Link the label to the previous link in the chain.
- if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
- // We hit kStartOfLabelLinkChain, so the chain is fully processed.
- label->Unuse();
- } else {
- // Update the label for the next iteration.
- label->link_to(prevlinkoffset);
- }
- }
- label->bind_to(pc_offset());
-
- ASSERT(label->is_bound());
- ASSERT(!label->is_linked());
-
- DeleteUnresolvedBranchInfoForLabel(label);
-}
-
-
-int Assembler::LinkAndGetByteOffsetTo(Label* label) {
- ASSERT(sizeof(*pc_) == 1);
- CheckLabelLinkChain(label);
-
- int offset;
- if (label->is_bound()) {
- // The label is bound, so it does not need to be updated. Referring
- // instructions must link directly to the label as they will not be
- // updated.
- //
- // In this case, label->pos() returns the offset of the label from the
- // start of the buffer.
- //
- // Note that offset can be zero for self-referential instructions. (This
- // could be useful for ADR, for example.)
- offset = label->pos() - pc_offset();
- ASSERT(offset <= 0);
- } else {
- if (label->is_linked()) {
- // The label is linked, so the referring instruction should be added onto
- // the end of the label's link chain.
- //
- // In this case, label->pos() returns the offset of the last linked
- // instruction from the start of the buffer.
- offset = label->pos() - pc_offset();
- ASSERT(offset != kStartOfLabelLinkChain);
- // Note that the offset here needs to be PC-relative only so that the
- // first instruction in a buffer can link to an unbound label. Otherwise,
- // the offset would be 0 for this case, and 0 is reserved for
- // kStartOfLabelLinkChain.
- } else {
- // The label is unused, so it now becomes linked and the referring
- // instruction is at the start of the new link chain.
- offset = kStartOfLabelLinkChain;
- }
- // The instruction at pc is now the last link in the label's chain.
- label->link_to(pc_offset());
- }
-
- return offset;
-}
-
-
-void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
- // Branches to this label will be resolved when the label is bound below.
- std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
- it = unresolved_branches_.begin();
- while (it != unresolved_branches_.end()) {
- it_tmp = it++;
- if (it_tmp->second.label_ == label) {
- CHECK(it_tmp->first >= pc_offset());
- unresolved_branches_.erase(it_tmp);
- }
- }
-}
-
-
-void Assembler::StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
- }
-}
-
-
-void Assembler::EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
- }
-}
-
-
-bool Assembler::is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
-}
-
-
-bool Assembler::IsConstantPoolAt(Instruction* instr) {
- // The constant pool marker is made of two instructions. These instructions
- // will never be emitted by the JIT, so checking for the first one is enough:
- // 0: ldr xzr, #<size of pool>
- bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
-
- // It is still worth asserting the marker is complete.
- // 4: blr xzr
- ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
- instr->following()->Rn() == xzr.code()));
-
- return result;
-}
-
-
-int Assembler::ConstantPoolSizeAt(Instruction* instr) {
- if (IsConstantPoolAt(instr)) {
- return instr->ImmLLiteral();
- } else {
- return -1;
- }
-}
-
-
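-// For example, a disassembler walking the buffer can use these helpers to
-// avoid decoding pool data as instructions (a sketch; the iteration and the
-// actual skipping logic are assumed):
-//
-//   if (Assembler::IsConstantPoolAt(instr)) {
-//     int size = Assembler::ConstantPoolSizeAt(instr);  // Size from marker.
-//     // Skip the marker, the guard and the pool entries instead of
-//     // disassembling them.
-//   }
-
-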
-void Assembler::ConstantPoolMarker(uint32_t size) {
- ASSERT(is_const_pool_blocked());
- // + 1 is for the crash guard.
- Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
-}
-
-
-void Assembler::ConstantPoolGuard() {
-#ifdef DEBUG
- // Currently this is only used after a constant pool marker.
- ASSERT(is_const_pool_blocked());
- Instruction* instr = reinterpret_cast<Instruction*>(pc_);
- ASSERT(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
-
- // We must generate only one instruction.
- Emit(BLR | Rn(xzr));
-}
-
-
-void Assembler::br(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
- Emit(BR | Rn(xn));
-}
-
-
-void Assembler::blr(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
- // The pattern 'blr xzr' is used as a guard to detect when execution falls
- // through the constant pool. It should not be emitted.
- ASSERT(!xn.Is(xzr));
- Emit(BLR | Rn(xn));
-}
-
-
-void Assembler::ret(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(xn.Is64Bits());
- Emit(RET | Rn(xn));
-}
-
-
-void Assembler::b(int imm26) {
- Emit(B | ImmUncondBranch(imm26));
-}
-
-
-void Assembler::b(Label* label) {
- positions_recorder()->WriteRecordedPositions();
- b(LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::b(int imm19, Condition cond) {
- Emit(B_cond | ImmCondBranch(imm19) | cond);
-}
-
-
-void Assembler::b(Label* label, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
- b(LinkAndGetInstructionOffsetTo(label), cond);
-}
-
-
-void Assembler::bl(int imm26) {
- positions_recorder()->WriteRecordedPositions();
- Emit(BL | ImmUncondBranch(imm26));
-}
-
-
-void Assembler::bl(Label* label) {
- positions_recorder()->WriteRecordedPositions();
- bl(LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::cbz(const Register& rt,
- int imm19) {
- positions_recorder()->WriteRecordedPositions();
- Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
-}
-
-
-void Assembler::cbz(const Register& rt,
- Label* label) {
- positions_recorder()->WriteRecordedPositions();
- cbz(rt, LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::cbnz(const Register& rt,
- int imm19) {
- positions_recorder()->WriteRecordedPositions();
- Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
-}
-
-
-void Assembler::cbnz(const Register& rt,
- Label* label) {
- positions_recorder()->WriteRecordedPositions();
- cbnz(rt, LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::tbz(const Register& rt,
- unsigned bit_pos,
- int imm14) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
- Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
-}
-
-
-void Assembler::tbz(const Register& rt,
- unsigned bit_pos,
- Label* label) {
- positions_recorder()->WriteRecordedPositions();
- tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::tbnz(const Register& rt,
- unsigned bit_pos,
- int imm14) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
- Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
-}
-
-
-void Assembler::tbnz(const Register& rt,
- unsigned bit_pos,
- Label* label) {
- positions_recorder()->WriteRecordedPositions();
- tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::adr(const Register& rd, int imm21) {
- ASSERT(rd.Is64Bits());
- Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
-}
-
-
-void Assembler::adr(const Register& rd, Label* label) {
- adr(rd, LinkAndGetByteOffsetTo(label));
-}
-
-
-void Assembler::add(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSub(rd, rn, operand, LeaveFlags, ADD);
-}
-
-
-void Assembler::adds(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSub(rd, rn, operand, SetFlags, ADD);
-}
-
-
-void Assembler::cmn(const Register& rn,
- const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rn);
- adds(zr, rn, operand);
-}
-
-
-void Assembler::sub(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSub(rd, rn, operand, LeaveFlags, SUB);
-}
-
-
-void Assembler::subs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSub(rd, rn, operand, SetFlags, SUB);
-}
-
-
-void Assembler::cmp(const Register& rn, const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rn);
- subs(zr, rn, operand);
-}
-
-
-void Assembler::neg(const Register& rd, const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rd);
- sub(rd, zr, operand);
-}
-
-
-void Assembler::negs(const Register& rd, const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rd);
- subs(rd, zr, operand);
-}
-
-
-void Assembler::adc(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
-}
-
-
-void Assembler::adcs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
-}
-
-
-void Assembler::sbc(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
-}
-
-
-void Assembler::sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
-}
-
-
-void Assembler::ngc(const Register& rd, const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rd);
- sbc(rd, zr, operand);
-}
-
-
-void Assembler::ngcs(const Register& rd, const Operand& operand) {
- Register zr = AppropriateZeroRegFor(rd);
- sbcs(rd, zr, operand);
-}
-
-
-// Logical instructions.
-void Assembler::and_(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, AND);
-}
-
-
-void Assembler::ands(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, ANDS);
-}
-
-
-void Assembler::tst(const Register& rn,
- const Operand& operand) {
- ands(AppropriateZeroRegFor(rn), rn, operand);
-}
-
-
-void Assembler::bic(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, BIC);
-}
-
-
-void Assembler::bics(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, BICS);
-}
-
-
-void Assembler::orr(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, ORR);
-}
-
-
-void Assembler::orn(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, ORN);
-}
-
-
-void Assembler::eor(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, EOR);
-}
-
-
-void Assembler::eon(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- Logical(rd, rn, operand, EON);
-}
-
-
-void Assembler::lslv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::lsrv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::asrv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::rorv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-// Bitfield operations.
-void Assembler::bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | BFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
- ASSERT(rd.Is64Bits() || rn.Is32Bits());
- Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | SBFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | UBFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | EXTR | N | Rm(rm) |
- ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::csel(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ConditionalSelect(rd, rn, rm, cond, CSEL);
-}
-
-
-void Assembler::csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ConditionalSelect(rd, rn, rm, cond, CSINC);
-}
-
-
-void Assembler::csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ConditionalSelect(rd, rn, rm, cond, CSINV);
-}
-
-
-void Assembler::csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ConditionalSelect(rd, rn, rm, cond, CSNEG);
-}
-
-
-void Assembler::cset(const Register& rd, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- Register zr = AppropriateZeroRegFor(rd);
- csinc(rd, zr, zr, InvertCondition(cond));
-}
-
-
-void Assembler::csetm(const Register& rd, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- Register zr = AppropriateZeroRegFor(rd);
- csinv(rd, zr, zr, InvertCondition(cond));
-}
-
-
-void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csinc(rd, rn, rn, InvertCondition(cond));
-}
-
-
-void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csinv(rd, rn, rn, InvertCondition(cond));
-}
-
-
-void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
- ASSERT((cond != al) && (cond != nv));
- csneg(rd, rn, rn, InvertCondition(cond));
-}
-
-
-void Assembler::ConditionalSelect(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond,
- ConditionalSelectOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
- ConditionalCompare(rn, operand, nzcv, cond, CCMN);
-}
-
-
-void Assembler::ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
- ConditionalCompare(rn, operand, nzcv, cond, CCMP);
-}
-
-
-void Assembler::DataProcessing3Source(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra,
- DataProcessing3SourceOp op) {
- Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::mul(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
- Register zr = AppropriateZeroRegFor(rn);
- DataProcessing3Source(rd, rn, rm, zr, MADD);
-}
-
-
-void Assembler::madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
- DataProcessing3Source(rd, rn, rm, ra, MADD);
-}
-
-
-void Assembler::mneg(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
- Register zr = AppropriateZeroRegFor(rn);
- DataProcessing3Source(rd, rn, rm, zr, MSUB);
-}
-
-
-void Assembler::msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
- DataProcessing3Source(rd, rn, rm, ra, MSUB);
-}
-
-
-void Assembler::smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
- DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
-}
-
-
-void Assembler::smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
- DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
-}
-
-
-void Assembler::umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
- DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
-}
-
-
-void Assembler::umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(rd.Is64Bits() && ra.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
- DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
-}
-
-
-void Assembler::smull(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.Is64Bits());
- ASSERT(rn.Is32Bits() && rm.Is32Bits());
- DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
-}
-
-
-void Assembler::smulh(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(AreSameSizeAndType(rd, rn, rm));
- DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
-}
-
-
-void Assembler::sdiv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::udiv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == rm.SizeInBits());
- Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::rbit(const Register& rd,
- const Register& rn) {
- DataProcessing1Source(rd, rn, RBIT);
-}
-
-
-void Assembler::rev16(const Register& rd,
- const Register& rn) {
- DataProcessing1Source(rd, rn, REV16);
-}
-
-
-void Assembler::rev32(const Register& rd,
- const Register& rn) {
- ASSERT(rd.Is64Bits());
- DataProcessing1Source(rd, rn, REV);
-}
-
-
-void Assembler::rev(const Register& rd,
- const Register& rn) {
- DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
-}
-
-
-void Assembler::clz(const Register& rd,
- const Register& rn) {
- DataProcessing1Source(rd, rn, CLZ);
-}
-
-
-void Assembler::cls(const Register& rd,
- const Register& rn) {
- DataProcessing1Source(rd, rn, CLS);
-}
-
-
-void Assembler::ldp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
-}
-
-
-void Assembler::stp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
-}
-
-
-void Assembler::ldpsw(const Register& rt,
- const Register& rt2,
- const MemOperand& src) {
- ASSERT(rt.Is64Bits());
- LoadStorePair(rt, rt2, src, LDPSW_x);
-}
-
-
-void Assembler::LoadStorePair(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairOp op) {
- // 'rt' and 'rt2' can only be aliased for stores.
- ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
- ASSERT(AreSameSizeAndType(rt, rt2));
-
- Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
-
- Instr addrmodeop;
- if (addr.IsImmediateOffset()) {
- addrmodeop = LoadStorePairOffsetFixed;
- } else {
- // Pre-index and post-index modes.
- ASSERT(!rt.Is(addr.base()));
- ASSERT(!rt2.Is(addr.base()));
- ASSERT(addr.offset() != 0);
- if (addr.IsPreIndex()) {
- addrmodeop = LoadStorePairPreIndexFixed;
- } else {
- ASSERT(addr.IsPostIndex());
- addrmodeop = LoadStorePairPostIndexFixed;
- }
- }
- Emit(addrmodeop | memop);
-}
-
-
-void Assembler::ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- LoadStorePairNonTemporal(rt, rt2, src,
- LoadPairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- LoadStorePairNonTemporal(rt, rt2, dst,
- StorePairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op) {
- ASSERT(!rt.Is(rt2));
- ASSERT(AreSameSizeAndType(rt, rt2));
- ASSERT(addr.IsImmediateOffset());
-
- LSDataSize size = CalcLSPairDataSize(
- static_cast<LoadStorePairOp>(op & LoadStorePairMask));
- Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), size));
-}
-
-
-// Memory instructions.
-void Assembler::ldrb(const Register& rt, const MemOperand& src) {
- LoadStore(rt, src, LDRB_w);
-}
-
-
-void Assembler::strb(const Register& rt, const MemOperand& dst) {
- LoadStore(rt, dst, STRB_w);
-}
-
-
-void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
- LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
-}
-
-
-void Assembler::ldrh(const Register& rt, const MemOperand& src) {
- LoadStore(rt, src, LDRH_w);
-}
-
-
-void Assembler::strh(const Register& rt, const MemOperand& dst) {
- LoadStore(rt, dst, STRH_w);
-}
-
-
-void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
- LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
-}
-
-
-void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
- LoadStore(rt, src, LoadOpFor(rt));
-}
-
-
-void Assembler::str(const CPURegister& rt, const MemOperand& src) {
- LoadStore(rt, src, StoreOpFor(rt));
-}
-
-
-void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
- ASSERT(rt.Is64Bits());
- LoadStore(rt, src, LDRSW_x);
-}
-
-
-void Assembler::ldr(const Register& rt, uint64_t imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // TODO(all): arbitrary values in them. Manually move it for now.
- // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
-}
-
-
-void Assembler::ldr(const FPRegister& ft, double imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // TODO(all): arbitrary values in them. Manually move it for now.
- // TODO(all): Fix MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
-}
-
-
-void Assembler::mov(const Register& rd, const Register& rm) {
- // Moves involving the stack pointer are encoded as add immediate with
- // second operand of zero. Otherwise, orr with first operand zr is
- // used.
- if (rd.IsSP() || rm.IsSP()) {
- add(rd, rm, 0);
- } else {
- orr(rd, AppropriateZeroRegFor(rd), rm);
- }
-}
-
-
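-// For example, mov(x0, x1) emits 'orr x0, xzr, x1', while a move involving
-// the stack pointer, say mov(csp, x0), emits 'add csp, x0, #0' instead,
-// since the orr form cannot address the stack pointer. (A sketch; register
-// names are illustrative.)
-
-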
-void Assembler::mvn(const Register& rd, const Operand& operand) {
- orn(rd, AppropriateZeroRegFor(rd), operand);
-}
-
-
-void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
- ASSERT(rt.Is64Bits());
- Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
-}
-
-
-void Assembler::msr(SystemRegister sysreg, const Register& rt) {
- ASSERT(rt.Is64Bits());
- Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
-}
-
-
-void Assembler::hint(SystemHint code) {
- Emit(HINT | ImmHint(code) | Rt(xzr));
-}
-
-
-void Assembler::dmb(BarrierDomain domain, BarrierType type) {
- Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
-}
-
-
-void Assembler::dsb(BarrierDomain domain, BarrierType type) {
- Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
-}
-
-
-void Assembler::isb() {
- Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
-}
-
-
-void Assembler::fmov(FPRegister fd, double imm) {
- if (fd.Is64Bits() && IsImmFP64(imm)) {
- Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
- } else if (fd.Is32Bits() && IsImmFP32(imm)) {
- Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm)));
- } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
- Register zr = AppropriateZeroRegFor(fd);
- fmov(fd, zr);
- } else {
- ldr(fd, imm);
- }
-}
-
-
-void Assembler::fmov(Register rd, FPRegister fn) {
- ASSERT(rd.SizeInBits() == fn.SizeInBits());
- FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
- Emit(op | Rd(rd) | Rn(fn));
-}
-
-
-void Assembler::fmov(FPRegister fd, Register rn) {
- ASSERT(fd.SizeInBits() == rn.SizeInBits());
- FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
- Emit(op | Rd(fd) | Rn(rn));
-}
-
-
-void Assembler::fmov(FPRegister fd, FPRegister fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
-}
-
-
-void Assembler::fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FADD);
-}
-
-
-void Assembler::fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FSUB);
-}
-
-
-void Assembler::fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMUL);
-}
-
-
-void Assembler::fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
-}
-
-
-void Assembler::fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
-}
-
-
-void Assembler::fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
-}
-
-
-void Assembler::fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
-}
-
-
-void Assembler::fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FDIV);
-}
-
-
-void Assembler::fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMAX);
-}
-
-
-void Assembler::fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMAXNM);
-}
-
-
-void Assembler::fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMIN);
-}
-
-
-void Assembler::fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMINNM);
-}
-
-
-void Assembler::fabs(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FABS);
-}
-
-
-void Assembler::fneg(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FNEG);
-}
-
-
-void Assembler::fsqrt(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FSQRT);
-}
-
-
-void Assembler::frinta(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTA);
-}
-
-
-void Assembler::frintn(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTN);
-}
-
-
-void Assembler::frintz(const FPRegister& fd,
- const FPRegister& fn) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTZ);
-}
-
-
-void Assembler::fcmp(const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(fn.SizeInBits() == fm.SizeInBits());
- Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
-}
-
-
-void Assembler::fcmp(const FPRegister& fn,
- double value) {
- USE(value);
- // Although the fcmp instruction can strictly only take an immediate value of
- // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
- // affect the result of the comparison.
- ASSERT(value == 0.0);
- Emit(FPType(fn) | FCMP_zero | Rn(fn));
-}
-
-
-void Assembler::fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond) {
- ASSERT(fn.SizeInBits() == fm.SizeInBits());
- Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
-}
-
-
-void Assembler::fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- ASSERT(fd.SizeInBits() == fm.SizeInBits());
- Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
-}
-
-
-void Assembler::FPConvertToInt(const Register& rd,
- const FPRegister& fn,
- FPIntegerConvertOp op) {
- Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
-}
-
-
-void Assembler::fcvt(const FPRegister& fd,
- const FPRegister& fn) {
- if (fd.Is64Bits()) {
- // Convert float to double.
- ASSERT(fn.Is32Bits());
- FPDataProcessing1Source(fd, fn, FCVT_ds);
- } else {
- // Convert double to float.
- ASSERT(fn.Is64Bits());
- FPDataProcessing1Source(fd, fn, FCVT_sd);
- }
-}
-
-
-void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTAU);
-}
-
-
-void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTAS);
-}
-
-
-void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTMU);
-}
-
-
-void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTMS);
-}
-
-
-void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTNU);
-}
-
-
-void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTNS);
-}
-
-
-void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTZU);
-}
-
-
-void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTZS);
-}
-
-
-void Assembler::scvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
- if (fbits == 0) {
- Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
- } else {
- Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
- Rd(fd));
- }
-}
-
-
-void Assembler::ucvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
- if (fbits == 0) {
- Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
- } else {
- Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
- Rd(fd));
- }
-}
-
-
-// Note: in the encodings below, a difference in case for the same letter
-// indicates a negated bit. If b is 1, then B is 0.
-Instr Assembler::ImmFP32(float imm) {
- ASSERT(IsImmFP32(imm));
- // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
- uint32_t bits = float_to_rawbits(imm);
- // bit7: a000.0000
- uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
- // bit6: 0b00.0000
- uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
- // bit5_to_0: 00cd.efgh
- uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
-
- return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
-}
-
-
-Instr Assembler::ImmFP64(double imm) {
- ASSERT(IsImmFP64(imm));
- // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
- // 0000.0000.0000.0000.0000.0000.0000.0000
- uint64_t bits = double_to_rawbits(imm);
- // bit7: a000.0000
- uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
- // bit6: 0b00.0000
- uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
- // bit5_to_0: 00cd.efgh
- uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
-
- return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
-}
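-
-
-// As a worked example of the encoding above: 1.0f has raw bits 0x3f800000,
-// i.e. a = 0, B = 0 (so the encoded b bit is 1), cdefgh = 110000, which
-// packs to the well-known imm8 value 0x70 used by 'fmov s0, #1.0'.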
-
-
-// Code generation helpers.
-void Assembler::MoveWide(const Register& rd,
- uint64_t imm,
- int shift,
- MoveWideImmediateOp mov_op) {
- if (shift >= 0) {
- // Explicit shift specified.
- ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
- ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
- shift /= 16;
- } else {
- // Calculate a new immediate and shift combination to encode the immediate
- // argument.
- shift = 0;
- if ((imm & ~0xffffUL) == 0) {
- // Nothing to do.
- } else if ((imm & ~(0xffffUL << 16)) == 0) {
- imm >>= 16;
- shift = 1;
- } else if ((imm & ~(0xffffUL << 32)) == 0) {
- ASSERT(rd.Is64Bits());
- imm >>= 32;
- shift = 2;
- } else if ((imm & ~(0xffffUL << 48)) == 0) {
- ASSERT(rd.Is64Bits());
- imm >>= 48;
- shift = 3;
- }
- }
-
- ASSERT(is_uint16(imm));
-
- Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
- Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
-}
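-
-
-// For example (a sketch of the shift selection above, with MOVZ naming the
-// movz variant of MoveWideImmediateOp): MoveWide(x0, 0x1234ULL << 32, -1,
-// MOVZ) matches the third halfword pattern, reduces the immediate to 0x1234
-// with shift = 2, and emits 'movz x0, #0x1234, lsl #32'.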
-
-
-void Assembler::AddSub(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation());
- if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- ASSERT(IsImmAddSub(immediate));
- Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
- Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
- ImmAddSub(immediate) | dest_reg | RnSP(rn));
- } else if (operand.IsShiftedRegister()) {
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
-
- // For instructions of the form:
- // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
- // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
- // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
- // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
- // or their 64-bit register equivalents, convert the operand from shifted to
- // extended register mode, and emit an add/sub extended instruction.
- if (rn.IsSP() || rd.IsSP()) {
- ASSERT(!(rd.IsSP() && (S == SetFlags)));
- DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
- AddSubExtendedFixed | op);
- } else {
- DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
- }
- } else {
- ASSERT(operand.IsExtendedRegister());
- DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
- }
-}
-
-
-void Assembler::AddSubWithCarry(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
- ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
- ASSERT(!operand.NeedsRelocation());
- Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::hlt(int code) {
- ASSERT(is_uint16(code));
- Emit(HLT | ImmException(code));
-}
-
-
-void Assembler::brk(int code) {
- ASSERT(is_uint16(code));
- Emit(BRK | ImmException(code));
-}
-
-
-void Assembler::debug(const char* message, uint32_t code, Instr params) {
-#ifdef USE_SIMULATOR
- // Don't generate simulator-specific code if we are building a snapshot, which
- // might be run on real hardware.
- if (!Serializer::enabled()) {
-#ifdef DEBUG
- Serializer::TooLateToEnableNow();
-#endif
- // The arguments to the debug marker need to be contiguous in memory, so
- // make sure we don't try to emit a literal pool.
- BlockConstPoolScope scope(this);
-
- Label start;
- bind(&start);
-
- // Refer to instructions-a64.h for a description of the marker and its
- // arguments.
- hlt(kImmExceptionIsDebug);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
- dc32(code);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
- dc32(params);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
- EmitStringData(message);
- hlt(kImmExceptionIsUnreachable);
-
- return;
- }
- // Fall through if Serializer is enabled.
-#endif
-
- if (params & BREAK) {
- hlt(kImmExceptionIsDebug);
- }
-}
-
-
-void Assembler::Logical(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation());
- if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- unsigned reg_size = rd.SizeInBits();
-
- ASSERT(immediate != 0);
- ASSERT(immediate != -1);
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
-
- // If the operation is NOT, invert the operation and immediate.
- if ((op & NOT) == NOT) {
- op = static_cast<LogicalOp>(op & ~NOT);
- immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
- }
-
- unsigned n, imm_s, imm_r;
- if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be encoded in the instruction.
- LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
- } else {
- // This case is handled in the macro assembler.
- UNREACHABLE();
- }
- } else {
- ASSERT(operand.IsShiftedRegister());
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
- DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
- }
-}
-
-
-void Assembler::LogicalImmediate(const Register& rd,
- const Register& rn,
- unsigned n,
- unsigned imm_s,
- unsigned imm_r,
- LogicalOp op) {
- unsigned reg_size = rd.SizeInBits();
- Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
- Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
- ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
- Rn(rn));
-}
-
-
-void Assembler::ConditionalCompare(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
- ConditionalCompareOp op) {
- Instr ccmpop;
- ASSERT(!operand.NeedsRelocation());
- if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- ASSERT(IsImmConditionalCompare(immediate));
- ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
- } else {
- ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
- ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
- }
- Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
-}
-
-
-void Assembler::DataProcessing1Source(const Register& rd,
- const Register& rn,
- DataProcessing1SourceOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
- Emit(SF(rn) | op | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::FPDataProcessing1Source(const FPRegister& fd,
- const FPRegister& fn,
- FPDataProcessing1SourceOp op) {
- Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
-}
-
-
-void Assembler::FPDataProcessing2Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- FPDataProcessing2SourceOp op) {
- ASSERT(fd.SizeInBits() == fn.SizeInBits());
- ASSERT(fd.SizeInBits() == fm.SizeInBits());
- Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
-}
-
-
-void Assembler::FPDataProcessing3Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa,
- FPDataProcessing3SourceOp op) {
- ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
- Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
-}
-
-
-void Assembler::EmitShift(const Register& rd,
- const Register& rn,
- Shift shift,
- unsigned shift_amount) {
- switch (shift) {
- case LSL:
- lsl(rd, rn, shift_amount);
- break;
- case LSR:
- lsr(rd, rn, shift_amount);
- break;
- case ASR:
- asr(rd, rn, shift_amount);
- break;
- case ROR:
- ror(rd, rn, shift_amount);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void Assembler::EmitExtendShift(const Register& rd,
- const Register& rn,
- Extend extend,
- unsigned left_shift) {
- ASSERT(rd.SizeInBits() >= rn.SizeInBits());
- unsigned reg_size = rd.SizeInBits();
- // Use a copy of rn with the same size as rd.
- Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
- // Bits extracted are high_bit:0.
- unsigned high_bit = (8 << (extend & 0x3)) - 1;
- // Number of bits left in the result that are not introduced by the shift.
- unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
-
- if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
- switch (extend) {
- case UXTB:
- case UXTH:
- case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
- case SXTB:
- case SXTH:
- case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
- case UXTX:
- case SXTX: {
- ASSERT(rn.SizeInBits() == kXRegSize);
- // Nothing to extend. Just shift.
- lsl(rd, rn_, left_shift);
- break;
- }
- default: UNREACHABLE();
- }
- } else {
- // No need to extend as the extended bits would be shifted away.
- lsl(rd, rn_, left_shift);
- }
-}
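-
-
-// For example (a sketch): extending a register with UXTB and
-// left_shift == 4 into a 32-bit rd gives high_bit == 7 and
-// non_shift_bits == 28, so the code emits ubfm(rd, rn, 28, 7),
-// i.e. 'ubfiz rd, rn, #4, #8'.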
-
-
-void Assembler::DataProcShiftedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- Instr op) {
- ASSERT(operand.IsShiftedRegister());
- ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
- ASSERT(!operand.NeedsRelocation());
- Emit(SF(rd) | op | Flags(S) |
- ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
- Rm(operand.reg()) | Rn(rn) | Rd(rd));
-}
-
-
-void Assembler::DataProcExtendedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- Instr op) {
- ASSERT(!operand.NeedsRelocation());
- Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
- Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
- ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
- dest_reg | RnSP(rn));
-}
-
-
-bool Assembler::IsImmAddSub(int64_t immediate) {
- return is_uint12(immediate) ||
- (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
-}
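-
-// For example, 0x123 and 0x123000 are both valid add/sub immediates (the
-// latter uses the shifted, 'lsl #12' form), while 0x123456 is not and is
-// typically materialized into a scratch register by the macro assembler.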
-
-void Assembler::LoadStore(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op) {
- Instr memop = op | Rt(rt) | RnSP(addr.base());
- ptrdiff_t offset = addr.offset();
-
- if (addr.IsImmediateOffset()) {
- LSDataSize size = CalcLSDataSize(op);
- if (IsImmLSScaled(offset, size)) {
- // Use the scaled addressing mode.
- Emit(LoadStoreUnsignedOffsetFixed | memop |
- ImmLSUnsigned(offset >> size));
- } else if (IsImmLSUnscaled(offset)) {
- // Use the unscaled addressing mode.
- Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
- } else {
- // This case is handled in the macro assembler.
- UNREACHABLE();
- }
- } else if (addr.IsRegisterOffset()) {
- Extend ext = addr.extend();
- Shift shift = addr.shift();
- unsigned shift_amount = addr.shift_amount();
-
- // LSL is encoded in the option field as UXTX.
- if (shift == LSL) {
- ext = UXTX;
- }
-
- // Shifts are encoded in one bit, indicating a left shift by the memory
- // access size.
- ASSERT((shift_amount == 0) ||
- (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
- Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
- ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
- } else {
- // Pre-index and post-index modes.
- ASSERT(!rt.Is(addr.base()));
- if (IsImmLSUnscaled(offset)) {
- if (addr.IsPreIndex()) {
- Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
- } else {
- ASSERT(addr.IsPostIndex());
- Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
- }
- } else {
- // This case is handled in the macro assembler.
- UNREACHABLE();
- }
- }
-}
-
-
-bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
- return is_int9(offset);
-}
-
-
-bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
- bool offset_is_size_multiple = (((offset >> size) << size) == offset);
- return offset_is_size_multiple && is_uint12(offset >> size);
-}
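-
-
-// For example, with a 64-bit load (size == 3) an offset of 24 is scaled
-// (24 >> 3 == 3, encoded in the unsigned-offset form), while an offset of
-// -8 only fits the unscaled form (is_int9), i.e. an ldur-style encoding.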
-
-
-void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
- ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
- // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
- // constant pool. It should not be emitted.
- ASSERT(!rt.Is(xzr));
- Emit(LDR_x_lit |
- ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
- Rt(rt));
-}
-
-
-void Assembler::LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op) {
- int64_t imm = operand.immediate();
- ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
- RecordRelocInfo(operand.rmode(), imm);
- BlockConstPoolFor(1);
- Emit(op | ImmLLiteral(0) | Rt(rt));
-}
-
-
-// Test if a given value can be encoded in the immediate field of a logical
-// instruction.
-// If it can be encoded, the function returns true, and values pointed to by n,
-// imm_s and imm_r are updated with immediates encoded in the format required
-// by the corresponding fields in the logical instruction.
-// If it cannot be encoded, the function returns false, and the values pointed
-// to by n, imm_s and imm_r are undefined.
-bool Assembler::IsImmLogical(uint64_t value,
- unsigned width,
- unsigned* n,
- unsigned* imm_s,
- unsigned* imm_r) {
- ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
- ASSERT((width == kWRegSize) || (width == kXRegSize));
-
- // Logical immediates are encoded using parameters n, imm_s and imm_r using
- // the following table:
- //
- // N imms immr size S R
- // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
- // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
- // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
- // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
- // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
- // 0 11110s xxxxxr 2 UInt(s) UInt(r)
- // (s bits must not be all set)
- //
- // A pattern is constructed of size bits, where the least significant S+1
- // bits are set. The pattern is rotated right by R, and repeated across a
- // 32 or 64-bit value, depending on destination register width.
- //
- // To test if an arbitrary immediate can be encoded using this scheme, an
- // iterative algorithm is used.
- //
- // TODO(mcapewel) This code does not consider using X/W register overlap to
- // support 64-bit immediates where the top 32-bits are zero, and the bottom
- // 32-bits are an encodable logical immediate.
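- //
- // As a worked example: 0x0f0f0f0f0f0f0f0f is a repeating 8-bit pattern
- // with the four least significant bits set and no rotation, so it encodes
- // as N = 0, imm_s = 0b110011 and imm_r = 0b000000.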
-
- // 1. If the value has all set or all clear bits, it can't be encoded.
- if ((value == 0) || (value == 0xffffffffffffffffUL) ||
- ((width == kWRegSize) && (value == 0xffffffff))) {
- return false;
- }
-
- unsigned lead_zero = CountLeadingZeros(value, width);
- unsigned lead_one = CountLeadingZeros(~value, width);
- unsigned trail_zero = CountTrailingZeros(value, width);
- unsigned trail_one = CountTrailingZeros(~value, width);
- unsigned set_bits = CountSetBits(value, width);
-
- // The fixed bits in the immediate s field.
- // If width == 64 (X reg), start at 0xFFFFFF80.
- // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
- // widths won't be executed.
- int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
- int imm_s_mask = 0x3F;
-
- for (;;) {
- // 2. If the value is two bits wide, it can be encoded.
- if (width == 2) {
- *n = 0;
- *imm_s = 0x3C;
- *imm_r = (value & 3) - 1;
- return true;
- }
-
- *n = (width == 64) ? 1 : 0;
- *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
- if ((lead_zero + set_bits) == width) {
- *imm_r = 0;
- } else {
- *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
- }
-
- // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
- // the bit width of the value, it can be encoded.
- if (lead_zero + trail_zero + set_bits == width) {
- return true;
- }
-
- // 4. If the sum of leading ones, trailing ones and unset bits in the
- // value is equal to the bit width of the value, it can be encoded.
- if (lead_one + trail_one + (width - set_bits) == width) {
- return true;
- }
-
- // 5. If the most-significant half of the value is equal to the
- // least-significant half, return to step 2 using the least-significant
- // half of the value.
- uint64_t mask = (1UL << (width >> 1)) - 1;
- if ((value & mask) == ((value >> (width >> 1)) & mask)) {
- width >>= 1;
- set_bits >>= 1;
- imm_s_fixed >>= 1;
- continue;
- }
-
- // 6. Otherwise, the value can't be encoded.
- return false;
- }
-}
-
-
-bool Assembler::IsImmConditionalCompare(int64_t immediate) {
- return is_uint5(immediate);
-}
-
-
-bool Assembler::IsImmFP32(float imm) {
- // Valid values will have the form:
- // aBbb.bbbc.defg.h000.0000.0000.0000.0000
- uint32_t bits = float_to_rawbits(imm);
- // bits[19..0] are cleared.
- if ((bits & 0x7ffff) != 0) {
- return false;
- }
-
- // bits[29..25] are all set or all cleared.
- uint32_t b_pattern = (bits >> 16) & 0x3e00;
- if (b_pattern != 0 && b_pattern != 0x3e00) {
- return false;
- }
-
- // bit[30] and bit[29] are opposite.
- if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
- return false;
- }
-
- return true;
-}
-
-
-bool Assembler::IsImmFP64(double imm) {
- // Valid values will have the form:
- // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
- // 0000.0000.0000.0000.0000.0000.0000.0000
- uint64_t bits = double_to_rawbits(imm);
- // bits[47..0] are cleared.
- if ((bits & 0xffffffffffffL) != 0) {
- return false;
- }
-
- // bits[61..54] are all set or all cleared.
- uint32_t b_pattern = (bits >> 48) & 0x3fc0;
- if (b_pattern != 0 && b_pattern != 0x3fc0) {
- return false;
- }
-
- // bit[62] and bit[61] are opposite.
- if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
- return false;
- }
-
- return true;
-}
-
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4 * KB) {
- desc.buffer_size = 4 * KB;
- } else if (buffer_size_ < 1 * MB) {
- desc.buffer_size = 2 * buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1 * MB;
- }
- CHECK_GT(desc.buffer_size, 0); // No overflow.
-
- byte* buffer = reinterpret_cast<byte*>(buffer_);
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- intptr_t pc_delta = desc.buffer - buffer;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer + buffer_size_);
- memmove(desc.buffer, buffer, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // None of our relocation types are PC-relative with a target outside the
- // code buffer, nor absolute with a target inside it, so there is no need to
- // relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL)) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode));
- // These modes do not need an entry in the constant pool.
- } else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- }
-
- if (!RelocInfo::IsNone(rmode)) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(
- reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
- }
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstructionSize;
- if (no_const_pool_before_ < pc_limit) {
- // If there are pending entries, the constant pool cannot be blocked
- // past first_const_pool_use_ + kMaxDistToPool.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
- no_const_pool_before_ = pc_limit;
- }
-
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequences of instructions must not be broken up by constant
- // pool emission; such sequences are protected by calls to BlockConstPoolFor
- // and BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
- return;
- }
-
- // There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
- return;
- }
-
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kAvgDistToPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToPool &&
- (require_jump || (dist < (kMaxDistToPool / 2)))) {
- return;
- }
-
- Label size_check;
- bind(&size_check);
-
- // Check that the code buffer is large enough before emitting the constant
- // pool (this includes the jump over the pool, the constant pool marker, the
- // constant pool guard, and the gap to the relocation information).
- int jump_instr = require_jump ? kInstructionSize : 0;
- int size_pool_marker = kInstructionSize;
- int size_pool_guard = kInstructionSize;
- int pool_size = jump_instr + size_pool_marker + size_pool_guard +
- num_pending_reloc_info_ * kPointerSize;
- int needed_space = pool_size + kGap;
- while (buffer_space() <= needed_space) {
- GrowBuffer();
- }
-
- {
- // Block recursive calls to CheckConstPool.
- BlockConstPoolScope block_const_pool(this);
- RecordComment("[ Constant Pool");
- RecordConstPool(pool_size);
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
-
- // Emit a constant pool header. The header has two goals:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // The header is therefore made of two A64 instructions:
- //   ldr xzr, #<size of the constant pool in 32-bit words>
- //   blr xzr
- // If executed, the code will likely segfault and lr will point to the
- // beginning of the constant pool.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- ConstantPoolMarker(2 * num_pending_reloc_info_);
- ConstantPoolGuard();
-
- // Emit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL);
-
- Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(instr->IsLdrLiteral() &&
- instr->ImmLLiteral() == 0);
-
- instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
- dc64(rinfo.data());
- }
-
- num_pending_reloc_info_ = 0;
- first_const_pool_use_ = -1;
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
-
- ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(pool_size));
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-int Assembler::buffer_space() const {
- return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
-}
-
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordConstPool(int size) {
- // We only need this for debugger support, to correctly compute offsets in the
- // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/assembler-a64.h b/deps/v8/src/a64/assembler-a64.h
deleted file mode 100644
index a2c93df2ae..0000000000
--- a/deps/v8/src/a64/assembler-a64.h
+++ /dev/null
@@ -1,2085 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_ASSEMBLER_A64_H_
-#define V8_A64_ASSEMBLER_A64_H_
-
-#include <list>
-#include <map>
-
-#include "globals.h"
-#include "utils.h"
-#include "assembler.h"
-#include "serialize.h"
-#include "a64/instructions-a64.h"
-#include "a64/cpu-a64.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Registers.
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
-
-static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-
-
-// Some CPURegister methods can return Register and FPRegister types, so we
-// need to declare them in advance.
-struct Register;
-struct FPRegister;
-
-
-struct CPURegister {
- enum RegisterType {
- // The kInvalid value is used to detect uninitialized static instances,
- // which are always zero-initialized before any constructors are called.
- kInvalid = 0,
- kRegister,
- kFPRegister,
- kNoRegister
- };
-
- static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
- CPURegister r = {code, size, type};
- return r;
- }
-
- unsigned code() const;
- RegisterType type() const;
- RegList Bit() const;
- unsigned SizeInBits() const;
- int SizeInBytes() const;
- bool Is32Bits() const;
- bool Is64Bits() const;
- bool IsValid() const;
- bool IsValidOrNone() const;
- bool IsValidRegister() const;
- bool IsValidFPRegister() const;
- bool IsNone() const;
- bool Is(const CPURegister& other) const;
-
- bool IsZero() const;
- bool IsSP() const;
-
- bool IsRegister() const;
- bool IsFPRegister() const;
-
- Register X() const;
- Register W() const;
- FPRegister D() const;
- FPRegister S() const;
-
- bool IsSameSizeAndType(const CPURegister& other) const;
-
- // V8 compatibility.
- bool is(const CPURegister& other) const { return Is(other); }
- bool is_valid() const { return IsValid(); }
-
- unsigned reg_code;
- unsigned reg_size;
- RegisterType reg_type;
-};
-
-
-struct Register : public CPURegister {
- static Register Create(unsigned code, unsigned size) {
- return CPURegister::Create(code, size, CPURegister::kRegister);
- }
-
- Register() {
- reg_code = 0;
- reg_size = 0;
- reg_type = CPURegister::kNoRegister;
- }
-
- Register(const CPURegister& r) { // NOLINT(runtime/explicit)
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
- }
-
- bool IsValid() const {
- ASSERT(IsRegister() || IsNone());
- return IsValidRegister();
- }
-
- static Register XRegFromCode(unsigned code);
- static Register WRegFromCode(unsigned code);
-
- // Start of V8 compatibility section ---------------------
-  // These members are necessary for compilation.
- // A few of them may be unused for now.
-
- static const int kNumRegisters = kNumberOfRegisters;
- static int NumRegisters() { return kNumRegisters; }
-
- // We allow crankshaft to use the following registers:
- // - x0 to x15
- // - x18 to x24
- // - x27 (also context)
- //
-  // TODO(all): Register x25 is currently free and could be available for
-  // crankshaft, but we don't use it as we might use it as a per-function
-  // literal pool pointer in the future.
- //
- // TODO(all): Consider storing cp in x25 to have only two ranges.
-  // We split the allocatable registers into three ranges called
- // - "low range"
- // - "high range"
- // - "context"
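-  //
-  // As an illustrative sketch (not part of the original interface), the
-  // resulting mapping between allocation indices and register codes is:
-  //   index  0..15  <->  x0..x15   (low range)
-  //   index 16..22  <->  x18..x24  (high range: code = index + gap of 2)
-  //   index 23      <->  x27       (context)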
- static const unsigned kAllocatableLowRangeBegin = 0;
- static const unsigned kAllocatableLowRangeEnd = 15;
- static const unsigned kAllocatableHighRangeBegin = 18;
- static const unsigned kAllocatableHighRangeEnd = 24;
- static const unsigned kAllocatableContext = 27;
-
- // Gap between low and high ranges.
- static const int kAllocatableRangeGapSize =
- (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
-
- static const int kMaxNumAllocatableRegisters =
- (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
- (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // Return true if the register is one that crankshaft can allocate.
- bool IsAllocatable() const {
- return ((reg_code == kAllocatableContext) ||
- (reg_code <= kAllocatableLowRangeEnd) ||
- ((reg_code >= kAllocatableHighRangeBegin) &&
- (reg_code <= kAllocatableHighRangeEnd)));
- }
-
- static Register FromAllocationIndex(unsigned index) {
- ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
- // cp is the last allocatable register.
- if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
- return from_code(kAllocatableContext);
- }
-
- // Handle low and high ranges.
- return (index <= kAllocatableLowRangeEnd)
- ? from_code(index)
- : from_code(index + kAllocatableRangeGapSize);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- ASSERT((kAllocatableLowRangeBegin == 0) &&
- (kAllocatableLowRangeEnd == 15) &&
- (kAllocatableHighRangeBegin == 18) &&
- (kAllocatableHighRangeEnd == 24) &&
- (kAllocatableContext == 27));
- const char* const names[] = {
- "x0", "x1", "x2", "x3", "x4",
- "x5", "x6", "x7", "x8", "x9",
- "x10", "x11", "x12", "x13", "x14",
- "x15", "x18", "x19", "x20", "x21",
- "x22", "x23", "x24", "x27",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.IsAllocatable());
- unsigned code = reg.code();
- if (code == kAllocatableContext) {
- return NumAllocatableRegisters() - 1;
- }
-
- return (code <= kAllocatableLowRangeEnd)
- ? code
- : code - kAllocatableRangeGapSize;
- }
-
- static Register from_code(int code) {
- // Always return an X register.
- return Register::Create(code, kXRegSize);
- }
-
- // End of V8 compatibility section -----------------------
-};
-
-
-struct FPRegister : public CPURegister {
- static FPRegister Create(unsigned code, unsigned size) {
- return CPURegister::Create(code, size, CPURegister::kFPRegister);
- }
-
- FPRegister() {
- reg_code = 0;
- reg_size = 0;
- reg_type = CPURegister::kNoRegister;
- }
-
- FPRegister(const CPURegister& r) { // NOLINT(runtime/explicit)
- reg_code = r.reg_code;
- reg_size = r.reg_size;
- reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
- }
-
- bool IsValid() const {
- ASSERT(IsFPRegister() || IsNone());
- return IsValidFPRegister();
- }
-
- static FPRegister SRegFromCode(unsigned code);
- static FPRegister DRegFromCode(unsigned code);
-
- // Start of V8 compatibility section ---------------------
- static const int kMaxNumRegisters = kNumberOfFPRegisters;
-
- // Crankshaft can use all the FP registers except:
- // - d29 which is used in crankshaft as a double scratch register
- // - d30 which is used to keep the 0 double value
- // - d31 which is used in the MacroAssembler as a double scratch register
- static const int kNumReservedRegisters = 3;
- static const int kMaxNumAllocatableRegisters =
- kNumberOfFPRegisters - kNumReservedRegisters;
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
- static const RegList kAllocatableFPRegisters =
- (1 << kMaxNumAllocatableRegisters) - 1;
-
- static FPRegister FromAllocationIndex(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- const char* const names[] = {
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(FPRegister reg) {
- int code = reg.code();
- ASSERT(code < NumAllocatableRegisters());
- return code;
- }
-
- static FPRegister from_code(int code) {
- // Always return a D register.
- return FPRegister::Create(code, kDRegSize);
- }
- // End of V8 compatibility section -----------------------
-};
-
-
-STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
-STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
-
-
-#if defined(A64_DEFINE_REG_STATICS)
-#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
- const CPURegister init_##register_class##_##name = {code, size, type}; \
- const register_class& name = *reinterpret_cast<const register_class*>( \
- &init_##register_class##_##name)
-#define ALIAS_REGISTER(register_class, alias, name) \
- const register_class& alias = *reinterpret_cast<const register_class*>( \
- &init_##register_class##_##name)
-#else
-#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
- extern const register_class& name
-#define ALIAS_REGISTER(register_class, alias, name) \
- extern const register_class& alias
-#endif // defined(A64_DEFINE_REG_STATICS)
-
-// No*Reg is used to indicate an unused argument, or an error case. Note that
-// these all compare equal (using the Is() method). The Register and FPRegister
-// variants are provided for convenience.
-INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
-
-// v8 compatibility.
-INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
-
-#define DEFINE_REGISTERS(N) \
- INITIALIZE_REGISTER(Register, w##N, N, kWRegSize, CPURegister::kRegister); \
- INITIALIZE_REGISTER(Register, x##N, N, kXRegSize, CPURegister::kRegister);
-REGISTER_CODE_LIST(DEFINE_REGISTERS)
-#undef DEFINE_REGISTERS
-
-INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSize,
- CPURegister::kRegister);
-INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSize,
- CPURegister::kRegister);
-
-#define DEFINE_FPREGISTERS(N) \
- INITIALIZE_REGISTER(FPRegister, s##N, N, kSRegSize, \
- CPURegister::kFPRegister); \
- INITIALIZE_REGISTER(FPRegister, d##N, N, kDRegSize, CPURegister::kFPRegister);
-REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
-#undef DEFINE_FPREGISTERS
-
-#undef INITIALIZE_REGISTER
-
-// Register aliases.
-ALIAS_REGISTER(Register, ip0, x16);
-ALIAS_REGISTER(Register, ip1, x17);
-ALIAS_REGISTER(Register, wip0, w16);
-ALIAS_REGISTER(Register, wip1, w17);
-// Root register.
-ALIAS_REGISTER(Register, root, x26);
-ALIAS_REGISTER(Register, rr, x26);
-// Context pointer register.
-ALIAS_REGISTER(Register, cp, x27);
-// We use a register as a JS stack pointer to overcome the restriction on the
-// architectural SP alignment.
-// We chose x28 because it is contiguous with the other special-purpose
-// registers.
-STATIC_ASSERT(kJSSPCode == 28);
-ALIAS_REGISTER(Register, jssp, x28);
-ALIAS_REGISTER(Register, wjssp, w28);
-ALIAS_REGISTER(Register, fp, x29);
-ALIAS_REGISTER(Register, lr, x30);
-ALIAS_REGISTER(Register, xzr, x31);
-ALIAS_REGISTER(Register, wzr, w31);
-
-// Crankshaft double scratch register.
-ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
-// Keeps the 0 double value.
-ALIAS_REGISTER(FPRegister, fp_zero, d30);
-// MacroAssembler double scratch register.
-ALIAS_REGISTER(FPRegister, fp_scratch, d31);
-
-#undef ALIAS_REGISTER
-
-
-Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
- Register reg2 = NoReg,
- Register reg3 = NoReg,
- Register reg4 = NoReg);
-
-
-// AreAliased returns true if any of the named registers overlap. Arguments set
-// to NoReg are ignored. The system stack pointer may be specified.
-bool AreAliased(const CPURegister& reg1,
- const CPURegister& reg2,
- const CPURegister& reg3 = NoReg,
- const CPURegister& reg4 = NoReg,
- const CPURegister& reg5 = NoReg,
- const CPURegister& reg6 = NoReg,
- const CPURegister& reg7 = NoReg,
- const CPURegister& reg8 = NoReg);
-
-// AreSameSizeAndType returns true if all of the specified registers have the
-// same size, and are of the same type. The system stack pointer may be
-// specified. Arguments set to NoReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
-bool AreSameSizeAndType(const CPURegister& reg1,
- const CPURegister& reg2,
- const CPURegister& reg3 = NoCPUReg,
- const CPURegister& reg4 = NoCPUReg,
- const CPURegister& reg5 = NoCPUReg,
- const CPURegister& reg6 = NoCPUReg,
- const CPURegister& reg7 = NoCPUReg,
- const CPURegister& reg8 = NoCPUReg);
-
-
-typedef FPRegister DoubleRegister;
-
-
-// -----------------------------------------------------------------------------
-// Lists of registers.
-class CPURegList {
- public:
- explicit CPURegList(CPURegister reg1,
- CPURegister reg2 = NoCPUReg,
- CPURegister reg3 = NoCPUReg,
- CPURegister reg4 = NoCPUReg)
- : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
- size_(reg1.SizeInBits()), type_(reg1.type()) {
- ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
- ASSERT(IsValid());
- }
-
- CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
- : list_(list), size_(size), type_(type) {
- ASSERT(IsValid());
- }
-
- CPURegList(CPURegister::RegisterType type, unsigned size,
- unsigned first_reg, unsigned last_reg)
- : size_(size), type_(type) {
- ASSERT(((type == CPURegister::kRegister) &&
- (last_reg < kNumberOfRegisters)) ||
- ((type == CPURegister::kFPRegister) &&
- (last_reg < kNumberOfFPRegisters)));
- ASSERT(last_reg >= first_reg);
- list_ = (1UL << (last_reg + 1)) - 1;
- list_ &= ~((1UL << first_reg) - 1);
- ASSERT(IsValid());
- }
-
- CPURegister::RegisterType type() const {
- ASSERT(IsValid());
- return type_;
- }
-
- RegList list() const {
- ASSERT(IsValid());
- return list_;
- }
-
- // Combine another CPURegList into this one. Registers that already exist in
- // this list are left unchanged. The type and size of the registers in the
- // 'other' list must match those in this list.
- void Combine(const CPURegList& other);
-
- // Remove every register in the other CPURegList from this one. Registers that
- // do not exist in this list are ignored. The type and size of the registers
- // in the 'other' list must match those in this list.
- void Remove(const CPURegList& other);
-
- // Variants of Combine and Remove which take a single register.
- void Combine(const CPURegister& other);
- void Remove(const CPURegister& other);
-
-  // Variants of Combine and Remove which take a single register by its code;
-  // the type and size of the register are inferred from this list.
- void Combine(int code);
- void Remove(int code);
-
- // Remove all callee-saved registers from the list. This can be useful when
- // preparing registers for an AAPCS64 function call, for example.
- void RemoveCalleeSaved();
-
- CPURegister PopLowestIndex();
- CPURegister PopHighestIndex();
-
- // AAPCS64 callee-saved registers.
- static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
- static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
-
- // AAPCS64 caller-saved registers. Note that this includes lr.
- static CPURegList GetCallerSaved(unsigned size = kXRegSize);
- static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
-
- // Registers saved as safepoints.
- static CPURegList GetSafepointSavedRegisters();
-
- bool IsEmpty() const {
- ASSERT(IsValid());
- return list_ == 0;
- }
-
- bool IncludesAliasOf(const CPURegister& other) const {
- ASSERT(IsValid());
- return (type_ == other.type()) && (other.Bit() & list_);
- }
-
- int Count() const {
- ASSERT(IsValid());
- return CountSetBits(list_, kRegListSizeInBits);
- }
-
- unsigned RegisterSizeInBits() const {
- ASSERT(IsValid());
- return size_;
- }
-
- unsigned RegisterSizeInBytes() const {
- int size_in_bits = RegisterSizeInBits();
- ASSERT((size_in_bits % kBitsPerByte) == 0);
- return size_in_bits / kBitsPerByte;
- }
-
- private:
- RegList list_;
- unsigned size_;
- CPURegister::RegisterType type_;
-
- bool IsValid() const {
- if ((type_ == CPURegister::kRegister) ||
- (type_ == CPURegister::kFPRegister)) {
- bool is_valid = true;
- // Try to create a CPURegister for each element in the list.
- for (int i = 0; i < kRegListSizeInBits; i++) {
- if (((list_ >> i) & 1) != 0) {
- is_valid &= CPURegister::Create(i, size_, type_).IsValid();
- }
- }
- return is_valid;
- } else if (type_ == CPURegister::kNoRegister) {
- // The kNoRegister type is valid only for empty lists.
- // We can't use IsEmpty here because that asserts IsValid().
- return list_ == 0;
- } else {
- return false;
- }
- }
-};
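-
-// A minimal usage sketch (illustrative only, using the register statics
-// declared above):
-//   CPURegList list(x0, x1, x2);                  // explicit registers
-//   list.Combine(x3);                             // now {x0, x1, x2, x3}
-//   CPURegister lowest = list.PopLowestIndex();   // removes and returns x0
-//   // Or build from a contiguous range of codes:
-//   CPURegList saved(CPURegister::kRegister, kXRegSize, 19, 25);  // x19-x25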
-
-
-// AAPCS64 callee-saved registers.
-#define kCalleeSaved CPURegList::GetCalleeSaved()
-#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
-
-
-// AAPCS64 caller-saved registers. Note that this includes lr.
-#define kCallerSaved CPURegList::GetCallerSaved()
-#define kCallerSavedFP CPURegList::GetCallerSavedFP()
-
-
-// -----------------------------------------------------------------------------
-// Operands.
-const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
-
-// Represents an operand in a machine instruction.
-class Operand {
-  // TODO(all): If necessary, study in more detail which methods should be
-  // inlined.
- public:
- // rm, {<shift> {#<shift_amount>}}
- // where <shift> is one of {LSL, LSR, ASR, ROR}.
- // <shift_amount> is uint6_t.
- // This is allowed to be an implicit constructor because Operand is
- // a wrapper class that doesn't normally perform any type conversion.
- inline Operand(Register reg,
- Shift shift = LSL,
- unsigned shift_amount = 0); // NOLINT(runtime/explicit)
-
- // rm, <extend> {#<shift_amount>}
- // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
- // <shift_amount> is uint2_t.
- inline Operand(Register reg,
- Extend extend,
- unsigned shift_amount = 0);
-
- template<typename T>
- inline explicit Operand(Handle<T> handle);
-
- // Implicit constructor for all int types, ExternalReference, and Smi.
- template<typename T>
- inline Operand(T t); // NOLINT(runtime/explicit)
-
- // Implicit constructor for int types.
- template<typename int_t>
- inline Operand(int_t t, RelocInfo::Mode rmode);
-
- inline bool IsImmediate() const;
- inline bool IsShiftedRegister() const;
- inline bool IsExtendedRegister() const;
- inline bool IsZero() const;
-
- // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
- // which helps in the encoding of instructions that use the stack pointer.
- inline Operand ToExtendedRegister() const;
-
- inline int64_t immediate() const;
- inline Register reg() const;
- inline Shift shift() const;
- inline Extend extend() const;
- inline unsigned shift_amount() const;
-
- // Relocation information.
- RelocInfo::Mode rmode() const { return rmode_; }
- void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
- bool NeedsRelocation() const;
-
- // Helpers
- inline static Operand UntagSmi(Register smi);
- inline static Operand UntagSmiAndScale(Register smi, int scale);
-
- private:
- void initialize_handle(Handle<Object> value);
- int64_t immediate_;
- Register reg_;
- Shift shift_;
- Extend extend_;
- unsigned shift_amount_;
- RelocInfo::Mode rmode_;
-};
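-
-// Illustrative examples of the operand forms above (a sketch, not part of
-// the original file):
-//   Operand(x1)            // plain register, LSL #0
-//   Operand(x1, LSL, 4)    // shifted register: x1 << 4
-//   Operand(x1, SXTW)      // extended register: x1 sign-extended from 32 bits
-//   Operand(256)           // immediate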
-
-
-// MemOperand represents a memory operand in a load or store instruction.
-class MemOperand {
- public:
- inline explicit MemOperand(Register base,
- ptrdiff_t offset = 0,
- AddrMode addrmode = Offset);
- inline explicit MemOperand(Register base,
- Register regoffset,
- Shift shift = LSL,
- unsigned shift_amount = 0);
- inline explicit MemOperand(Register base,
- Register regoffset,
- Extend extend,
- unsigned shift_amount = 0);
- inline explicit MemOperand(Register base,
- const Operand& offset,
- AddrMode addrmode = Offset);
-
- const Register& base() const { return base_; }
- const Register& regoffset() const { return regoffset_; }
- ptrdiff_t offset() const { return offset_; }
- AddrMode addrmode() const { return addrmode_; }
- Shift shift() const { return shift_; }
- Extend extend() const { return extend_; }
- unsigned shift_amount() const { return shift_amount_; }
- inline bool IsImmediateOffset() const;
- inline bool IsRegisterOffset() const;
- inline bool IsPreIndex() const;
- inline bool IsPostIndex() const;
-
- // For offset modes, return the offset as an Operand. This helper cannot
- // handle indexed modes.
- inline Operand OffsetAsOperand() const;
-
- private:
- Register base_;
- Register regoffset_;
- ptrdiff_t offset_;
- AddrMode addrmode_;
- Shift shift_;
- Extend extend_;
- unsigned shift_amount_;
-};
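-
-// Illustrative examples of the addressing modes above (a sketch, not part of
-// the original file):
-//   MemOperand(x0)                 // [x0]
-//   MemOperand(x0, 8)              // [x0, #8]         (immediate offset)
-//   MemOperand(x0, 8, PreIndex)    // [x0, #8]!        (pre-index)
-//   MemOperand(x0, 8, PostIndex)   // [x0], #8         (post-index)
-//   MemOperand(x0, x1, LSL, 3)     // [x0, x1, lsl #3] (register offset)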
-
-
-// -----------------------------------------------------------------------------
-// Assembler.
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
-
- virtual ~Assembler();
-
- // System functions ---------------------------------------------------------
- // Start generating code from the beginning of the buffer, discarding any code
- // and data that has already been emitted into the buffer.
- //
- // In order to avoid any accidental transfer of state, Reset ASSERTs that the
- // constant pool is not blocked.
- void Reset();
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- //
- // The descriptor (desc) can be NULL. In that case, the code is finalized as
- // usual, but the descriptor is not populated.
- void GetCode(CodeDesc* desc);
-
-  // Insert the smallest number of nop instructions possible to align the pc
-  // offset to a multiple of m. m must be a power of 2 (>= 4).
- void Align(int m);
-
- inline void Unreachable();
-
- // Label --------------------------------------------------------------------
- // Bind a label to the current pc. Note that labels can only be bound once,
- // and if labels are linked to other instructions, they _must_ be bound
- // before they go out of scope.
- void bind(Label* label);
-
-
- // RelocInfo and constant pool ----------------------------------------------
-
- // Record relocation information for current pc_.
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- inline static Address target_pointer_address_at(Address pc);
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address of
- // that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // Given the address of the beginning of a call, return the address in the
- // instruction stream that call will return from.
- inline static Address return_address_from_call_start(Address pc);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
-
- // All addresses in the constant pool are the same size as pointers.
- static const int kSpecialTargetSize = kPointerSize;
-
- // The sizes of the call sequences emitted by MacroAssembler::Call.
- // Wherever possible, use MacroAssembler::CallSize instead of these constants,
- // as it will choose the correct value for a given relocation mode.
- //
- // Without relocation:
- // movz ip0, #(target & 0x000000000000ffff)
- // movk ip0, #(target & 0x00000000ffff0000)
- // movk ip0, #(target & 0x0000ffff00000000)
- // movk ip0, #(target & 0xffff000000000000)
- // blr ip0
- //
- // With relocation:
- // ldr ip0, =target
- // blr ip0
- static const int kCallSizeWithoutRelocation = 5 * kInstructionSize;
- static const int kCallSizeWithRelocation = 2 * kInstructionSize;
-
-  // Size of the generated code in bytes.
- uint64_t SizeOfGeneratedCode() const {
- ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
- return pc_ - buffer_;
- }
-
- // Return the code size generated from label to the current position.
- uint64_t SizeOfCodeGeneratedSince(const Label* label) {
- ASSERT(label->is_bound());
- ASSERT(pc_offset() >= label->pos());
- ASSERT(pc_offset() < buffer_size_);
- return pc_offset() - label->pos();
- }
-
- // Check the size of the code generated since the given label. This function
- // is used primarily to work around comparisons between signed and unsigned
- // quantities, since V8 uses both.
- // TODO(jbramley): Work out what sign to use for these things and if possible,
- // change things to be consistent.
- void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- ASSERT(size >= 0);
- ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
- }
-
- // Return the number of instructions generated from label to the
- // current position.
- int InstructionsGeneratedSince(const Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstructionSize;
- }
-
-  // TODO(all): Initialize these constants related to code patching.
- // TODO(all): Set to -1 to hopefully crash if mistakenly used.
-
- // Number of instructions generated for the return sequence in
- // FullCodeGenerator::EmitReturnSequence.
- static const int kJSRetSequenceInstructions = 7;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
- static const int kPatchDebugBreakSlotAddressOffset = 0;
-
- // Number of instructions necessary to be able to later patch it to a call.
- // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
- static const int kDebugBreakSlotInstructions = 4;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstructionSize;
-
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
-
-  // Prevent constant pool emission until EndBlockConstPool is called.
-  // Calls to this function can be nested but must be followed by an equal
-  // number of calls to EndBlockConstPool.
-  void StartBlockConstPool();
-
-  // Resume constant pool emission. Needs to be called as many times as
-  // StartBlockConstPool to have an effect.
-  void EndBlockConstPool();
-
- bool is_const_pool_blocked() const;
- static bool IsConstantPoolAt(Instruction* instr);
- static int ConstantPoolSizeAt(Instruction* instr);
- // See Assembler::CheckConstPool for more info.
- void ConstantPoolMarker(uint32_t size);
- void ConstantPoolGuard();
-
-
- // Debugging ----------------------------------------------------------------
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
- void RecordComment(const char* msg);
- int buffer_space() const;
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record the emission of a constant pool.
- //
-  // The emission of the constant pool depends on the size of the code
-  // generated and the number of RelocInfo recorded.
- // The Debug mechanism needs to map code offsets between two versions of a
- // function, compiled with and without debugger support (see for example
- // Debug::PrepareForBreakPoints()).
- // Compiling functions with debugger support generates additional code
- // (Debug::GenerateSlot()). This may affect the emission of the constant
- // pools and cause the version of the code with debugger support to have
- // constant pools generated in different places.
-  // Recording the position and size of emitted constant pools allows us to
-  // correctly compute the offset mappings between the different versions of a
-  // function in all situations.
- //
- // The parameter indicates the size of the constant pool (in bytes), including
- // the marker and branch over the data.
- void RecordConstPool(int size);
-
-
- // Instruction set functions ------------------------------------------------
-
- // Branch / Jump instructions.
-  // For branches, offsets are scaled, i.e. they are in instructions, not in
-  // bytes.
- // Branch to register.
- void br(const Register& xn);
-
- // Branch-link to register.
- void blr(const Register& xn);
-
- // Branch to register with return hint.
- void ret(const Register& xn = lr);
-
- // Unconditional branch to label.
- void b(Label* label);
-
- // Conditional branch to label.
- void b(Label* label, Condition cond);
-
- // Unconditional branch to PC offset.
- void b(int imm26);
-
- // Conditional branch to PC offset.
- void b(int imm19, Condition cond);
-
- // Branch-link to label / pc offset.
- void bl(Label* label);
- void bl(int imm26);
-
- // Compare and branch to label / pc offset if zero.
- void cbz(const Register& rt, Label* label);
- void cbz(const Register& rt, int imm19);
-
- // Compare and branch to label / pc offset if not zero.
- void cbnz(const Register& rt, Label* label);
- void cbnz(const Register& rt, int imm19);
-
- // Test bit and branch to label / pc offset if zero.
- void tbz(const Register& rt, unsigned bit_pos, Label* label);
- void tbz(const Register& rt, unsigned bit_pos, int imm14);
-
- // Test bit and branch to label / pc offset if not zero.
- void tbnz(const Register& rt, unsigned bit_pos, Label* label);
- void tbnz(const Register& rt, unsigned bit_pos, int imm14);
-
- // Address calculation instructions.
-  // Calculate a PC-relative address. Unlike for branches, the offset in adr
-  // is unscaled (i.e. the result can be unaligned).
- void adr(const Register& rd, Label* label);
- void adr(const Register& rd, int imm21);
-
- // Data Processing instructions.
- // Add.
- void add(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Add and update status flags.
- void adds(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Compare negative.
- void cmn(const Register& rn, const Operand& operand);
-
- // Subtract.
- void sub(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Subtract and update status flags.
- void subs(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Compare.
- void cmp(const Register& rn, const Operand& operand);
-
- // Negate.
- void neg(const Register& rd,
- const Operand& operand);
-
- // Negate and update status flags.
- void negs(const Register& rd,
- const Operand& operand);
-
- // Add with carry bit.
- void adc(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Add with carry bit and update status flags.
- void adcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Subtract with carry bit.
- void sbc(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Subtract with carry bit and update status flags.
- void sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Negate with carry bit.
- void ngc(const Register& rd,
- const Operand& operand);
-
- // Negate with carry bit and update status flags.
- void ngcs(const Register& rd,
- const Operand& operand);
-
- // Logical instructions.
- // Bitwise and (A & B).
- void and_(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Bitwise and (A & B) and update status flags.
- void ands(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Bit test, and set flags.
- void tst(const Register& rn, const Operand& operand);
-
- // Bit clear (A & ~B).
- void bic(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Bit clear (A & ~B) and update status flags.
- void bics(const Register& rd,
- const Register& rn,
- const Operand& operand);
-
- // Bitwise or (A | B).
- void orr(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Bitwise or-not (A | ~B).
- void orn(const Register& rd, const Register& rn, const Operand& operand);
-
- // Bitwise eor/xor (A ^ B).
- void eor(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Bitwise exclusive-or-not/xnor (A ^ ~B).
- void eon(const Register& rd, const Register& rn, const Operand& operand);
-
- // Logical shift left variable.
- void lslv(const Register& rd, const Register& rn, const Register& rm);
-
- // Logical shift right variable.
- void lsrv(const Register& rd, const Register& rn, const Register& rm);
-
- // Arithmetic shift right variable.
- void asrv(const Register& rd, const Register& rn, const Register& rm);
-
- // Rotate right variable.
- void rorv(const Register& rd, const Register& rn, const Register& rm);
-
- // Bitfield instructions.
- // Bitfield move.
- void bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
-
- // Signed bitfield move.
- void sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
-
- // Unsigned bitfield move.
- void ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
-
- // Bfm aliases.
- // Bitfield insert.
- void bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
- }
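-
-  // For example (illustrative only), on 64-bit registers bfi(x0, x1, 8, 16)
-  // inserts bits <15:0> of x1 into bits <23:8> of x0; it encodes as
-  // bfm(x0, x1, (64 - 8) & 63, 15), i.e. immr = 56 and imms = 15.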
-
- // Bitfield extract and insert low.
- void bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- bfm(rd, rn, lsb, lsb + width - 1);
- }
-
- // Sbfm aliases.
- // Arithmetic shift right.
- void asr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
- sbfm(rd, rn, shift, rd.SizeInBits() - 1);
- }
-
- // Signed bitfield insert in zero.
- void sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
- }
-
- // Signed bitfield extract.
- void sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- sbfm(rd, rn, lsb, lsb + width - 1);
- }
-
- // Signed extend byte.
- void sxtb(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 7);
- }
-
- // Signed extend halfword.
- void sxth(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 15);
- }
-
- // Signed extend word.
- void sxtw(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 31);
- }
-
- // Ubfm aliases.
- // Logical shift left.
- void lsl(const Register& rd, const Register& rn, unsigned shift) {
- unsigned reg_size = rd.SizeInBits();
- ASSERT(shift < reg_size);
- ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
- }
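-
-  // For example (illustrative only), lsl(x0, x1, 8) encodes as
-  // ubfm(x0, x1, 56, 55): the source is rotated right by immr = 56 (i.e.
-  // left by 8) and imms = 55 selects the low 56 bits of x1, giving x1 << 8.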
-
- // Logical shift right.
- void lsr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
- ubfm(rd, rn, shift, rd.SizeInBits() - 1);
- }
-
- // Unsigned bitfield insert in zero.
- void ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
- }
-
- // Unsigned bitfield extract.
- void ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
- ubfm(rd, rn, lsb, lsb + width - 1);
- }
-
- // Unsigned extend byte.
- void uxtb(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 7);
- }
-
- // Unsigned extend halfword.
- void uxth(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 15);
- }
-
- // Unsigned extend word.
- void uxtw(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 31);
- }
-
- // Extract.
- void extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb);
-
- // Conditional select: rd = cond ? rn : rm.
- void csel(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
-
- // Conditional select increment: rd = cond ? rn : rm + 1.
- void csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
-
- // Conditional select inversion: rd = cond ? rn : ~rm.
- void csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
-
- // Conditional select negation: rd = cond ? rn : -rm.
- void csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
-
- // Conditional set: rd = cond ? 1 : 0.
- void cset(const Register& rd, Condition cond);
-
- // Conditional set minus: rd = cond ? -1 : 0.
- void csetm(const Register& rd, Condition cond);
-
- // Conditional increment: rd = cond ? rn + 1 : rn.
- void cinc(const Register& rd, const Register& rn, Condition cond);
-
- // Conditional invert: rd = cond ? ~rn : rn.
- void cinv(const Register& rd, const Register& rn, Condition cond);
-
- // Conditional negate: rd = cond ? -rn : rn.
- void cneg(const Register& rd, const Register& rn, Condition cond);
-
- // Extr aliases.
- void ror(const Register& rd, const Register& rs, unsigned shift) {
- extr(rd, rs, rs, shift);
- }
-
- // Conditional comparison.
- // Conditional compare negative.
- void ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
-
- // Conditional compare.
- void ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
-
- // Multiplication.
- // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
- void mul(const Register& rd, const Register& rn, const Register& rm);
-
- // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
- void madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
- void mneg(const Register& rd, const Register& rn, const Register& rm);
-
- // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
- void msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // 32 x 32 -> 64-bit multiply.
- void smull(const Register& rd, const Register& rn, const Register& rm);
-
- // Xd = bits<127:64> of Xn * Xm.
- void smulh(const Register& rd, const Register& rn, const Register& rm);
-
- // Signed 32 x 32 -> 64-bit multiply and accumulate.
- void smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
- void umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // Signed 32 x 32 -> 64-bit multiply and subtract.
- void smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // Unsigned 32 x 32 -> 64-bit multiply and subtract.
- void umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
-
- // Signed integer divide.
- void sdiv(const Register& rd, const Register& rn, const Register& rm);
-
- // Unsigned integer divide.
- void udiv(const Register& rd, const Register& rn, const Register& rm);
-
- // Bit count, bit reverse and endian reverse.
- void rbit(const Register& rd, const Register& rn);
- void rev16(const Register& rd, const Register& rn);
- void rev32(const Register& rd, const Register& rn);
- void rev(const Register& rd, const Register& rn);
- void clz(const Register& rd, const Register& rn);
- void cls(const Register& rd, const Register& rn);
-
- // Memory instructions.
-
- // Load literal from pc + offset_from_pc.
- void LoadLiteral(const CPURegister& rt, int offset_from_pc);
-
- // Load integer or FP register.
- void ldr(const CPURegister& rt, const MemOperand& src);
-
- // Store integer or FP register.
- void str(const CPURegister& rt, const MemOperand& dst);
-
- // Load word with sign extension.
- void ldrsw(const Register& rt, const MemOperand& src);
-
- // Load byte.
- void ldrb(const Register& rt, const MemOperand& src);
-
- // Store byte.
- void strb(const Register& rt, const MemOperand& dst);
-
- // Load byte with sign extension.
- void ldrsb(const Register& rt, const MemOperand& src);
-
- // Load half-word.
- void ldrh(const Register& rt, const MemOperand& src);
-
- // Store half-word.
- void strh(const Register& rt, const MemOperand& dst);
-
- // Load half-word with sign extension.
- void ldrsh(const Register& rt, const MemOperand& src);
-
- // Load integer or FP register pair.
- void ldp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& src);
-
- // Store integer or FP register pair.
- void stp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& dst);
-
- // Load word pair with sign extension.
- void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
-
- // Load integer or FP register pair, non-temporal.
- void ldnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& src);
-
- // Store integer or FP register pair, non-temporal.
- void stnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& dst);
-
- // Load literal to register.
- void ldr(const Register& rt, uint64_t imm);
-
- // Load literal to FP register.
- void ldr(const FPRegister& ft, double imm);
-
-  // Move instructions. The default shift of -1 indicates that the move
-  // instruction will calculate an appropriate 16-bit immediate and left shift
-  // combination equal to the 64-bit immediate argument. If an explicit left
-  // shift is specified (0, 16, 32 or 48), the immediate must be a 16-bit
-  // value.
-  //
-  // For movk, an explicit shift can be used to indicate which half word should
-  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
-  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
-  // most-significant.
-
- // Move and keep.
- void movk(const Register& rd, uint64_t imm, int shift = -1) {
- MoveWide(rd, imm, shift, MOVK);
- }
-
-  // Move with NOT, i.e. rd = ~(imm << shift).
- void movn(const Register& rd, uint64_t imm, int shift = -1) {
- MoveWide(rd, imm, shift, MOVN);
- }
-
- // Move with zero.
- void movz(const Register& rd, uint64_t imm, int shift = -1) {
- MoveWide(rd, imm, shift, MOVZ);
- }
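-
-  // As an illustrative sketch (not part of the original file), a 64-bit
-  // immediate such as 0x123456789abcdef0 can be materialized with one movz
-  // followed by one movk per remaining half word:
-  //   movz(x0, 0xdef0);      // x0 = 0x000000000000def0
-  //   movk(x0, 0x9abc, 16);  // x0 = 0x000000009abcdef0
-  //   movk(x0, 0x5678, 32);  // x0 = 0x000056789abcdef0
-  //   movk(x0, 0x1234, 48);  // x0 = 0x123456789abcdef0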
-
- // Misc instructions.
- // Monitor debug-mode breakpoint.
- void brk(int code);
-
- // Halting debug-mode breakpoint.
- void hlt(int code);
-
- // Move register to register.
- void mov(const Register& rd, const Register& rn);
-
- // Move NOT(operand) to register.
- void mvn(const Register& rd, const Operand& operand);
-
- // System instructions.
- // Move to register from system register.
- void mrs(const Register& rt, SystemRegister sysreg);
-
- // Move from register to system register.
- void msr(SystemRegister sysreg, const Register& rt);
-
- // System hint.
- void hint(SystemHint code);
-
-  // Data memory barrier.
- void dmb(BarrierDomain domain, BarrierType type);
-
-  // Data synchronization barrier.
- void dsb(BarrierDomain domain, BarrierType type);
-
-  // Instruction synchronization barrier.
- void isb();
-
- // Alias for system instructions.
- void nop() { hint(NOP); }
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- DEBUG_BREAK_NOP,
- INTERRUPT_CODE_NOP,
- FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
- LAST_NOP_MARKER = INTERRUPT_CODE_NOP
- };
-
- void nop(NopMarkerTypes n) {
- ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
- mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
- }
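-
-  // For instance (illustrative only), nop(DEBUG_BREAK_NOP) emits the marker
-  // mov x0, x0, since DEBUG_BREAK_NOP is the first (zero-valued) marker.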
-
- // FP instructions.
- // Move immediate to FP register.
- void fmov(FPRegister fd, double imm);
-
- // Move FP register to register.
- void fmov(Register rd, FPRegister fn);
-
- // Move register to FP register.
- void fmov(FPRegister fd, Register rn);
-
- // Move FP register to FP register.
- void fmov(FPRegister fd, FPRegister fn);
-
- // FP add.
- void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP subtract.
- void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP multiply.
- void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP fused multiply and add.
- void fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply and subtract.
- void fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply, add and negate.
- void fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply, subtract and negate.
- void fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP divide.
- void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP maximum.
- void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP minimum.
- void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP maximum.
- void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP minimum.
- void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP absolute.
- void fabs(const FPRegister& fd, const FPRegister& fn);
-
- // FP negate.
- void fneg(const FPRegister& fd, const FPRegister& fn);
-
- // FP square root.
- void fsqrt(const FPRegister& fd, const FPRegister& fn);
-
- // FP round to integer (nearest with ties to away).
- void frinta(const FPRegister& fd, const FPRegister& fn);
-
- // FP round to integer (nearest with ties to even).
- void frintn(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (towards zero).
- void frintz(const FPRegister& fd, const FPRegister& fn);
-
- // FP compare registers.
- void fcmp(const FPRegister& fn, const FPRegister& fm);
-
- // FP compare immediate.
- void fcmp(const FPRegister& fn, double value);
-
- // FP conditional compare.
- void fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond);
-
- // FP conditional select.
- void fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond);
-
- // Common FP Convert function
- void FPConvertToInt(const Register& rd,
- const FPRegister& fn,
- FPIntegerConvertOp op);
-
- // FP convert between single and double precision.
- void fcvt(const FPRegister& fd, const FPRegister& fn);
-
- // Convert FP to unsigned integer (nearest with ties to away).
- void fcvtau(const Register& rd, const FPRegister& fn);
-
- // Convert FP to signed integer (nearest with ties to away).
- void fcvtas(const Register& rd, const FPRegister& fn);
-
- // Convert FP to unsigned integer (round towards -infinity).
- void fcvtmu(const Register& rd, const FPRegister& fn);
-
- // Convert FP to signed integer (round towards -infinity).
- void fcvtms(const Register& rd, const FPRegister& fn);
-
- // Convert FP to unsigned integer (nearest with ties to even).
- void fcvtnu(const Register& rd, const FPRegister& fn);
-
- // Convert FP to signed integer (nearest with ties to even).
- void fcvtns(const Register& rd, const FPRegister& fn);
-
- // Convert FP to unsigned integer (round towards zero).
- void fcvtzu(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to signed integer (round towards zero).
- void fcvtzs(const Register& rd, const FPRegister& fn);
-
- // Convert signed integer or fixed point to FP.
- void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
-
- // Convert unsigned integer or fixed point to FP.
- void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
-
- // Instruction functions used only for test, debug, and patching.
- // Emit raw instructions in the instruction stream.
- void dci(Instr raw_inst) { Emit(raw_inst); }
-
- // Emit 8 bits of data in the instruction stream.
- void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
-
- // Emit 32 bits of data in the instruction stream.
- void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
-
- // Emit 64 bits of data in the instruction stream.
- void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
-
- // Copy a string into the instruction stream, including the terminating NULL
- // character. The instruction pointer (pc_) is then aligned correctly for
- // subsequent instructions.
- void EmitStringData(const char * string) {
- size_t len = strlen(string) + 1;
- ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
- EmitData(string, len);
- // Pad with NULL characters until pc_ is aligned.
- const char pad[] = {'\0', '\0', '\0', '\0'};
- STATIC_ASSERT(sizeof(pad) == kInstructionSize);
- byte* next_pc = AlignUp(pc_, kInstructionSize);
- EmitData(&pad, next_pc - pc_);
- }
-
- // Pseudo-instructions ------------------------------------------------------
-
- // Parameters are described in a64/instructions-a64.h.
- void debug(const char* message, uint32_t code, Instr params = BREAK);
-
- // Required by V8.
- void dd(uint32_t data) { dc32(data); }
- void db(uint8_t data) { dc8(data); }
-
- // Code generation helpers --------------------------------------------------
-
- unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
-
- Instruction* InstructionAt(int offset) const {
- return reinterpret_cast<Instruction*>(buffer_ + offset);
- }
-
- // Register encoding.
- static Instr Rd(CPURegister rd) {
- ASSERT(rd.code() != kSPRegInternalCode);
- return rd.code() << Rd_offset;
- }
-
- static Instr Rn(CPURegister rn) {
- ASSERT(rn.code() != kSPRegInternalCode);
- return rn.code() << Rn_offset;
- }
-
- static Instr Rm(CPURegister rm) {
- ASSERT(rm.code() != kSPRegInternalCode);
- return rm.code() << Rm_offset;
- }
-
- static Instr Ra(CPURegister ra) {
- ASSERT(ra.code() != kSPRegInternalCode);
- return ra.code() << Ra_offset;
- }
-
- static Instr Rt(CPURegister rt) {
- ASSERT(rt.code() != kSPRegInternalCode);
- return rt.code() << Rt_offset;
- }
-
- static Instr Rt2(CPURegister rt2) {
- ASSERT(rt2.code() != kSPRegInternalCode);
- return rt2.code() << Rt2_offset;
- }
-
- // These encoding functions allow the stack pointer to be encoded, and
- // disallow the zero register.
- static Instr RdSP(Register rd) {
- ASSERT(!rd.IsZero());
- return (rd.code() & kRegCodeMask) << Rd_offset;
- }
-
- static Instr RnSP(Register rn) {
- ASSERT(!rn.IsZero());
- return (rn.code() & kRegCodeMask) << Rn_offset;
- }
-
- // Flags encoding.
- inline static Instr Flags(FlagsUpdate S);
- inline static Instr Cond(Condition cond);
-
- // PC-relative address encoding.
- inline static Instr ImmPCRelAddress(int imm21);
-
- // Branch encoding.
- inline static Instr ImmUncondBranch(int imm26);
- inline static Instr ImmCondBranch(int imm19);
- inline static Instr ImmCmpBranch(int imm19);
- inline static Instr ImmTestBranch(int imm14);
- inline static Instr ImmTestBranchBit(unsigned bit_pos);
-
- // Data Processing encoding.
- inline static Instr SF(Register rd);
- inline static Instr ImmAddSub(int64_t imm);
- inline static Instr ImmS(unsigned imms, unsigned reg_size);
- inline static Instr ImmR(unsigned immr, unsigned reg_size);
- inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
- inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
- inline static Instr ImmLLiteral(int imm19);
- inline static Instr BitN(unsigned bitn, unsigned reg_size);
- inline static Instr ShiftDP(Shift shift);
- inline static Instr ImmDPShift(unsigned amount);
- inline static Instr ExtendMode(Extend extend);
- inline static Instr ImmExtendShift(unsigned left_shift);
- inline static Instr ImmCondCmp(unsigned imm);
- inline static Instr Nzcv(StatusFlags nzcv);
-
- // MemOperand offset encoding.
- inline static Instr ImmLSUnsigned(int imm12);
- inline static Instr ImmLS(int imm9);
- inline static Instr ImmLSPair(int imm7, LSDataSize size);
- inline static Instr ImmShiftLS(unsigned shift_amount);
- inline static Instr ImmException(int imm16);
- inline static Instr ImmSystemRegister(int imm15);
- inline static Instr ImmHint(int imm7);
- inline static Instr ImmBarrierDomain(int imm2);
- inline static Instr ImmBarrierType(int imm2);
- inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
-
- // Move immediates encoding.
- inline static Instr ImmMoveWide(uint64_t imm);
- inline static Instr ShiftMoveWide(int64_t shift);
-
- // FP Immediates.
- static Instr ImmFP32(float imm);
- static Instr ImmFP64(double imm);
- inline static Instr FPScale(unsigned scale);
-
- // FP register type.
- inline static Instr FPType(FPRegister fd);
-
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
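-
-  // A usage sketch (illustrative only), mirroring CheckConstPool in the
-  // implementation above:
-  //   {
-  //     BlockConstPoolScope block_const_pool(this);
-  //     // ... emit code that must not be split by a constant pool ...
-  //   }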
-
-  // Check if it is time to emit a constant pool.
- void CheckConstPool(bool force_emit, bool require_jump);
-
- // Available for constrained code generation scopes. Prefer
- // MacroAssembler::Mov() when possible.
- inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
-
- protected:
- inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
-
- void LoadStore(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op);
- static bool IsImmLSUnscaled(ptrdiff_t offset);
- static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
-
- void Logical(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op);
- void LogicalImmediate(const Register& rd,
- const Register& rn,
- unsigned n,
- unsigned imm_s,
- unsigned imm_r,
- LogicalOp op);
- static bool IsImmLogical(uint64_t value,
- unsigned width,
- unsigned* n,
- unsigned* imm_s,
- unsigned* imm_r);
-
- void ConditionalCompare(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
- ConditionalCompareOp op);
- static bool IsImmConditionalCompare(int64_t immediate);
-
- void AddSubWithCarry(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubWithCarryOp op);
-
- // Functions for emulating operands not directly supported by the instruction
- // set.
- void EmitShift(const Register& rd,
- const Register& rn,
- Shift shift,
- unsigned amount);
- void EmitExtendShift(const Register& rd,
- const Register& rn,
- Extend extend,
- unsigned left_shift);
-
- void AddSub(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op);
- static bool IsImmAddSub(int64_t immediate);
-
- static bool IsImmFP32(float imm);
- static bool IsImmFP64(double imm);
-
- // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
- // registers. Only simple loads are supported; sign- and zero-extension (such
- // as in LDPSW_x or LDRB_w) are not supported.
- static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
- static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
- const CPURegister& rt2);
- static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
- static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
- const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
-
- // Remove the specified branch from the unbound label link chain.
- // If available, a veneer for this label can be used for other branches in the
- // chain if the link chain cannot be fixed up without this branch.
- void RemoveBranchFromLabelLinkChain(Instruction* branch,
- Label* label,
- Instruction* label_veneer = NULL);
-
- private:
- // Instruction helpers.
- void MoveWide(const Register& rd,
- uint64_t imm,
- int shift,
- MoveWideImmediateOp mov_op);
- void DataProcShiftedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- Instr op);
- void DataProcExtendedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- Instr op);
- void LoadStorePair(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairOp op);
- void LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op);
- // Register the relocation information for the operand and load its value
- // into rt.
- void LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op);
- void ConditionalSelect(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond,
- ConditionalSelectOp op);
- void DataProcessing1Source(const Register& rd,
- const Register& rn,
- DataProcessing1SourceOp op);
- void DataProcessing3Source(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra,
- DataProcessing3SourceOp op);
- void FPDataProcessing1Source(const FPRegister& fd,
- const FPRegister& fn,
- FPDataProcessing1SourceOp op);
- void FPDataProcessing2Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- FPDataProcessing2SourceOp op);
- void FPDataProcessing3Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa,
- FPDataProcessing3SourceOp op);
-
- // Label helpers.
-
- // Return an offset for a label-referencing instruction, typically a branch.
- int LinkAndGetByteOffsetTo(Label* label);
-
- // This is the same as LinkAndGetByteOffsetTo, but returns an offset
- // suitable for fields that take instruction offsets.
- inline int LinkAndGetInstructionOffsetTo(Label* label);
-
- static const int kStartOfLabelLinkChain = 0;
-
- // Verify that a label's link chain is intact.
- void CheckLabelLinkChain(Label const * label);
-
- void RecordLiteral(int64_t imm, unsigned size);
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Emit the instruction at pc_.
- void Emit(Instr instruction) {
- STATIC_ASSERT(sizeof(*pc_) == 1);
- STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
- ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
-
- memcpy(pc_, &instruction, sizeof(instruction));
- pc_ += sizeof(instruction);
- CheckBuffer();
- }
-
- // Emit data inline in the instruction stream.
- void EmitData(void const * data, unsigned size) {
- ASSERT(sizeof(*pc_) == 1);
- ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
-
- // TODO(all): Somehow register we have some data here. Then we can
- // disassemble it correctly.
- memcpy(pc_, data, size);
- pc_ += size;
- CheckBuffer();
- }
-
- void GrowBuffer();
- void CheckBuffer();
-
- // PC offset of the next buffer check.
- int next_buffer_check_;
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
- // Constants in the pool may be addresses of functions that get relocated;
- // if so, a relocation info entry is associated with the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckPoolIntervalInst = 128;
- static const int kCheckPoolInterval =
- kCheckPoolIntervalInst * kInstructionSize;
-
- // Constants in pools are accessed via PC-relative addressing, which can
- // reach +/-4KB, thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToPool = 4 * KB;
- static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize;
-
-
- // Average distance between a constant pool and the first instruction
- // accessing the constant pool. A longer distance should result in less
- // I-cache pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
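-
- // With the fixed A64 instruction width (kInstructionSize == 4 bytes),
- // these constants work out to:
- //   kCheckPoolInterval      = 128 * 4    = 512 bytes
- //   kMaxNumPendingRelocInfo = 4096 / 4   = 1024 entries
- //   kAvgDistToPool          = 4096 - 512 = 3584 bytes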
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
- // Relocation info generation
- // Each relocation is encoded as a variable-size value.
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
-
- // The buffer of pending relocation info entries.
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // The number of pending reloc info entries in the buffer.
- int num_pending_reloc_info_;
-
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- inline TypeFeedbackId RecordedAstId();
- inline void ClearRecordedAstId();
-
- protected:
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries, and debug strings encoded in the instruction
- // stream.
- static const int kGap = 128;
-
- public:
- class FarBranchInfo {
- public:
- FarBranchInfo(int offset, Label* label)
- : pc_offset_(offset), label_(label) {}
- // Offset of the branch in the code generation buffer.
- int pc_offset_;
- // The label branched to.
- Label* label_;
- };
-
- protected:
- // Information about unresolved (forward) branches.
- // The Assembler is only allowed to delete out-of-date information from here
- // after a label is bound. The MacroAssembler uses this information to
- // generate veneers.
- //
- // The second member gives information about the unresolved branch. The first
- // member of the pair is the maximum offset that the branch can reach in the
- // buffer. The map is sorted according to this reachable offset, making it
- // easy to check when veneers need to be emitted.
- // Note that the maximum reachable offset (the first member of each pair)
- // should always be positive but has the same type as the return value of
- // pc_offset() for convenience.
- std::multimap<int, FarBranchInfo> unresolved_branches_;
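-
- // A sketch of the bookkeeping (the real call sites live in the assembler
- // and macro assembler; 'max_branch_range' stands in for the reach of the
- // particular branch type):
- //
- //   int max_reachable_offset = pc_offset() + max_branch_range;
- //   unresolved_branches_.insert(std::make_pair(
- //       max_reachable_offset, FarBranchInfo(pc_offset(), label)));
- //   // The smallest key is the earliest offset by which a veneer must
- //   // have been emitted:
- //   int next_veneer_deadline = unresolved_branches_.begin()->first;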
-
- private:
- // If a veneer is emitted for a branch instruction, that instruction must be
- // removed from the associated label's link chain so that the assembler does
- // not later attempt (likely unsuccessfully) to patch it to branch directly to
- // the label.
- void DeleteUnresolvedBranchInfoForLabel(Label* label);
-
- private:
- // TODO(jbramley): VIXL uses next_literal_pool_check_ and
- // literal_pool_monitor_ to determine when to consider emitting a literal
- // pool. V8 doesn't use them, so they should either not be here at all, or
- // should replace or be merged with next_buffer_check_ and
- // const_pool_blocked_nesting_.
- Instruction* next_literal_pool_check_;
- unsigned literal_pool_monitor_;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-class PatchingAssembler : public Assembler {
- public:
- // Create an Assembler with a buffer starting at 'start'.
- // The buffer size is
- //   (number of instructions to patch) * kInstructionSize + kGap
- // where kGap is the headroom at which the Assembler would try to grow the
- // buffer.
- // If more or fewer instructions than expected are generated, or if some
- // relocation information takes space in the buffer, the PatchingAssembler
- // will crash trying to grow the buffer.
- PatchingAssembler(Instruction* start, unsigned count)
- : Assembler(NULL,
- reinterpret_cast<byte*>(start),
- count * kInstructionSize + kGap) {
- // Block constant pool emission.
- StartBlockConstPool();
- }
-
- PatchingAssembler(byte* start, unsigned count)
- : Assembler(NULL, start, count * kInstructionSize + kGap) {
- // Block constant pool emission.
- StartBlockConstPool();
- }
-
- ~PatchingAssembler() {
- // Const pool should still be blocked.
- ASSERT(is_const_pool_blocked());
- EndBlockConstPool();
- // Verify that we have generated the number of instructions we expected.
- ASSERT((pc_offset() + kGap) == buffer_size_);
- // Verify no relocation information has been emitted.
- ASSERT(num_pending_reloc_info() == 0);
- // Flush the instruction cache.
- size_t length = buffer_size_ - kGap;
- CPU::FlushICache(buffer_, length);
- }
-};
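-
-// A minimal usage sketch ('target' is a hypothetical Instruction*): patch
-// exactly two instructions in place. The destructor then verifies the
-// instruction count, checks that no relocation information was emitted, and
-// flushes the instruction cache.
-//
-//   {
-//     PatchingAssembler patcher(target, 2);
-//     patcher.brk(0);
-//     patcher.brk(0);
-//   }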
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
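-
-// EnsureSpace is meant to be instantiated at the top of a function that
-// emits into the buffer, so that the growth check happens before any bytes
-// are written. A sketch ('SomeEmitter' is hypothetical):
-//
-//   void Assembler::SomeEmitter() {
-//     EnsureSpace ensure_space(this);
-//     // ... emit instructions or data ...
-//   }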
-
-} } // namespace v8::internal
-
-#endif // V8_A64_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/builtins-a64.cc b/deps/v8/src/a64/builtins-a64.cc
deleted file mode 100644
index 797fbc3a54..0000000000
--- a/deps/v8/src/a64/builtins-a64.cc
+++ /dev/null
@@ -1,1479 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ Ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ Ldr(result, ContextMemOperand(result,
- Context::INTERNAL_ARRAY_FUNCTION_INDEX));
-}
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments excluding receiver
- // -- x1 : called function (only guaranteed when
- // extra_args requires it)
- // -- cp : context
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == x0)
- // -- sp[4 * argc] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ Push(x1);
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects x0 to contain the number of arguments
- // including the receiver and the extra arguments.
- __ Add(x0, x0, num_extra_args + 1);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_InternalArrayCode");
- Label generic_array_code;
-
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, x1);
-
- if (FLAG_debug_code) {
- // The initial map for the builtin InternalArray function should be a map.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
- __ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
- __ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_ArrayCode");
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, x1);
-
- if (FLAG_debug_code) {
- // The initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
- __ Tst(x10, kSmiTagMask);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(x10, x11, x12, MAP_TYPE);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- }
-
- // Run the native code for the Array function called as a normal function.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Mov(x2, Operand(undefined_sentinel));
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- lr : return address
- // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
- // -- sp[argc * 8] : receiver
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_StringConstructCode");
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
-
- Register argc = x0;
- Register function = x1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
- __ Cmp(function, x10);
- __ Assert(eq, kUnexpectedStringFunction);
- }
-
- // Load the first argument into x0 and get rid of the rest.
- Label no_arguments;
- __ Cbz(argc, &no_arguments);
- // The first argument is at sp[(argc - 1) * 8].
- __ Sub(argc, argc, 1);
- __ Claim(argc, kXRegSizeInBytes);
- // jssp now points to args[0]; load args[0], then drop it and the receiver.
- // TODO(jbramley): Consider adding ClaimAndPoke.
- __ Ldr(argc, MemOperand(jssp, 2 * kPointerSize, PostIndex));
-
- Register argument = x2;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(argc, // Input.
- argument, // Result.
- x10, // Scratch.
- x11, // Scratch.
- x12, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
- __ Bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- x2 : argument converted to string
- // -- x1 : constructor function
- // -- lr : return address
- // -----------------------------------
-
- Label gc_required;
- Register new_obj = x0;
- __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
-
- // Initialize the String object.
- Register map = x3;
- __ LoadGlobalFunctionInitialMap(function, map, x10);
- if (FLAG_debug_code) {
- __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
- __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Cmp(x4, 0);
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
- }
- __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
-
- Register empty = x3;
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
- __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
-
- __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
-
- __ Ret();
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ Bind(&not_cached);
- __ JumpIfSmi(argc, &convert_argument);
-
- // Is it a String?
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
- __ Mov(argument, argc);
- __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
- __ B(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into x2.
- __ Bind(&convert_argument);
- __ Push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(argc);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
- __ Pop(function);
- __ Mov(argument, x0);
- __ B(&argument_is_string);
-
- // Load the empty string into x2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ Bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ B(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to create a
- // string wrapper.
- __ Bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ Ret();
-}
-
-
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // - Push a copy of the function onto the stack.
- // - Push another copy as a parameter to the runtime call.
- __ Push(x1, x1);
-
- __ CallRuntime(function_id, 1);
-
- // - Restore receiver.
- __ Pop(x1);
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x0);
-}
-
-
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However, not
- // checking may delay installing ready functions, and always checking would be
- // quite expensive. A good compromise is to first check against the stack
- // limit as a cue for an interrupt signal.
- Label ok;
- __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
-
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
-
- __ Bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
- // We should never count constructions for API objects.
- ASSERT(!is_api_function || !count_constructions);
-
- Isolate* isolate = masm->isolate();
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the two incoming parameters on the stack.
- Register argc = x0;
- Register constructor = x1;
- // x1: constructor function
- __ SmiTag(argc);
- __ Push(argc, constructor);
- // sp[0] : Constructor function.
- // sp[1]: number of arguments (smi-tagged)
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#if ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ Mov(x2, Operand(debug_step_in_fp));
- __ Ldr(x2, MemOperand(x2));
- __ Cbnz(x2, &rt_call);
-#endif
- // Load the initial map and verify that it is in fact a map.
- Register init_map = x2;
- __ Ldr(init_map,
- FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(init_map, &rt_call);
- __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc); in that case the initial
- // map's instance type would be JS_FUNCTION_TYPE.
- __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
- __ B(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ Ldr(x3, FieldMemOperand(constructor,
- JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
- __ Ldrb(x4, constructor_count);
- __ Subs(x4, x4, 1);
- __ Strb(x4, constructor_count);
- __ B(ne, &allocate);
-
- // Push the constructor and map to the stack, and the constructor again
- // as argument to the runtime call.
- __ Push(constructor, init_map, constructor);
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ Pop(init_map, constructor);
- __ Bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- Register obj_size = x3;
- Register new_obj = x4;
- __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
-
- // The JSObject is allocated; now initialize the fields. The map is set to
- // the initial map, and properties and elements are set to the empty fixed
- // array. NB: the object pointer is not tagged, so MemOperand is used.
- Register empty = x5;
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
- __ Str(empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
- __ Str(empty, MemOperand(new_obj, JSObject::kElementsOffset));
-
- Register first_prop = x5;
- __ Add(first_prop, new_obj, JSObject::kHeaderSize);
-
- // Fill all of the in-object properties with the appropriate filler.
- Register obj_end = x6;
- __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
- Register undef = x7;
- __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
-
- // Obtain number of pre-allocated property fields and in-object
- // properties.
- Register prealloc_fields = x10;
- Register inobject_props = x11;
- Register inst_sizes = x11;
- __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
- __ Ubfx(prealloc_fields, inst_sizes,
- Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Ubfx(inobject_props, inst_sizes,
- Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
-
- if (count_constructions) {
- // Register first_non_prealloc holds the address of the first field after
- // the pre-allocated fields.
- Register first_non_prealloc = x12;
- __ Add(first_non_prealloc, first_prop,
- Operand(prealloc_fields, LSL, kPointerSizeLog2));
-
- if (FLAG_debug_code) {
- __ Cmp(first_non_prealloc, obj_end);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(first_prop, first_non_prealloc, undef);
- // To allow for truncation.
- __ LoadRoot(x12, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(first_prop, obj_end, x12);
- } else {
- __ InitializeFieldsWithFiller(first_prop, obj_end, undef);
- }
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ Add(new_obj, new_obj, kHeapObjectTag);
-
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not, or fall through to runtime call if it is.
- Register element_count = x3;
- __ Ldrb(x3, FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
- // The instance sizes field contains both the number of pre-allocated
- // property fields and the number of in-object properties.
- __ Add(x3, x3, prealloc_fields);
- __ Subs(element_count, x3, inobject_props);
-
- // Done if no extra properties are to be allocated.
- __ B(eq, &allocated);
- __ Assert(pl, kPropertyAllocationCountFailed);
-
- // Compute the size of the FixedArray in words: the number of elements
- // plus the size of the header. Allocation continues from the end of the
- // JSObject allocated above (RESULT_CONTAINS_TOP).
- Register new_array = x5;
- Register array_size = x6;
- __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
- __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
- SIZE_IN_WORDS));
-
- Register array_map = x10;
- __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
- __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
- __ SmiTag(x0, element_count);
- __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
-
- // Initialize the fields to undefined.
- Register elements = x10;
- Register elements_end = x11;
- __ Add(elements, new_array, FixedArray::kHeaderSize);
- __ Add(elements_end, elements,
- Operand(element_count, LSL, kPointerSizeLog2));
- __ InitializeFieldsWithFiller(elements, elements_end, undef);
-
- // Store the initialized FixedArray into the properties field of the
- // JSObject.
- __ Add(new_array, new_array, kHeapObjectTag);
- __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
-
- // Continue with JSObject being successfully allocated.
- __ B(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated object's unused properties.
- __ Bind(&undo_allocation);
- __ UndoAllocationInNewSpace(new_obj, x14);
- }
-
- // Allocate the new receiver object using the runtime call.
- __ Bind(&rt_call);
- __ Push(constructor); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ Mov(x4, x0);
-
- // Receiver for constructor call allocated.
- // x4: JSObject
- __ Bind(&allocated);
- __ Push(x4, x4);
-
- // Reload the number of arguments from the stack.
- // Set it up in x0 for the function call below.
- // jssp[0]: receiver
- // jssp[1]: receiver
- // jssp[2]: constructor function
- // jssp[3]: number of arguments (smi-tagged)
- __ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor.
- __ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments.
- __ SmiUntag(argc);
-
- // Set up pointer to last argument.
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
-
- // Copy arguments and receiver to the expression stack.
- // Copy 2 values every loop to use ldp/stp.
- // x0: number of arguments
- // x1: constructor function
- // x2: address of last argument (caller sp)
- // jssp[0]: receiver
- // jssp[1]: receiver
- // jssp[2]: constructor function
- // jssp[3]: number of arguments (smi-tagged)
- // Compute the start address of the copy in x3.
- __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
- Label loop, entry, done_copying_arguments;
- __ B(&entry);
- __ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
- __ Push(x11, x10);
- __ Bind(&entry);
- __ Cmp(x3, x2);
- __ B(gt, &loop);
- // Because we copied values 2 by 2, we may have copied one extra value.
- // Drop it if that is the case.
- __ B(eq, &done_copying_arguments);
- __ Drop(1);
- __ Bind(&done_copying_arguments);
-
- // Call the function.
- // x0: number of arguments
- // x1: constructor function
- if (is_api_function) {
- __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(argc);
- __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore the context from the frame.
- // x0: result
- // jssp[0]: receiver
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(x0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0);
-
- // Remove the receiver from the stack, remove caller arguments, and
- // return.
- __ Bind(&exit);
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: constructor function
- // jssp[2]: number of arguments (smi-tagged)
- __ Peek(x1, 2 * kXRegSizeInBytes);
-
- // Leave construct frame.
- }
-
- __ DropBySMI(x1);
- __ Drop(1);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
- __ Ret();
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-// Input:
-// x0: code entry.
-// x1: function.
-// x2: receiver.
-// x3: argc.
-// x4: argv.
-// Output:
-// x0: result.
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from JSEntryStub::GenerateBody().
- Register function = x1;
- Register receiver = x2;
- Register argc = x3;
- Register argv = x4;
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Clear the context before we push it when entering the internal frame.
- __ Mov(cp, 0);
-
- {
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Set up the context from the function argument.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
-
- __ InitializeRootRegister();
-
- // Push the function and the receiver onto the stack.
- __ Push(function, receiver);
-
- // Copy arguments to the stack in a loop, in reverse order.
- // x3: argc.
- // x4: argv.
- Label loop, entry;
- // Compute the copy end address.
- __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
-
- __ B(&entry);
- __ Bind(&loop);
- __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
- __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
- __ Push(x12); // Push the argument.
- __ Bind(&entry);
- __ Cmp(x10, argv);
- __ B(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- // The original values have been saved in JSEntryStub::GenerateBody().
- __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
- __ Mov(x20, x19);
- __ Mov(x21, x19);
- __ Mov(x22, x19);
- __ Mov(x23, x19);
- __ Mov(x24, x19);
- __ Mov(x25, x19);
- // Don't initialize the reserved registers.
- // x26 : root register (root).
- // x27 : context pointer (cp).
- // x28 : JS stack pointer (jssp).
- // x29 : frame pointer (fp).
-
- // TODO(alexandre): Revisit the MAsm function invocation mechanisms.
- // Currently there is a mix of statically and dynamically allocated
- // registers.
- __ Mov(x0, argc);
- if (is_construct) {
- // No type feedback cell is available.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ Mov(x2, Operand(undefined_sentinel));
-
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(x0);
- __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
- }
- // Exit the JS internal frame and remove the parameters (except function),
- // and return.
- }
-
- // Result is in x0. Return.
- __ Ret();
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
- GenerateTailCallToReturnedCode(masm);
-}
-
-
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register function = x1;
-
- // Preserve function. At the same time, push arguments for
- // kCompileOptimized.
- __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
- __ Push(function, function, x10);
-
- __ CallRuntime(Runtime::kCompileOptimized, 2);
-
- // Restore receiver.
- __ Pop(function);
-}
-
-
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
- GenerateTailCallToReturnedCode(masm);
-}
-
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
- GenerateTailCallToReturnedCode(masm);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection, which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code fast, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // The following caller-saved registers must be saved and restored when
- // calling through to the runtime:
- // x0 - The address from which to resume execution.
- // x1 - isolate
- // lr - The return address for the JSFunction itself. It has not yet been
- // preserved on the stack because the frame setup code was replaced
- // with a call to this stub, to handle code ageing.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
- __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
- }
-
- // The calling function has been made young again, so return to execute the
- // real frame set-up code.
- __ Br(x0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
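-
-// For a single age C from CODE_AGE_LIST (taking Quadragenarian as an
-// example), the macro above expands to a pair of trivial generators:
-//
-//   void Builtins::Generate_MakeQuadragenarianCodeYoungAgainEvenMarking(
-//       MacroAssembler* masm) {
-//     GenerateMakeCodeYoungAgainCommon(masm);
-//   }
-//   void Builtins::Generate_MakeQuadragenarianCodeYoungAgainOddMarking(
-//       MacroAssembler* masm) {
-//     GenerateMakeCodeYoungAgainCommon(masm);
-//   }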
-
-
-void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
- // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
- // that make_code_young doesn't do any garbage collection, which allows us to
- // save/restore the registers without worrying about which of them contain
- // pointers.
-
- // The following caller-saved registers must be saved and restored when
- // calling through to the runtime:
- // x0 - The address from which to resume execution.
- // x1 - isolate
- // lr - The return address for the JSFunction itself. It has not yet been
- // preserved on the stack because the frame setup code was replaced
- // with a call to this stub, to handle code ageing.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
- __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(
- ExternalReference::get_mark_code_as_executed_function(
- masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
-
- // Perform prologue operations usually performed by the young code stub.
- __ EmitFrameSetupForCodeAgePatching(masm);
- }
-
- // Jump to point after the code-age stub.
- __ Add(x0, x0, kCodeAgeSequenceSize);
- __ Br(x0);
-}
-
-
-void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
- GenerateMakeCodeYoungAgainCommon(masm);
-}
-
-
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across the notification; this is important for
- // compiled stubs that tail-call the runtime on deopts, passing their
- // parameters in registers.
- // TODO(jbramley): Is it correct (and appropriate) to use safepoint
- // registers here? According to the comment above, we should only need to
- // preserve the registers with parameters.
- __ PushXRegList(kSafepointSavedRegisters);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
- __ PopXRegList(kSafepointSavedRegisters);
- }
-
- // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
- __ Drop(1);
-
- // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
- // into lr before it jumps here.
- __ Br(lr);
-}
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
-}
-
-
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the deoptimization type to the runtime system.
- __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ Push(x0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- }
-
- // Get the full codegen state from the stack and untag it.
- Register state = x6;
- __ Peek(state, 0);
- __ SmiUntag(state);
-
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ CompareAndBranch(
- state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
- __ Drop(1); // Remove state.
- __ Ret();
-
- __ Bind(&with_tos_register);
- // Reload TOS register.
- __ Peek(x0, kPointerSize);
- __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
- __ Drop(2); // Remove state and TOS.
- __ Ret();
-
- __ Bind(&unknown_state);
- __ Abort(kInvalidFullCodegenState);
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame.
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ Push(x0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the code object is null, just return to the unoptimized code.
- Label skip;
- __ CompareAndBranch(x0, Operand(Smi::FromInt(0)), ne, &skip);
- __ Ret();
-
- __ Bind(&skip);
-
- // Load deoptimization data from the code object.
- // <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
-
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
-
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ Add(x0, x0, x1);
- __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
-
- // And "return" to the OSR entry point of the function.
- __ Ret();
-}
-
-
-void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
- // We check the stack limit as an indicator that recompilation might be done.
- Label ok;
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
- }
- __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&ok);
- __ Ret();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- enum {
- call_type_JS_func = 0,
- call_type_func_proxy = 1,
- call_type_non_func = 2
- };
- Register argc = x0;
- Register function = x1;
- Register call_type = x4;
- Register scratch1 = x10;
- Register scratch2 = x11;
- Register receiver_type = x13;
-
- ASM_LOCATION("Builtins::Generate_FunctionCall");
- // 1. Make sure we have at least one argument.
- { Label done;
- __ Cbnz(argc, &done);
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Push(scratch1);
- __ Mov(argc, 1);
- __ Bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- __ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
- __ JumpIfSmi(function, &non_function);
- __ JumpIfNotObjectType(function, scratch1, receiver_type,
- JS_FUNCTION_TYPE, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Mov(call_type, static_cast<int>(call_type_JS_func));
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- // Also do not transform the receiver for native functions (the compiler
- // hints are loaded below).
- __ Ldr(scratch1,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(
- scratch2.W(),
- (1 << SharedFunctionInfo::kStrictModeFunction) |
- (1 << SharedFunctionInfo::kNative),
- &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- Register receiver = x2;
- __ Sub(scratch1, argc, 1);
- __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
- __ JumpIfSmi(receiver, &convert_to_object);
-
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_receiver);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(receiver, scratch1, scratch2,
- FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
-
- __ Bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
-
- __ Push(argc, receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Mov(receiver, x0);
-
- __ Pop(argc);
- __ SmiUntag(argc);
-
- // Exit the internal frame.
- }
-
- // Restore the function and flag in the registers.
- __ Peek(function, Operand(argc, LSL, kXRegSizeInBytesLog2));
- __ Mov(call_type, static_cast<int>(call_type_JS_func));
- __ B(&patch_receiver);
-
- __ Bind(&use_global_receiver);
- __ Ldr(receiver, GlobalObjectMemOperand());
- __ Ldr(receiver,
- FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
-
-
- __ Bind(&patch_receiver);
- __ Sub(scratch1, argc, 1);
- __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
-
- __ B(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ Bind(&slow);
- __ Mov(call_type, static_cast<int>(call_type_func_proxy));
- __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
- __ B(eq, &shift_arguments);
- __ Bind(&non_function);
- __ Mov(call_type, static_cast<int>(call_type_non_func));
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- __ Sub(scratch1, argc, 1);
- __ Poke(function, Operand(scratch1, LSL, kXRegSizeInBytesLog2));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- __ Bind(&shift_arguments);
- { Label loop;
- // Calculate the copy start address (destination). Copy end address is jssp.
- __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
- __ Sub(scratch1, scratch2, kPointerSize);
-
- __ Bind(&loop);
- __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
- __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
- __ Cmp(scratch1, jssp);
- __ B(ge, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Sub(argc, argc, 1);
- __ Drop(1);
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- { Label js_function, non_proxy;
- __ Cbz(call_type, &js_function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ Mov(x2, 0);
- __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
- __ B(ne, &non_proxy);
-
- __ Push(function); // Re-add proxy object as additional argument.
- __ Add(argc, argc, 1);
- __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&non_proxy);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ Bind(&js_function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register x3 without checking arguments.
- __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x2,
- FieldMemOperand(x3,
- SharedFunctionInfo::kFormalParameterCountOffset));
- Label dont_adapt_args;
- __ Cmp(x2, argc); // Check formal and actual parameter counts.
- __ B(eq, &dont_adapt_args);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ Bind(&dont_adapt_args);
-
- __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_FunctionApply");
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kReceiverOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- Register args = x12;
- Register receiver = x14;
- Register function = x15;
-
- // Get the length of the arguments via a builtin call.
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgsOffset));
- __ Push(function, args);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- Register argc = x0;
-
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- // Make x10 the amount of stack space we have left. The stack might already
- // be overflowed here, which will cause x10 to become negative.
- // TODO(jbramley): Check that the stack usage here is safe.
- __ Sub(x10, jssp, x10);
- // Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
- __ B(gt, &enough_stack_space);
- // There is not enough stack space, so use a builtin to throw an appropriate
- // error.
- __ Push(function, argc);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // We should never return from the APPLY_OVERFLOW builtin.
- if (__ emit_debug_code()) {
- __ Unreachable();
- }
-
- __ Bind(&enough_stack_space);
- // Push current limit and index.
- __ Mov(x1, 0); // Initial index.
- __ Push(argc, x1);
-
- Label push_receiver;
- __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function. Otherwise it must be a proxy,
- // in which case the function proxy will be invoked later.
- __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
- &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- // Load the shared function info.
- __ Ldr(x2, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
-
- // Compute and push the receiver.
- // Do not transform the receiver for strict mode functions.
- Label convert_receiver_to_object, use_global_receiver;
- __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
- // Do not transform the receiver for native functions.
- __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(receiver, &convert_receiver_to_object);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
- &push_receiver, ge);
-
- // Call a builtin to convert the receiver to a regular object.
- __ Bind(&convert_receiver_to_object);
- __ Push(receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Mov(receiver, x0);
- __ B(&push_receiver);
-
- __ Bind(&use_global_receiver);
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver
- __ Bind(&push_receiver);
- __ Push(receiver);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register current = x0;
- __ Ldr(current, MemOperand(fp, kIndexOffset));
- __ B(&entry);
-
- __ Bind(&loop);
- // Load the current argument from the arguments array and push it.
- // TODO(all): Couldn't we optimize this for JS arrays?
-
- __ Ldr(x1, MemOperand(fp, kArgsOffset));
- __ Push(x1, current);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ Push(x0);
-
-  // Advance the index and store it back for the next iteration.
- __ Ldr(current, MemOperand(fp, kIndexOffset));
- __ Add(current, current, Operand(Smi::FromInt(1)));
- __ Str(current, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ Bind(&entry);
- __ Ldr(x1, MemOperand(fp, kLimitOffset));
- __ Cmp(current, x1);
- __ B(ne, &loop);
-
- // At the end of the loop, the number of arguments is stored in 'current',
- // represented as a smi.
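-  //
-  // In effect, the loop above performs (sketch; the property access really
-  // goes through Runtime::kGetProperty rather than a raw element load):
-  //
-  //   for (index = 0; index != limit; index++) {
-  //     push(GetProperty(args, index));
-  //   }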
-
-  function = x1; // From now on we want the function to be kept in x1.
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
-
- // Call the function.
- Label call_proxy;
- ParameterCount actual(current);
- __ SmiUntag(current);
- __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
- __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
- frame_scope.GenerateLeaveFrame();
- __ Drop(3);
- __ Ret();
-
- // Call the function proxy.
- __ Bind(&call_proxy);
- // x0 : argc
- // x1 : function
- __ Push(function); // Add function proxy as last argument.
- __ Add(x0, x0, 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- }
- __ Drop(3);
- __ Ret();
-}
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(x10, x0);
- __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ Push(lr, fp);
- __ Push(x11, x1, x10);
- __ Add(fp, jssp,
- StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
-}
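-
-// After EnterArgumentsAdaptorFrame the frame layout is as follows (sketch
-// derived from the pushes above, assuming kPointerSize == 8 and
-// kFixedFrameSizeFromFp == 2 * kPointerSize; offsets are relative to the
-// new fp):
-//
-//   fp[8]   : lr (caller's return address)
-//   fp[0]   : caller's fp
-//   fp[-8]  : ARGUMENTS_ADAPTOR frame marker (smi)
-//   fp[-16] : function (x1)
-//   fp[-24] : actual argument count (smi-tagged x0)
-//
-// LeaveArgumentsAdaptorFrame reads the argument count back from fp[-24].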
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then drop the parameters and the receiver.
- __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize)));
- __ Mov(jssp, fp);
- __ Pop(fp, lr);
- __ DropBySMI(x10, kXRegSizeInBytes);
- __ Drop(1);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
- // ----------- S t a t e -------------
- // -- x0 : actual number of arguments
- // -- x1 : function (passed through to callee)
- // -- x2 : expected number of arguments
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
-
- Label enough, too_few;
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ Cmp(x0, x2);
- __ B(lt, &too_few);
- __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ B(eq, &dont_adapt_arguments);
-
- { // Enough parameters: actual >= expected
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into x10 and end address into x11.
- // x0: actual number of arguments
- // x1: function
- // x2: expected number of arguments
- // x3: code entry to call
- __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
- // Adjust for return address and receiver
- __ Add(x10, x10, 2 * kPointerSize);
- __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // x0: actual number of arguments
- // x1: function
- // x2: expected number of arguments
- // x3: code entry to call
- // x10: copy start address
- // x11: copy end address
-
- // TODO(all): Should we push values 2 by 2?
- Label copy;
- __ Bind(&copy);
- __ Cmp(x10, x11);
- __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
- __ Push(x12);
- __ B(gt, &copy);
-
- __ B(&invoke);
- }
-
-  { // Too few parameters: actual < expected
- __ Bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into x10 and copy end address into x11.
- // x0: actual number of arguments
- // x1: function
- // x2: expected number of arguments
- // x3: code entry to call
- // Adjust for return address.
- __ Add(x11, fp, 1 * kPointerSize);
- __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x10, x10, 1 * kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // x0: actual number of arguments
- // x1: function
- // x2: expected number of arguments
- // x3: code entry to call
- // x10: copy start address
- // x11: copy end address
- Label copy;
- __ Bind(&copy);
- __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
- __ Push(x12);
- __ Cmp(x10, x11); // Compare before moving to next argument.
- __ B(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // x0: actual number of arguments
- // x1: function
- // x2: expected number of arguments
- // x3: code entry to call
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
- // Adjust for the arguments adaptor frame and already pushed receiver.
- __ Sub(x11, x11,
- StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
-
- // TODO(all): Optimize this to use ldp?
- Label fill;
- __ Bind(&fill);
- __ Push(x10);
- __ Cmp(jssp, x11);
- __ B(ne, &fill);
- }
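-
-  // The fill loop amounts to (sketch):  while (jssp != x11) push(undefined);
-  // x11 was positioned so that exactly (expected - actual) slots get filled.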
-
- // Arguments have been adapted. Now call the entry point.
- __ Bind(&invoke);
- __ Call(x3);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Ret();
-
- // Call the entry point without adapting the arguments.
- __ Bind(&dont_adapt_arguments);
- __ Jump(x3);
-}
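-
-// In JS terms the adaptor implements the usual call-site adjustment
-// (illustrative sketch, not part of the original file):
-//
-//   function f(a, b) { return [a, b]; }
-//   f(1);        // => [1, undefined]   (too few: padded with undefined)
-//   f(1, 2, 3);  // => [1, 2]           (enough: extras remain but unused)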
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/code-stubs-a64.cc b/deps/v8/src/a64/code-stubs-a64.cc
deleted file mode 100644
index b640677cae..0000000000
--- a/deps/v8/src/a64/code-stubs-a64.cc
+++ /dev/null
@@ -1,5809 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastNewClosureStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: function info
- static Register registers[] = { x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
-}
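-
-// Throughout these descriptors the register count is derived with the
-// standard C array-length idiom, e.g. (sketch):
-//
-//   static Register registers[] = { x2 };
-//   int count = sizeof(registers) / sizeof(registers[0]);  // == 1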
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: function
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x3: array literals array
- // x2: array literal index
- // x1: constant elements
- static Register registers[] = { x3, x2, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x3: object literals array
- // x2: object literal index
- // x1: constant properties
- // x0: object literal flags
- static Register registers[] = { x3, x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: feedback vector
- // x3: call feedback slot
- static Register registers[] = { x2, x3 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x0: key
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: length
- // x1: index (of last match)
- // x0: string
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: receiver
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- static Register registers[] = { x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: receiver
- // x1: key
- // x0: value
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value (js_array)
- // x1: to_map
- static Register registers[] = { x0, x1 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value to compare
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(CompareNilIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
-}
-
-
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // x1: function
- // x2: allocation site with elements kind
- // x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x2, x0 };
- static Register registers_no_args[] = { x1, x2 };
-
- if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
- } else {
-    // The stack param count covers the constructor pointer and one argument.
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
- }
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // x1: constructor function
- // x0: number of arguments to the constructor function
- static Register registers_variable_args[] = { x1, x0 };
- static Register registers_no_args[] = { x1 };
-
- if (constant_stack_parameter_count == 0) {
- descriptor->register_param_count_ =
- sizeof(registers_no_args) / sizeof(registers_no_args[0]);
- descriptor->register_params_ = registers_no_args;
- } else {
-    // The stack param count covers the constructor pointer and one argument.
- descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
- descriptor->stack_parameter_count_ = x0;
- descriptor->register_param_count_ =
- sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
- descriptor->register_params_ = registers_variable_args;
- }
-
- descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
-}
-
-
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- static Register registers[] = { x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: receiver
- // x2: key (unused)
- // x0: value
- static Register registers[] = { x1, x2, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x0: value
- // x3: target map
- // x1: key
- // x2: receiver
- static Register registers[] = { x0, x3, x1, x2 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x2: allocation site
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x2, x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // x1: left operand
- // x0: right operand
- static Register registers[] = { x1, x0 };
- descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
- static PlatformCallInterfaceDescriptor default_descriptor =
- PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
- static PlatformCallInterfaceDescriptor noInlineDescriptor =
- PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
- static Register registers[] = { x1, // JSFunction
- cp, // context
- x0, // actual number of arguments
- x2, // expected number of arguments
- };
- static Representation representations[] = {
- Representation::Tagged(), // JSFunction
- Representation::Tagged(), // context
- Representation::Integer32(), // actual number of arguments
- Representation::Integer32(), // expected number of arguments
- };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::KeyedCall);
- static Register registers[] = { cp, // context
- x2, // key
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // key
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::NamedCall);
- static Register registers[] = { cp, // context
- x2, // name
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // name
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::CallHandler);
- static Register registers[] = { cp, // context
- x0, // receiver
- };
- static Representation representations[] = {
- Representation::Tagged(), // context
- Representation::Tagged(), // receiver
- };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
- {
- CallInterfaceDescriptor* descriptor =
- isolate->call_descriptor(Isolate::ApiFunctionCall);
- static Register registers[] = { x0, // callee
- x4, // call_data
- x2, // holder
- x1, // api_function_address
- cp, // context
- };
- static Representation representations[] = {
- Representation::Tagged(), // callee
- Representation::Tagged(), // call_data
- Representation::Tagged(), // holder
- Representation::External(), // api_function_address
- Representation::Tagged(), // context
- };
- descriptor->register_param_count_ = 5;
- descriptor->register_params_ = registers;
- descriptor->param_representations_ = representations;
- descriptor->platform_specific_descriptor_ = &default_descriptor;
- }
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- Isolate* isolate = masm->isolate();
- isolate->counters()->code_stubs()->Increment();
-
- CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
- int param_count = descriptor->register_param_count_;
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- ASSERT((descriptor->register_param_count_ == 0) ||
- x0.Is(descriptor->register_params_[param_count - 1]));
- // Push arguments
- // TODO(jbramley): Try to push these in blocks.
- for (int i = 0; i < param_count; ++i) {
- __ Push(descriptor->register_params_[i]);
- }
- ExternalReference miss = descriptor->miss_handler();
- __ CallExternalReference(miss, descriptor->register_param_count_);
- }
-
- __ Ret();
-}
-
-
-void DoubleToIStub::Generate(MacroAssembler* masm) {
- Label done;
- Register input = source();
- Register result = destination();
- ASSERT(is_truncating());
-
- ASSERT(result.Is64Bits());
- ASSERT(jssp.Is(masm->StackPointer()));
-
- int double_offset = offset();
-
- DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
- Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
- Register scratch2 =
- GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
-
- __ Push(scratch1, scratch2);
- // Account for saved regs if input is jssp.
- if (input.is(jssp)) double_offset += 2 * kPointerSize;
-
- if (!skip_fastpath()) {
- __ Push(double_scratch);
- if (input.is(jssp)) double_offset += 1 * kDoubleSize;
- __ Ldr(double_scratch, MemOperand(input, double_offset));
- // Try to convert with a FPU convert instruction. This handles all
- // non-saturating cases.
- __ TryInlineTruncateDoubleToI(result, double_scratch, &done);
- __ Fmov(result, double_scratch);
- } else {
- __ Ldr(result, MemOperand(input, double_offset));
- }
-
- // If we reach here we need to manually convert the input to an int32.
-
- // Extract the exponent.
- Register exponent = scratch1;
- __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
- HeapNumber::kExponentBits);
-
-  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0,
-  // since the mantissa gets shifted completely out of the int32_t result.
- __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
- __ CzeroX(result, ge);
- __ B(ge, &done);
-
- // The Fcvtzs sequence handles all cases except where the conversion causes
- // signed overflow in the int64_t target. Since we've already handled
- // exponents >= 84, we can guarantee that 63 <= exponent < 84.
-
- if (masm->emit_debug_code()) {
- __ Cmp(exponent, HeapNumber::kExponentBias + 63);
- // Exponents less than this should have been handled by the Fcvt case.
- __ Check(ge, kUnexpectedValue);
- }
-
- // Isolate the mantissa bits, and set the implicit '1'.
- Register mantissa = scratch2;
- __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
- __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
-
- // Negate the mantissa if necessary.
- __ Tst(result, kXSignMask);
- __ Cneg(mantissa, mantissa, ne);
-
-  // Shift the mantissa bits into the correct place. We know that we have to
-  // shift it left here, because exponent >= 63 >= kMantissaBits.
- __ Sub(exponent, exponent,
- HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
- __ Lsl(result, mantissa, exponent);
-
- __ Bind(&done);
- if (!skip_fastpath()) {
- __ Pop(double_scratch);
- }
- __ Pop(scratch2, scratch1);
- __ Ret();
-}
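-
-// The manual conversion above corresponds to this C sketch (the helper name
-// is ours; C's caveats about shifting negative values are glossed over):
-//
-//   int64_t TruncateDoubleBits(uint64_t bits) {
-//     int64_t exp = (bits >> 52) & 0x7ff;            // Biased exponent.
-//     if (exp >= 1023 + 52 + 32) return 0;           // Mantissa shifted out.
-//     int64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
-//     if (bits >> 63) mantissa = -mantissa;          // Apply the sign bit.
-//     return mantissa << (exp - (1023 + 52));        // 11 <= shift < 32.
-//   }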
-
-
-// See call site for description.
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch,
- FPRegister double_scratch,
- Label* slow,
- Condition cond) {
- ASSERT(!AreAliased(left, right, scratch));
- Label not_identical, return_equal, heap_number;
- Register result = x0;
-
- __ Cmp(right, left);
- __ B(ne, &not_identical);
-
-  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
-  // so we do the second-best thing: test it ourselves.
-  // The two values are identical and not both Smis, so neither of them can
-  // be a Smi. If the value is not a heap number, then return equal.
- if ((cond == lt) || (cond == gt)) {
- __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
- ge);
- } else {
- Register right_type = scratch;
- __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
- &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if ((cond == le) || (cond == ge)) {
- __ Cmp(right_type, ODDBALL_TYPE);
- __ B(ne, &return_equal);
- __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ Mov(result, GREATER);
- } else {
- // undefined >= undefined should fail.
- __ Mov(result, LESS);
- }
- __ Ret();
- }
- }
- }
-
- __ Bind(&return_equal);
- if (cond == lt) {
- __ Mov(result, GREATER); // Things aren't less than themselves.
- } else if (cond == gt) {
- __ Mov(result, LESS); // Things aren't greater than themselves.
- } else {
- __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- // Cases lt and gt have been handled earlier, and case ne is never seen, as
- // it is handled in the parser (see Parser::ParseBinaryExpression). We are
- // only concerned with cases ge, le and eq here.
- if ((cond != lt) && (cond != gt)) {
- ASSERT((cond == ge) || (cond == le) || (cond == eq));
- __ Bind(&heap_number);
- // Left and right are identical pointers to a heap number object. Return
- // non-equal if the heap number is a NaN, and equal otherwise. Comparing
- // the number to itself will set the overflow flag iff the number is NaN.
- __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
- __ Fcmp(double_scratch, double_scratch);
- __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
-
- if (cond == le) {
- __ Mov(result, GREATER);
- } else {
- __ Mov(result, LESS);
- }
- __ Ret();
- }
-
- // No fall through here.
- if (FLAG_debug_code) {
- __ Unreachable();
- }
-
- __ Bind(&not_identical);
-}
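-
-// The heap-number path above relies on a standard IEEE-754 property
-// (sketch): a NaN never compares equal to anything, itself included.
-//
-//   var x = 0 / 0;   // NaN
-//   x == x;          // false
-//   x <= x;          // false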
-
-
-// See call site for description.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register left,
- Register right,
- Register left_type,
- Register right_type,
- Register scratch) {
- ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
-
- if (masm->emit_debug_code()) {
- // We assume that the arguments are not identical.
- __ Cmp(left, right);
- __ Assert(ne, kExpectedNonIdenticalObjects);
- }
-
- // If either operand is a JS object or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label right_non_object;
-
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, &right_non_object);
-
- // Return non-zero - x0 already contains a non-zero pointer.
- ASSERT(left.is(x0) || right.is(x0));
- Label return_not_equal;
- __ Bind(&return_not_equal);
- __ Ret();
-
- __ Bind(&right_non_object);
-
- // Check for oddballs: true, false, null, undefined.
- __ Cmp(right_type, ODDBALL_TYPE);
-
- // If right is not ODDBALL, test left. Otherwise, set eq condition.
- __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
-
- // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
- // Otherwise, right or left is ODDBALL, so set a ge condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
-
- __ B(ge, &return_not_equal);
-
- // Internalized strings are unique, so they can only be equal if they are the
- // same object. We have already tested that case, so if left and right are
- // both internalized strings, they cannot be equal.
- STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
- __ Orr(scratch, left_type, right_type);
- __ TestAndBranchIfAllClear(
- scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
-}
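-
-// The Cmp/Ccmp/Ccmp chain above evaluates a three-way condition without
-// branching. In C-like terms (sketch):
-//
-//   if (right_type == ODDBALL_TYPE || left_type == ODDBALL_TYPE ||
-//       left_type >= FIRST_SPEC_OBJECT_TYPE) {
-//     goto return_not_equal;
-//   }
-//
-// Each Ccmp either performs its comparison or forces the flags (ZFlag,
-// NVFlag) so that the final 'ge' test comes out taken.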
-
-
-// See call site for description.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register left,
- Register right,
- FPRegister left_d,
- FPRegister right_d,
- Register scratch,
- Label* slow,
- bool strict) {
- ASSERT(!AreAliased(left, right, scratch));
- ASSERT(!AreAliased(left_d, right_d));
- ASSERT((left.is(x0) && right.is(x1)) ||
- (right.is(x0) && left.is(x1)));
- Register result = x0;
-
- Label right_is_smi, done;
- __ JumpIfSmi(right, &right_is_smi);
-
- // Left is the smi. Check whether right is a heap number.
- if (strict) {
- // If right is not a number and left is a smi, then strict equality cannot
- // succeed. Return non-equal.
- Label is_heap_number;
- __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
- // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
- if (!right.is(result)) {
- __ Mov(result, NOT_EQUAL);
- }
- __ Ret();
- __ Bind(&is_heap_number);
- } else {
- // Smi compared non-strictly with a non-smi, non-heap-number. Call the
- // runtime.
- __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
- }
-
- // Left is the smi. Right is a heap number. Load right value into right_d, and
- // convert left smi into double in left_d.
- __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
- __ SmiUntagToDouble(left_d, left);
- __ B(&done);
-
- __ Bind(&right_is_smi);
- // Right is a smi. Check whether the non-smi left is a heap number.
- if (strict) {
- // If left is not a number and right is a smi then strict equality cannot
- // succeed. Return non-equal.
- Label is_heap_number;
- __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
- &is_heap_number);
- // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
- if (!left.is(result)) {
- __ Mov(result, NOT_EQUAL);
- }
- __ Ret();
- __ Bind(&is_heap_number);
- } else {
- // Smi compared non-strictly with a non-smi, non-heap-number. Call the
- // runtime.
- __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
- }
-
- // Right is the smi. Left is a heap number. Load left value into left_d, and
- // convert right smi into double in right_d.
- __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
- __ SmiUntagToDouble(right_d, right);
-
- // Fall through to both_loaded_as_doubles.
- __ Bind(&done);
-}
-
-
-// Fast negative check for internalized-to-internalized equality.
-// See call site for description.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register left,
- Register right,
- Register left_map,
- Register right_map,
- Register left_type,
- Register right_type,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
- Register result = x0;
-
- Label object_test;
- STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
- // TODO(all): reexamine this branch sequence for optimisation wrt branch
- // prediction.
- __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
- __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
- __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
- __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
-
- // Both are internalized. We already checked that they weren't the same
- // pointer, so they are not equal.
- __ Mov(result, NOT_EQUAL);
- __ Ret();
-
- __ Bind(&object_test);
-
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
-
- // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
- // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
-
- __ B(lt, not_both_strings);
-
- // If both objects are undetectable, they are equal. Otherwise, they are not
- // equal, since they are different objects and an object is not equal to
- // undefined.
-
- // Returning here, so we can corrupt right_type and left_type.
- Register right_bitfield = right_type;
- Register left_bitfield = left_type;
- __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
- __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
- __ And(result, right_bitfield, left_bitfield);
- __ And(result, result, 1 << Map::kIsUndetectable);
- __ Eor(result, result, 1 << Map::kIsUndetectable);
- __ Ret();
-}
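-
-// The closing And/And/Eor sequence computes the answer branch-free (sketch,
-// with kUndetectableBit = 1 << Map::kIsUndetectable):
-//
-//   result = ((right_bitfield & left_bitfield) & kUndetectableBit)
-//            ^ kUndetectableBit;
-//
-// so result is 0 (EQUAL) iff both objects are undetectable.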
-
-
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ Bind(&ok);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = x1;
- Register rhs = x0;
- Register result = x0;
- Condition cond = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles;
- Label not_two_smis, smi_done;
- __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
- __ SmiUntag(lhs);
- __ Sub(result, lhs, Operand::UntagSmi(rhs));
- __ Ret();
-
- __ Bind(&not_two_smis);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so it is
- // certain that at least one operand isn't a smi.
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
-
- // If either is a smi (we know that at least one is not a smi), then they can
- // only be strictly equal if the other is a HeapNumber.
- __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
-
- // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
- // can:
- // 1) Return the answer.
- // 2) Branch to the slow case.
- // 3) Fall through to both_loaded_as_doubles.
- // In case 3, we have found out that we were dealing with a number-number
-  // comparison. The double values of the numbers have been loaded: the right
-  // value into rhs_d and the left value into lhs_d.
- FPRegister rhs_d = d0;
- FPRegister lhs_d = d1;
- EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
-
- __ Bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in rhs_d and
- // lhs_d.
- Label nan;
- __ Fcmp(lhs_d, rhs_d);
- __ B(vs, &nan); // Overflow flag set if either is NaN.
- STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
- __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
- __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
- __ Ret();
-
- __ Bind(&nan);
- // Left and/or right is a NaN. Load the result register with whatever makes
- // the comparison fail, since comparisons with NaN always fail (except ne,
- // which is filtered out at a higher level.)
- ASSERT(cond != ne);
- if ((cond == lt) || (cond == le)) {
- __ Mov(result, GREATER);
- } else {
- __ Mov(result, LESS);
- }
- __ Ret();
-
- __ Bind(&not_smis);
- // At this point we know we are dealing with two different objects, and
- // neither of them is a smi. The objects are in rhs_ and lhs_.
-
- // Load the maps and types of the objects.
- Register rhs_map = x10;
- Register rhs_type = x11;
- Register lhs_map = x12;
- Register lhs_type = x13;
- __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
- __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
-
- if (strict()) {
- // This emits a non-equal return sequence for some object types, or falls
- // through if it was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
- }
-
- Label check_for_internalized_strings;
- Label flat_string_check;
- // Check for heap number comparison. Branch to earlier double comparison code
- // if they are heap numbers, otherwise, branch to internalized string check.
- __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
- __ B(ne, &check_for_internalized_strings);
- __ Cmp(lhs_map, rhs_map);
-
- // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
- // string check.
- __ B(ne, &flat_string_check);
-
- // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
- // comparison code.
- __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ B(&both_loaded_as_doubles);
-
- __ Bind(&check_for_internalized_strings);
- // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
- // of internalized strings.
- if ((cond == eq) && !strict()) {
- // Returns an answer for two internalized strings or two detectable objects.
- // Otherwise branches to the string case or not both strings case.
- EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
- lhs_type, rhs_type,
- &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ Bind(&flat_string_check);
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
- x15, &slow);
-
- Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
- x11);
- if (cond == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
- x10, x11, x12);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
- x10, x11, x12, x13);
- }
-
- // Never fall through to here.
- if (FLAG_debug_code) {
- __ Unreachable();
- }
-
- __ Bind(&slow);
-
- __ Push(lhs, rhs);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cond == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if ((cond == lt) || (cond == le)) {
- ncr = GREATER;
- } else {
- ASSERT((cond == gt) || (cond == ge)); // remaining cases
- ncr = LESS;
- }
- __ Mov(x10, Operand(Smi::FromInt(ncr)));
- __ Push(x10);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
- // ip0 and ip1 are corrupted by the call into C.
- CPURegList saved_regs = kCallerSaved;
- saved_regs.Remove(ip0);
- saved_regs.Remove(ip1);
- saved_regs.Remove(x8);
- saved_regs.Remove(x9);
-
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ PushCPURegList(saved_regs);
- if (save_doubles_ == kSaveFPRegs) {
- __ PushCPURegList(kCallerSavedFP);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- 1, 0);
-
- if (save_doubles_ == kSaveFPRegs) {
- __ PopCPURegList(kCallerSavedFP);
- }
- __ PopCPURegList(saved_regs);
- __ Ret();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- // Stack on entry:
- // jssp[0]: Exponent (as a tagged value).
- // jssp[1]: Base (as a tagged value).
- //
- // The (tagged) result will be returned in x0, as a heap number.
-
- Register result_tagged = x0;
- Register base_tagged = x10;
- Register exponent_tagged = x11;
- Register exponent_integer = x12;
- Register scratch1 = x14;
- Register scratch0 = x15;
- Register saved_lr = x19;
- FPRegister result_double = d0;
- FPRegister base_double = d0;
- FPRegister exponent_double = d1;
- FPRegister base_double_copy = d2;
- FPRegister scratch1_double = d6;
- FPRegister scratch0_double = d7;
-
- // A fast-path for integer exponents.
- Label exponent_is_smi, exponent_is_integer;
- // Bail out to runtime.
- Label call_runtime;
- // Allocate a heap number for the result, and return it.
- Label done;
-
- // Unpack the inputs.
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi;
- Label unpack_exponent;
-
- __ Pop(exponent_tagged, base_tagged);
-
- __ JumpIfSmi(base_tagged, &base_is_smi);
- __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
- // base_tagged is a heap number, so load its double value.
- __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
- __ B(&unpack_exponent);
- __ Bind(&base_is_smi);
- // base_tagged is a SMI, so untag it and convert it to a double.
- __ SmiUntagToDouble(base_double, base_tagged);
-
- __ Bind(&unpack_exponent);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
-    // d0 base_double The base as a double.
- __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
- __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
- // exponent_tagged is a heap number, so load its double value.
- __ Ldr(exponent_double,
- FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
- __ Ldr(exponent_double,
- FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- }
-
- // Handle double (heap number) exponents.
- if (exponent_type_ != INTEGER) {
- // Detect integer exponents stored as doubles and handle those in the
- // integer fast-path.
- __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
- scratch0_double, &exponent_is_integer);
-
- if (exponent_type_ == ON_STACK) {
- FPRegister half_double = d3;
- FPRegister minus_half_double = d4;
- FPRegister zero_double = d5;
- // Detect square root case. Crankshaft detects constant +/-0.5 at compile
- // time and uses DoMathPowHalf instead. We then skip this check for
- // non-constant cases of +/-0.5 as these hardly occur.
-
- __ Fmov(minus_half_double, -0.5);
- __ Fmov(half_double, 0.5);
- __ Fcmp(minus_half_double, exponent_double);
- __ Fccmp(half_double, exponent_double, NZFlag, ne);
- // Condition flags at this point:
-    // 0.5: nZCv // Identified by eq && pl
- // -0.5: NZcv // Identified by eq && mi
- // other: ?z?? // Identified by ne
- __ B(ne, &call_runtime);
-
- // The exponent is 0.5 or -0.5.
-
- // Given that exponent is known to be either 0.5 or -0.5, the following
- // special cases could apply (according to ECMA-262 15.8.2.13):
- //
- // base.isNaN(): The result is NaN.
- // (base == +INFINITY) || (base == -INFINITY)
- // exponent == 0.5: The result is +INFINITY.
- // exponent == -0.5: The result is +0.
- // (base == +0) || (base == -0)
- // exponent == 0.5: The result is +0.
- // exponent == -0.5: The result is +INFINITY.
- // (base < 0) && base.isFinite(): The result is NaN.
- //
- // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
- // where base is -INFINITY or -0.
-
- // Add +0 to base. This has no effect other than turning -0 into +0.
- __ Fmov(zero_double, 0.0);
- __ Fadd(base_double, base_double, zero_double);
- // The operation -0+0 results in +0 in all cases except where the
- // FPCR rounding mode is 'round towards minus infinity' (RM). The
- // A64 simulator does not currently simulate FPCR (where the rounding
- // mode is set), so test the operation with some debug code.
- if (masm->emit_debug_code()) {
- Register temp = masm->Tmp1();
- // d5 zero_double The value +0.0 as a double.
- __ Fneg(scratch0_double, zero_double);
- // Verify that we correctly generated +0.0 and -0.0.
- // bits(+0.0) = 0x0000000000000000
- // bits(-0.0) = 0x8000000000000000
- __ Fmov(temp, zero_double);
- __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
- __ Fmov(temp, scratch0_double);
- __ Eor(temp, temp, kDSignMask);
- __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
- // Check that -0.0 + 0.0 == +0.0.
- __ Fadd(scratch0_double, scratch0_double, zero_double);
- __ Fmov(temp, scratch0_double);
- __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
- }
-
- // If base is -INFINITY, make it +INFINITY.
- // * Calculate base - base: All infinities will become NaNs since both
- // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
- // * If the result is NaN, calculate abs(base).
- __ Fsub(scratch0_double, base_double, base_double);
- __ Fcmp(scratch0_double, 0.0);
- __ Fabs(scratch1_double, base_double);
- __ Fcsel(base_double, scratch1_double, base_double, vs);
-
- // Calculate the square root of base.
- __ Fsqrt(result_double, base_double);
- __ Fcmp(exponent_double, 0.0);
- __ B(ge, &done); // Finish now for exponents of 0.5.
- // Find the inverse for exponents of -0.5.
- __ Fmov(scratch0_double, 1.0);
- __ Fdiv(result_double, scratch0_double, result_double);
- __ B(&done);
- }
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ Mov(lr, saved_lr);
- __ B(&done);
- }
-
- // Handle SMI exponents.
- __ Bind(&exponent_is_smi);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
-  // d0 base_double The base as a double.
- __ SmiUntag(exponent_integer, exponent_tagged);
- }
-
- __ Bind(&exponent_is_integer);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
- // x12 exponent_integer The exponent as an integer.
-  // d0 base_double The base as a double.
-
- // Find abs(exponent). For negative exponents, we can find the inverse later.
- Register exponent_abs = x13;
- __ Cmp(exponent_integer, 0);
- __ Cneg(exponent_abs, exponent_integer, mi);
- // x13 exponent_abs The value of abs(exponent_integer).
-
- // Repeatedly multiply to calculate the power.
- // result = 1.0;
- // For each bit n (exponent_integer{n}) {
- // if (exponent_integer{n}) {
- // result *= base;
- // }
- // base *= base;
- // if (remaining bits in exponent_integer are all zero) {
- // break;
- // }
- // }
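-  //
-  // A self-contained C version of this square-and-multiply scheme would be
-  // (sketch; the helper name is ours):
-  //
-  //   double PowInt(double base, uint64_t exponent) {
-  //     double result = 1.0;
-  //     while (exponent != 0) {
-  //       if (exponent & 1) result *= base;
-  //       base *= base;
-  //       exponent >>= 1;
-  //     }
-  //     return result;
-  //   }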
- Label power_loop, power_loop_entry, power_loop_exit;
- __ Fmov(scratch1_double, base_double);
- __ Fmov(base_double_copy, base_double);
- __ Fmov(result_double, 1.0);
- __ B(&power_loop_entry);
-
- __ Bind(&power_loop);
- __ Fmul(scratch1_double, scratch1_double, scratch1_double);
- __ Lsr(exponent_abs, exponent_abs, 1);
- __ Cbz(exponent_abs, &power_loop_exit);
-
- __ Bind(&power_loop_entry);
- __ Tbz(exponent_abs, 0, &power_loop);
- __ Fmul(result_double, result_double, scratch1_double);
- __ B(&power_loop);
-
- __ Bind(&power_loop_exit);
-
- // If the exponent was positive, result_double holds the result.
- __ Tbz(exponent_integer, kXSignBit, &done);
-
- // The exponent was negative, so find the inverse.
- __ Fmov(scratch0_double, 1.0);
- __ Fdiv(result_double, scratch0_double, result_double);
- // ECMA-262 only requires Math.pow to return an 'implementation-dependent
- // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
- // to calculate the subnormal value 2^-1074. This method of calculating
- // negative powers doesn't work because 2^1074 overflows to infinity. To
- // catch this corner-case, we bail out if the result was 0. (This can only
- // occur if the divisor is infinity or the base is zero.)
- __ Fcmp(result_double, 0.0);
- __ B(&done, ne);
-
- if (exponent_type_ == ON_STACK) {
- // Bail out to runtime code.
- __ Bind(&call_runtime);
- // Put the arguments back on the stack.
- __ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // Return.
- __ Bind(&done);
- __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
- __ Str(result_double,
- FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
- ASSERT(result_tagged.is(x0));
- __ IncrementCounter(
- masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
- __ Ret();
- } else {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ Fmov(base_double, base_double_copy);
- __ Scvtf(exponent_double, exponent_integer);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ Mov(lr, saved_lr);
- __ Bind(&done);
- __ IncrementCounter(
- masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
- __ Ret();
- }
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- // It is important that the following stubs are generated in this order
- // because pregenerated stubs can only call other pregenerated stubs.
- // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
- // CEntryStub.
- CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
- CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- // Floating-point code doesn't get special handling in A64, so there's
- // nothing to do here.
- USE(isolate);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ And(scratch, value, 0xf);
- __ Cmp(scratch, 0xf);
- __ B(eq, oom_label);
-}
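-
-// kFailureTag (3) occupies the low two bits and the failure type the next
-// two, so the test above reads, in C terms (sketch):
-//
-//   if ((value & 0xf) == 0xf) goto oom;  // tag == 3 && type == OUT_OF_MEMORY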
-
-
-bool CEntryStub::NeedsImmovableCode() {
- // CEntryStub stores the return address on the stack before calling into
- // C++ code. In some cases, the VM accesses this address, but it is not used
- // when the C++ code returns to the stub because LR holds the return address
- // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
- // returning to dead code.
- // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
-  // find any comment to confirm this, and I don't hit any crashes regardless
-  // of what this function returns. The analysis should be properly confirmed.
- return true;
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate);
- CEntryStub stub_fp(1, kSaveFPRegs);
- stub_fp.GetCode(isolate);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal,
- Label* throw_termination,
- Label* throw_out_of_memory,
- bool do_gc,
- bool always_allocate) {
- // x0 : Result parameter for PerformGC, if do_gc is true.
- // x21 : argv
- // x22 : argc
- // x23 : target
- //
- // The stack (on entry) holds the arguments and the receiver, with the
- // receiver at the highest address:
- //
- // argv[8]: receiver
- // argv -> argv[0]: arg[argc-2]
- // ... ...
- // argv[...]: arg[1]
- // argv[...]: arg[0]
- //
- // Immediately below (after) this is the exit frame, as constructed by
- // EnterExitFrame:
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
- // csp[32]: Alignment padding, if necessary.
- // csp[24]: Preserved x23 (used for target).
- // csp[16]: Preserved x22 (used for argc).
- // csp[8]: Preserved x21 (used for argv).
- // csp -> csp[0]: Space reserved for the return address.
- //
- // After a successful call, the exit frame, preserved registers (x21-x23) and
- // the arguments (including the receiver) are dropped or popped as
- // appropriate. The stub then returns.
- //
-  // After an unsuccessful call, the exit frame and preserved registers are
-  // left untouched, and the stub either throws an exception by jumping to
-  // one of the provided throw_ labels, or it falls through. The failure
-  // details are passed through in x0.
- ASSERT(csp.Is(__ StackPointer()));
-
- Isolate* isolate = masm->isolate();
-
- const Register& argv = x21;
- const Register& argc = x22;
- const Register& target = x23;
-
- if (do_gc) {
- // Call Runtime::PerformGC, passing x0 (the result parameter for
- // PerformGC) and x1 (the isolate).
- __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(
- ExternalReference::perform_gc_function(isolate), 2, 0);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ Mov(x10, Operand(scope_depth));
- __ Ldr(x11, MemOperand(x10));
- __ Add(x11, x11, 1);
- __ Str(x11, MemOperand(x10));
- }
-
- // Prepare AAPCS64 arguments to pass to the builtin.
- __ Mov(x0, argc);
- __ Mov(x1, argv);
- __ Mov(x2, Operand(ExternalReference::isolate_address(isolate)));
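-
- // With this, the callee sees the first three AAPCS64 integer argument
- // registers as x0 = argc, x1 = argv and x2 = isolate, i.e. a C function of
- // roughly this shape (an illustration, not the exact V8 declaration):
- //
- // Object* Builtin(int argc, Object** argv, Isolate* isolate);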
-
- // Store the return address on the stack, in the space previously allocated
- // by EnterExitFrame. The return address is queried by
- // ExitFrame::GetStateForFramePointer.
- Label return_location;
- __ Adr(x12, &return_location);
- __ Poke(x12, 0);
- if (__ emit_debug_code()) {
- // Verify that the slot just below the address saved in fp[kSPOffset]
- // (that is, at fp[kSPOffset] - 8) holds the return location (currently
- // in x12).
- Register temp = masm->Tmp1();
- __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
- __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
- __ Cmp(temp, x12);
- __ Check(eq, kReturnAddressNotFoundInFrame);
- }
-
- // Call the builtin.
- __ Blr(target);
- __ Bind(&return_location);
- const Register& result = x0;
-
- if (always_allocate) {
- __ Mov(x10, Operand(scope_depth));
- __ Ldr(x11, MemOperand(x10));
- __ Sub(x11, x11, 1);
- __ Str(x11, MemOperand(x10));
- }
-
- // x0 result The return code from the call.
- // x21 argv
- // x22 argc
- // x23 target
- //
- // If all of the result bits matching kFailureTagMask are '1', the result is
- // a failure. Otherwise, it's an ordinary tagged object and the call was a
- // success.
- Label failure;
- __ And(x10, result, kFailureTagMask);
- __ Cmp(x10, kFailureTagMask);
- __ B(&failure, eq);
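-
- // As a concrete illustration (assuming the usual two-bit failure tag
- // implied by kFailureTagMask): a failure pointer ends in 0b11, so
- // (value & kFailureTagMask) == kFailureTagMask holds for it, while an
- // ordinary tagged heap object ends in 0b01 and fails the comparison.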
-
- // The call succeeded, so unwind the stack and return.
-
- // Restore callee-saved registers x21-x23.
- __ Mov(x11, argc);
-
- __ Peek(argv, 1 * kPointerSize);
- __ Peek(argc, 2 * kPointerSize);
- __ Peek(target, 3 * kPointerSize);
-
- __ LeaveExitFrame(save_doubles_, x10, true);
- ASSERT(jssp.Is(__ StackPointer()));
- // Pop or drop the remaining stack slots and return from the stub.
- // jssp[24]: Arguments array (of size argc), including receiver.
- // jssp[16]: Preserved x23 (used for target).
- // jssp[8]: Preserved x22 (used for argc).
- // jssp[0]: Preserved x21 (used for argv).
- __ Drop(x11);
- __ Ret();
-
- // The stack pointer is still csp if we aren't returning, and the frame
- // hasn't changed (except for the return address).
- __ SetStackPointer(csp);
-
- __ Bind(&failure);
- // The call failed, so check if we need to throw an exception, and fall
- // through (to retry) otherwise.
-
- Label retry;
- // x0 result The return code from the call, including the failure
- // code and details.
- // x21 argv
- // x22 argc
- // x23 target
- // Refer to the Failure class for details of the bit layout.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
- __ B(eq, &retry); // RETRY_AFTER_GC
-
- // Special handling of out-of-memory exceptions: Pass the failure result,
- // rather than the exception descriptor.
- JumpIfOOM(masm, result, x10, throw_out_of_memory);
-
- // Retrieve the pending exception.
- const Register& exception = result;
- const Register& exception_address = x11;
- __ Mov(exception_address,
- Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ Ldr(exception, MemOperand(exception_address));
-
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, exception, x10, throw_out_of_memory);
-
- // Clear the pending exception.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
- __ Str(x10, MemOperand(exception_address));
-
- // x0 exception The exception descriptor.
- // x21 argv
- // x22 argc
- // x23 target
-
- // Special handling of termination exceptions, which are uncatchable by
- // JavaScript code.
- __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
- __ B(eq, throw_termination);
-
- // Handle normal exception.
- __ B(throw_normal);
-
- __ Bind(&retry);
- // The result (x0) is passed through as the next PerformGC parameter.
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // The Abort mechanism relies on CallRuntime, which in turn relies on
- // CEntryStub, so until this stub has been generated, we have to use a
- // fall-back Abort mechanism.
- //
- // Note that this stub must be generated before any use of Abort.
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
-
- ASM_LOCATION("CEntryStub::Generate entry");
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Register parameters:
- // x0: argc (including receiver, untagged)
- // x1: target
- //
- // The stack on entry holds the arguments and the receiver, with the receiver
- // at the highest address:
- //
- // jssp[argc-1]: receiver
- // jssp[argc-2]: arg[argc-2]
- // ... ...
- // jssp[1]: arg[1]
- // jssp[0]: arg[0]
- //
- // The arguments are in reverse order, so that arg[argc-2] is actually the
- // first argument to the target function and arg[0] is the last.
- ASSERT(jssp.Is(__ StackPointer()));
- const Register& argc_input = x0;
- const Register& target_input = x1;
-
- // Calculate argv, argc and the target address, and store them in
- // callee-saved registers so we can retry the call without having to reload
- // these arguments.
- // TODO(jbramley): If the first call attempt succeeds in the common case (as
- // it should), then we might be better off putting these parameters directly
- // into their argument registers, rather than using callee-saved registers and
- // preserving them on the stack.
- const Register& argv = x21;
- const Register& argc = x22;
- const Register& target = x23;
-
- // Derive argv from the stack pointer so that it points to the first argument
- // (arg[argc-2]), or just below the receiver in case there are no arguments.
- // - Adjust for the arg[] array.
- Register temp_argv = x11;
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
- // - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
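-
- // In other words, temp_argv = jssp + argc * kPointerSize - kPointerSize;
- // per the comments above, this places argv at the first argument, or just
- // below the receiver if there are no arguments.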
-
- // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
- // registers.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_, x10, 3);
- ASSERT(csp.Is(__ StackPointer()));
-
- // Poke callee-saved registers into reserved space.
- __ Poke(argv, 1 * kPointerSize);
- __ Poke(argc, 2 * kPointerSize);
- __ Poke(target, 3 * kPointerSize);
-
- // We normally only keep tagged values in callee-saved registers, as they
- // could be pushed onto the stack by called stubs and functions, and on the
- // stack they can confuse the GC. However, we're only calling C functions
- // which can push arbitrary data onto the stack anyway, and so the GC won't
- // examine that part of the stack.
- __ Mov(argc, argc_input);
- __ Mov(target, target_input);
- __ Mov(argv, temp_argv);
-
- Label throw_normal;
- Label throw_termination;
- Label throw_out_of_memory;
-
- // Call the runtime function.
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- &throw_out_of_memory,
- false,
- false);
-
- // If successful, the previous GenerateCore will have returned to the
- // calling code. Otherwise, we fall through into the following.
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- &throw_out_of_memory,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
- GenerateCore(masm,
- &throw_normal,
- &throw_termination,
- &throw_out_of_memory,
- true,
- true);
-
- // We didn't execute a return case, so the stack frame hasn't been updated
- // (except for the return address slot). However, we don't need to initialize
- // jssp because the throw method will immediately overwrite it when it
- // unwinds the stack.
- if (__ emit_debug_code()) {
- __ Mov(jssp, kDebugZapValue);
- }
- __ SetStackPointer(jssp);
-
- // Throw exceptions.
- // If we throw an exception, we can end up re-entering CEntryStub before we
- // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
- // here.
- __ Bind(&throw_out_of_memory);
- ASM_LOCATION("Throw out of memory");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress,
- isolate)));
- __ Str(xzr, MemOperand(x2));
-
- // Set pending exception and x0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, x0, x10, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory)));
- __ Bind(&already_have_failure);
- __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ Str(x0, MemOperand(x2));
- // Fall through to the next label.
-
- __ Bind(&throw_termination);
- ASM_LOCATION("Throw termination");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ ThrowUncatchable(x0, x10, x11, x12, x13);
-
- __ Bind(&throw_normal);
- ASM_LOCATION("Throw normal");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ Throw(x0, x10, x11, x12, x13);
-}
-
-
-// This is the entry point from C++. 5 arguments are provided in x0-x4.
-// See the use of the CALL_GENERATED_CODE macro in src/execution.cc for an
-// example.
-// Input:
-// x0: code entry.
-// x1: function.
-// x2: receiver.
-// x3: argc.
-// x4: argv.
-// Output:
-// x0: result.
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- ASSERT(jssp.Is(__ StackPointer()));
- Register code_entry = x0;
-
- // Enable instruction instrumentation. This only works on the simulator, and
- // will have no effect on the model or real hardware.
- __ EnableInstrumentation();
-
- Label invoke, handler_entry, exit;
-
- // Push callee-saved registers and synchronize the system stack pointer (csp)
- // and the JavaScript stack pointer (jssp).
- //
- // We must not write to jssp until after the PushCalleeSavedRegisters()
- // call, since jssp is itself a callee-saved register.
- __ SetStackPointer(csp);
- __ PushCalleeSavedRegisters();
- __ Mov(jssp, csp);
- __ SetStackPointer(jssp);
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Build an entry frame (see layout below).
- Isolate* isolate = masm->isolate();
-
- // Build an entry frame.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
- __ Mov(x13, bad_frame_pointer);
- __ Mov(x12, Operand(Smi::FromInt(marker)));
- __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ Ldr(x10, MemOperand(x11));
-
- // TODO(all): Pushing the marker twice seems unnecessary.
- // In this case perhaps we could push xzr in the slot for the context
- // (see MAsm::EnterFrame).
- __ Push(x13, x12, x12, x10);
- // Set up fp.
- __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
-
- // Push the JS entry frame marker. Also set js_entry_sp if this is the
- // outermost JS call.
- Label non_outermost_js, done;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ Mov(x10, Operand(ExternalReference(js_entry_sp)));
- __ Ldr(x11, MemOperand(x10));
- __ Cbnz(x11, &non_outermost_js);
- __ Str(fp, MemOperand(x10));
- __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ Push(x12);
- __ B(&done);
- __ Bind(&non_outermost_js);
- // We spare one instruction by pushing xzr since the marker is 0.
- ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
- __ Push(xzr);
- __ Bind(&done);
-
- // The frame set up looks like this:
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frame marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
-
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ B(&invoke);
-
- // Prevent the constant pool from being emitted between the record of the
- // handler_entry position and the first instruction of the sequence here.
- // There is no risk because Assembler::Emit() emits the instruction before
- // checking for constant pool emission, but we do not want to depend on
- // that.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- // TODO(jbramley): Do this in the Assembler.
- __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- }
- __ Str(code_entry, MemOperand(x10));
- __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
- __ B(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ Bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the B(&invoke) above, which
- // restores all callee-saved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
- __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ Str(x10, MemOperand(x11));
-
- // Invoke the function by calling through the JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // x0: code entry.
- // x1: function.
- // x2: receiver.
- // x3: argc.
- // x4: argv.
- // TODO(jbramley): The latest ARM code checks is_construct and conditionally
- // uses construct_entry. We probably need to do the same here.
- ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
- : Builtins::kJSEntryTrampoline,
- isolate);
- __ Mov(x10, Operand(entry));
-
- // Call the JSEntryTrampoline.
- __ Ldr(x11, MemOperand(x10)); // Dereference the address.
- __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
- __ Blr(x12);
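-
- // To spell out the indirection above: x10 holds the address of the
- // trampoline entry, x11 the (tagged) Code object loaded from it, and
- // x12 = x11 + Code::kHeaderSize - kHeapObjectTag, the address of the first
- // instruction after the Code header, which is what Blr branches to.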
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
-
- __ Bind(&exit);
- // x0 holds the result.
- // The stack pointer points to the top of the entry frame pushed on entry from
- // C++ (at the beginning of this stub):
- // jssp[0] : JS entry frame marker.
- // jssp[1] : C entry FP.
- // jssp[2] : stack frame marker.
- // jssp[3] : stack frame marker.
- // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
-
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ Pop(x10);
- __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ B(ne, &non_outermost_js_2);
- __ Mov(x11, Operand(ExternalReference(js_entry_sp)));
- __ Str(xzr, MemOperand(x11));
- __ Bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ Pop(x10);
- __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ Str(x10, MemOperand(x11));
-
- // Reset the stack to the callee saved registers.
- __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
- // Restore the callee-saved registers and return.
- ASSERT(jssp.Is(__ StackPointer()));
- __ Mov(csp, jssp);
- __ SetStackPointer(csp);
- __ PopCalleeSavedRegisters();
- // After this point, we must not modify jssp because it is a callee-saved
- // register which we have just restored.
- __ Ret();
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x1 : receiver
- // -- x0 : key
- // -----------------------------------
- Register key = x0;
- receiver = x1;
- __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
- __ B(ne, &miss);
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : name
- // -- x0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = x0;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
-
- __ Bind(&miss);
- StubCompiler::TailCallBuiltin(masm,
- BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x1 : receiver
- // -- x0 : key
- // -----------------------------------
- Register key = x0;
- receiver = x1;
- __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
- __ B(ne, &miss);
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : name
- // -- x0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = x0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss);
-
- __ Bind(&miss);
- StubCompiler::TailCallBuiltin(masm,
- BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("StoreArrayLengthStub::Generate");
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : receiver
- // -- x1 : key
- // -- x0 : value
- // -----------------------------------
- Register key = x1;
- receiver = x2;
- value = x0;
- __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
- __ B(ne, &miss);
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x2 : key
- // -- x1 : receiver
- // -- x0 : value
- // -----------------------------------
- receiver = x1;
- value = x0;
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE);
- __ B(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE);
- __ B(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset));
- __ CompareRoot(x10, Heap::kHashTableMapRootIndex);
- __ B(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ Bind(&miss);
- StubCompiler::TailCallBuiltin(masm,
- BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Stack on entry:
- // jssp[0]: function.
- // jssp[8]: object.
- //
- // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
- // instanceof.
-
- Register result = x0;
- Register function = right();
- Register object = left();
- Register scratch1 = x6;
- Register scratch2 = x7;
- Register res_true = x8;
- Register res_false = x9;
- // Only used if there was an inline map check site. (See
- // LCodeGen::DoInstanceOfKnownGlobal().)
- Register map_check_site = x4;
- // Delta for the instructions generated between the inline map check and the
- // instruction setting the result.
- const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
-
- Label not_js_object, slow;
-
- if (!HasArgsInRegisters()) {
- __ Pop(function, object);
- }
-
- if (ReturnTrueFalseObject()) {
- __ LoadTrueFalseRoots(res_true, res_false);
- } else {
- // This is counter-intuitive, but correct.
- __ Mov(res_true, Operand(Smi::FromInt(0)));
- __ Mov(res_false, Operand(Smi::FromInt(1)));
- }
-
- // Check that the left hand side is a JS object and load its map as a side
- // effect.
- Register map = x12;
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
-
- // If there is a call site cache, don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- Label miss;
- __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
- __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
- __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
- __ Bind(&miss);
- }
-
- // Get the prototype of the function.
- Register prototype = x13;
- __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
- MacroAssembler::kMissOnBoundFunction);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (HasCallSiteInlineCheck()) {
- // Patch the (relocated) inlined map check.
- __ GetRelocatedValueLocation(map_check_site, scratch1);
- // We have a cell, so need another level of dereferencing.
- __ Ldr(scratch1, MemOperand(scratch1));
- __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
- } else {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- }
-
- Label return_true, return_result;
- {
- // Loop through the prototype chain looking for the function prototype.
- Register chain_map = x1;
- Register chain_prototype = x14;
- Register null_value = x15;
- Label loop;
- __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- // Speculatively set a result.
- __ Mov(result, res_false);
-
- __ Bind(&loop);
-
- // If the chain prototype is the object prototype, return true.
- __ Cmp(chain_prototype, prototype);
- __ B(eq, &return_true);
-
- // If the chain prototype is null, we've reached the end of the chain, so
- // return false.
- __ Cmp(chain_prototype, null_value);
- __ B(eq, &return_result);
-
- // Otherwise, load the next prototype in the chain, and loop.
- __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
- __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
- __ B(&loop);
- }
-
- // Return sequence when no arguments are on the stack.
- // We cannot fall through to here.
- __ Bind(&return_true);
- __ Mov(result, res_true);
- __ Bind(&return_result);
- if (HasCallSiteInlineCheck()) {
- ASSERT(ReturnTrueFalseObject());
- __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
- __ GetRelocatedValueLocation(map_check_site, scratch2);
- __ Str(result, MemOperand(scratch2));
- } else {
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- }
- __ Ret();
-
- Label object_not_null, object_not_null_or_smi;
-
- __ Bind(&not_js_object);
- Register object_type = x14;
- // x0 result result return register (uninit)
- // x10 function pointer to function
- // x11 object pointer to object
- // x14 object_type type of object (uninit)
-
- // Before null, smi and string checks, check that the rhs is a function.
- // For a non-function rhs, an exception must be thrown.
- __ JumpIfSmi(function, &slow);
- __ JumpIfNotObjectType(
- function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
-
- __ Mov(result, res_false);
-
- // Null is not instance of anything.
- __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
- __ B(ne, &object_not_null);
- __ Ret();
-
- __ Bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ Ret();
-
- __ Bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch2, &slow);
- __ Ret();
-
- // Slow-case. Tail call builtin.
- __ Bind(&slow);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments have either been passed into registers or have been previously
- // popped. We need to push them before calling builtin.
- __ Push(object, function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- if (ReturnTrueFalseObject()) {
- // Reload true/false because they were clobbered in the builtin call.
- __ LoadTrueFalseRoots(res_true, res_false);
- __ Cmp(result, 0);
- __ Csel(result, res_true, res_false, eq);
- }
- __ Ret();
-}
-
-
-Register InstanceofStub::left() {
- // Object to check (instanceof lhs).
- return x11;
-}
-
-
-Register InstanceofStub::right() {
- // Constructor function (instanceof rhs).
- return x10;
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- Register arg_count = x0;
- Register key = x1;
-
- // The displacement is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(key, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- // Note that local_fp and caller_fp deliberately alias x11; the Csel below
- // selects which frame pointer ends up in the shared register.
- Register local_fp = x11;
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label skip_adaptor;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ Csel(local_fp, fp, caller_fp, ne);
- __ B(ne, &skip_adaptor);
-
- // Load the actual arguments limit found in the arguments adaptor frame.
- __ Ldr(arg_count, MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Bind(&skip_adaptor);
-
- // Check index against formal parameters count limit. Use unsigned comparison
- // to get negative check for free: branch if key < 0 or key >= arg_count.
- __ Cmp(key, arg_count);
- __ B(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ Sub(x10, arg_count, key);
- __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
- __ Ldr(x0, MemOperand(x10, kDisplacement));
- __ Ret();
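-
- // A worked example of the address computation above (illustrative values):
- // with arg_count == Smi(4) and key == Smi(1), x10 = Smi(3); untagging and
- // scaling gives 3 * kPointerSize, so the argument is loaded from
- // local_fp + 3 * kPointerSize + kDisplacement.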
-
- // Slow case: handle non-smi or out-of-bounds access to arguments by calling
- // the runtime system.
- __ Bind(&slow);
- __ Push(key);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Register caller_fp = x10;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- // Load and untag the context.
- STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
- __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
- (kSmiShift / kBitsPerByte)));
- __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
- __ B(ne, &runtime);
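-
- // The 32-bit load above exploits the layout checked by the STATIC_ASSERT:
- // with kSmiShift == 32, a smi keeps its payload in the high 32 bits of the
- // 64-bit word, so (on a little-endian target) reading the W register at
- // byte offset kContextOffset + 4 yields the untagged value directly and
- // saves an explicit SmiUntag.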
-
- // Patch the arguments.length and parameters pointer in the current frame.
- __ Ldr(x11, MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Poke(x11, 0 * kXRegSizeInBytes);
- __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
- __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
- __ Poke(x10, 1 * kXRegSizeInBytes);
-
- __ Bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
- //
- // Returns pointer to result object in x0.
-
- // Note: arg_count_smi is an alias of param_count_smi.
- Register arg_count_smi = x3;
- Register param_count_smi = x3;
- Register param_count = x7;
- Register recv_arg = x14;
- Register function = x4;
- __ Pop(param_count_smi, recv_arg, function);
- __ SmiUntag(param_count, param_count_smi);
-
- // Check if the calling frame is an arguments adaptor frame.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
-
- // x1 mapped_params number of mapped params, min(params, args) (uninit)
- // x2 arg_count number of function arguments (uninit)
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x7 param_count number of function parameters
- // x11 caller_fp caller's frame pointer
- // x14 recv_arg pointer to receiver arguments
-
- Register arg_count = x2;
- __ Mov(arg_count, param_count);
- __ B(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ Bind(&adaptor_frame);
- __ Ldr(arg_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(arg_count, arg_count_smi);
- __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Compute the mapped parameter count = min(param_count, arg_count)
- Register mapped_params = x1;
- __ Cmp(param_count, arg_count);
- __ Csel(mapped_params, param_count, arg_count, lt);
-
- __ Bind(&try_allocate);
-
- // x0 alloc_obj pointer to allocated objects: param map, backing
- // store, arguments (uninit)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x7 param_count number of function parameters
- // x10 size size of objects to allocate (uninit)
- // x14 recv_arg pointer to receiver arguments
-
- // Compute the size of backing store, parameter map, and arguments object.
- // 1. The parameter map, which has two extra words containing the context
- // and the backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
-
- // Calculate the parameter map size, assuming it exists.
- Register size = x10;
- __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(size, size, kParameterMapHeaderSize);
-
- // If there are no mapped parameters, set the running size total to zero.
- // Otherwise, use the parameter map size calculated earlier.
- __ Cmp(mapped_params, 0);
- __ CzeroX(size, eq);
-
- // 2. Add the size of the backing store and arguments object.
- __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize);
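-
- // Worked example (illustrative values): for param_count == 2 and
- // arg_count == 3, mapped_params == 2 and the total is
- // (2 * kPointerSize + kParameterMapHeaderSize) // parameter map
- // + 3 * kPointerSize + FixedArray::kHeaderSize // backing store
- // + Heap::kArgumentsObjectSize; // arguments object
- // with no mapped parameters, the first term is zeroed by the CzeroX above.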
-
- // Do the allocation of all three objects in one go. Assign this to x0, as it
- // will be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x7 param_count number of function parameters
- // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
- // x14 recv_arg pointer to receiver arguments
-
- Register global_object = x10;
- Register global_ctx = x10;
- Register args_offset = x11;
- Register aliased_args_offset = x10;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
-
- __ Ldr(args_offset, ContextMemOperand(global_ctx,
- Context::ARGUMENTS_BOILERPLATE_INDEX));
- __ Ldr(aliased_args_offset,
- ContextMemOperand(global_ctx,
- Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
- __ Cmp(mapped_params, 0);
- __ CmovX(args_offset, aliased_args_offset, ne);
-
- // Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
- JSObject::kHeaderSize / kPointerSize);
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
-
- // Use the length and set that as an in-object property.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, "elements" will point there, otherwise
- // it will point to the backing store.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x14 recv_arg pointer to receiver arguments
-
- Register elements = x5;
- __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ Cmp(mapped_params, 0);
- // Set up backing store address, because it is needed later for filling in
- // the unmapped arguments.
- Register backing_store = x6;
- __ CmovX(backing_store, elements, eq);
- __ B(eq, &skip_parameter_map);
-
- __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Add(x10, mapped_params, 2);
- __ SmiTag(x10);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Str(cp, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 0 * kPointerSize));
- __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize);
- __ Str(x10, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at:
- //
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
- //
- // The mapped parameters thus need to get indices:
- //
- // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
- // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
- //
- // We loop from right to left.
-
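- // For instance (illustrative values): with param_count == 3 and
- // mapped_params == 2, index starts at Smi(MIN_CONTEXT_SLOTS + 1) and is
- // incremented once per iteration, so the two mapped slots get context
- // indices MIN_CONTEXT_SLOTS + 1 and MIN_CONTEXT_SLOTS + 2, matching the
- // range described above.
-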
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x11 loop_count parameter loop counter (uninit)
- // x12 index parameter index (smi, uninit)
- // x13 the_hole hole value (uninit)
- // x14 recv_arg pointer to receiver arguments
-
- Register loop_count = x11;
- Register index = x12;
- Register the_hole = x13;
- Label parameters_loop, parameters_test;
- __ Mov(loop_count, mapped_params);
- __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- __ Sub(index, index, mapped_params);
- __ SmiTag(index);
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
- __ B(&parameters_test);
-
- __ Bind(&parameters_loop);
- __ Sub(loop_count, loop_count, 1);
- __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
- __ Str(index, MemOperand(elements, x10));
- __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
- __ Str(the_hole, MemOperand(backing_store, x10));
- __ Add(index, index, Operand(Smi::FromInt(1)));
- __ Bind(&parameters_test);
- __ Cbnz(loop_count, &parameters_loop);
-
- __ Bind(&skip_parameter_map);
- // Copy the arguments header and the remaining slots (if there are any).
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
- __ Str(arg_count_smi, FieldMemOperand(backing_store,
- FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
- // x6 backing_store pointer to backing store
- // x14 recv_arg pointer to receiver arguments
-
- Label arguments_loop, arguments_test;
- __ Mov(x10, mapped_params);
- __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
- __ B(&arguments_test);
-
- __ Bind(&arguments_loop);
- __ Sub(recv_arg, recv_arg, kPointerSize);
- __ Ldr(x11, MemOperand(recv_arg));
- __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- __ Add(x10, x10, 1);
-
- __ Bind(&arguments_test);
- __ Cmp(x10, arg_count);
- __ B(lt, &arguments_loop);
-
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
- //
- // Returns pointer to result object in x0.
-
- // Get the stub arguments from the frame, and make an untagged copy of the
- // parameter count.
- Register param_count_smi = x1;
- Register params = x2;
- Register function = x3;
- Register param_count = x13;
- __ Pop(param_count_smi, params, function);
- __ SmiUntag(param_count, param_count_smi);
-
- // Test if arguments adaptor needed.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label try_allocate, runtime;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &try_allocate);
-
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
- // x11 caller_fp caller's frame pointer
- // x13 param_count number of parameters passed to function
-
- // Patch the argument length and parameters pointer.
- __ Ldr(param_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(param_count, param_count_smi);
- __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
- __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Try the new space allocation. Start out with computing the size of the
- // arguments object and the elements array in words.
- Register size = x10;
- __ Bind(&try_allocate);
- __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
- __ Cmp(param_count, 0);
- __ CzeroX(size, eq);
- __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize);
-
- // Do the allocation of both objects in one go. Assign this to x0, as it will
- // be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current (native) context.
- Register global_object = x10;
- Register global_ctx = x10;
- Register args_offset = x4;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
- __ Ldr(args_offset,
- ContextMemOperand(global_ctx,
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX));
-
- // x0 alloc_obj pointer to allocated objects: parameter array and
- // arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
- // x4 args_offset offset to arguments boilerplate
- // x13 param_count number of parameters passed to function
-
- // Copy the JS object part.
- __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
- JSObject::kHeaderSize / kPointerSize);
-
- // Set the smi-tagged length as an in-object property.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ Cbz(param_count, &done);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- Register elements = x5;
- __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects: parameter array and
- // arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
- // x4 array pointer to array slot (uninit)
- // x5 elements pointer to elements array of alloc_obj
- // x13 param_count number of parameters passed to function
-
- // Copy the fixed array slots.
- Label loop;
- Register array = x4;
- // Set up pointer to first array slot.
- __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-
- __ Bind(&loop);
- // Pre-decrement the parameters pointer by kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
- // Post-increment elements by kPointerSize on each iteration.
- __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
- __ Sub(param_count, param_count, 1);
- __ Cbnz(param_count, &loop);
-
- // Return from stub.
- __ Bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // jssp[0]: last_match_info (expected JSArray)
- // jssp[8]: previous index
- // jssp[16]: subject string
- // jssp[24]: JSRegExp object
- Label runtime;
-
- // Use of registers for this function.
-
- // Variable registers:
- // x10-x13 used as scratch registers
- // w0 string_type type of subject string
- // x2 jsstring_length subject string length
- // x3 jsregexp_object JSRegExp object
- // w4 string_encoding ASCII or UC16
- // w5 sliced_string_offset if the string is a SlicedString
- // offset to the underlying string
- // w6 string_representation groups attributes of the string:
- // - is a string
- // - type of the string
- // - is a short external string
- Register string_type = w0;
- Register jsstring_length = x2;
- Register jsregexp_object = x3;
- Register string_encoding = w4;
- Register sliced_string_offset = w5;
- Register string_representation = w6;
-
- // These are in callee save registers and will be preserved by the call
- // to the native RegExp code, as this code is called using the normal
- // C calling convention. When calling directly from generated code the
- // native RegExp code will not do a GC, and therefore the contents of
- // these registers are safe to use after the call.
-
- // x19 subject subject string
- // x20 regexp_data RegExp data (FixedArray)
- // x21 last_match_info_elements info relative to the last match
- // (FixedArray)
- // x22 code_object generated regexp code
- Register subject = x19;
- Register regexp_data = x20;
- Register last_match_info_elements = x21;
- Register code_object = x22;
-
- // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
- CPURegList used_callee_saved_registers(subject,
- regexp_data,
- last_match_info_elements,
- code_object);
- __ PushCPURegList(used_callee_saved_registers);
-
- // Stack frame.
- // jssp[0] : x19
- // jssp[8] : x20
- // jssp[16]: x21
- // jssp[24]: x22
- // jssp[32]: last_match_info (JSArray)
- // jssp[40]: previous index
- // jssp[48]: subject string
- // jssp[56]: JSRegExp object
-
- const int kLastMatchInfoOffset = 4 * kPointerSize;
- const int kPreviousIndexOffset = 5 * kPointerSize;
- const int kSubjectOffset = 6 * kPointerSize;
- const int kJSRegExpOffset = 7 * kPointerSize;
-
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ Mov(x10, Operand(address_of_regexp_stack_memory_size));
- __ Ldr(x10, MemOperand(x10));
- __ Cbz(x10, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- ASSERT(jssp.Is(__ StackPointer()));
- __ Peek(jsregexp_object, kJSRegExpOffset);
- __ JumpIfSmi(jsregexp_object, &runtime);
- __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(regexp_data, kSmiTagMask);
- __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
- __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
- __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
- }
-
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ B(ne, &runtime);
-
- // Check that the number of captures fits in the static offsets vector
- // buffer. There is always at least one capture for the whole match, plus
- // additional ones due to capturing parentheses. A capture takes 2 registers,
- // so the number of capture registers is (number_of_captures + 1) * 2.
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(regexp_data,
- JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // number_of_captures * 2 <= offsets vector size - 2
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ Add(x10, x10, x10);
- __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
- __ B(hi, &runtime);
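-
- // Numeric sketch of the check above: for a pattern with one capturing
- // group, number_of_captures == 1, so x10 == 2 after the doubling, and the
- // comparison requires 2 <= kJSRegexpStaticOffsetsVectorSize - 2, i.e. that
- // (1 + 1) * 2 capture registers fit in the static offsets vector.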
-
- // Initialize offset for possibly sliced string.
- __ Mov(sliced_string_offset, 0);
-
- ASSERT(jssp.Is(__ StackPointer()));
- __ Peek(subject, kSubjectOffset);
- __ JumpIfSmi(subject, &runtime);
-
- __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-
- __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
-
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (5).
- // (2) Anything but sequential or cons? If yes, go to (6).
- // (3) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (4) Is subject external? If yes, go to (7).
- // (5) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (6) Not a long external string? If yes, go to (8).
- // (7) External string. Make it, offset-wise, look like a sequential string.
- // Go to (5).
- // (8) Short external string or not a string? If yes, bail out to runtime.
- // (9) Sliced string. Replace subject with parent. Go to (4).
-
- Label check_underlying; // (4)
- Label seq_string; // (5)
- Label not_seq_nor_cons; // (6)
- Label external_string; // (7)
- Label not_long_external; // (8)
-
- // (1) Sequential string? If yes, go to (5).
- __ And(string_representation,
- string_type,
- kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask);
- // We depend on the fact that Strings of type SeqString which are not
- // short external strings are defined by the following pattern:
- //   string_type: 0XX0 XX00
- //                ^  ^   ^^
- //                |  |   ||
- //                |  |   is a SeqString
- //                |  is not a short external String
- //                is a String
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ Cbz(string_representation, &seq_string); // Go to (5).
-
- // (2) Anything but sequential or cons? If yes, go to (6).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ Cmp(string_representation, kExternalStringTag);
- __ B(ge, &not_seq_nor_cons); // Go to (6).
-
- // (3) Cons string. Check that it's flat.
- __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
- // Replace subject with first string.
- __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
-
- // (4) Is subject external? If yes, go to (7).
- __ Bind(&check_underlying);
- // Reload the string type.
- __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ TestAndBranchIfAnySet(string_type.X(),
- kStringRepresentationMask,
- &external_string); // Go to (7).
-
- // (5) Sequential string. Load regexp code according to encoding.
- __ Bind(&seq_string);
-
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- ASSERT(jssp.Is(__ StackPointer()));
- __ Peek(x10, kPreviousIndexOffset);
- __ JumpIfNotSmi(x10, &runtime);
- __ Cmp(jsstring_length, x10);
- __ B(ls, &runtime);
-
- // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
- // before entering the exit frame.
- __ SmiUntag(x1, x10);
-
- // The third bit determines the string encoding in string_type.
- STATIC_ASSERT(kOneByteStringTag == 0x04);
- STATIC_ASSERT(kTwoByteStringTag == 0x00);
- STATIC_ASSERT(kStringEncodingMask == 0x04);
-
- // Find the code object based on the assumptions above.
- // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
- // of kPointerSize to the former to reach the latter.
- ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
- JSRegExp::kDataUC16CodeOffset);
- __ Mov(x10, kPointerSize);
- // We will need the encoding later: ASCII = 0x04
- // UC16 = 0x00
- __ Ands(string_encoding, string_type, kStringEncodingMask);
- __ CzeroX(x10, ne);
- __ Add(x10, regexp_data, x10);
- __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
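-
- // To spell out the conditional select above: for an ASCII subject the Ands
- // sets 'ne', so CzeroX clears x10 and the load uses kDataAsciiCodeOffset
- // itself; for a UC16 subject x10 keeps kPointerSize, which (given the
- // ASSERT_EQ above) turns the same load into one from kDataUC16CodeOffset.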
-
- // (E) Carry on. String handling is done.
-
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(code_object, &runtime);
-
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
- x10,
- x11);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- __ EnterExitFrame(false, x10, 1);
- ASSERT(csp.Is(__ StackPointer()));
-
- // We have 9 arguments to pass to the regexp code, so we have to pass one on
- // the stack and the rest in registers.
-
- // Note that the placement of the argument on the stack isn't standard
- // AAPCS64:
- // csp[0]: Space for the return address placed by DirectCEntryStub.
- // csp[8]: Argument 9, the current isolate address.
-
- __ Mov(x10, Operand(ExternalReference::isolate_address(isolate)));
- __ Poke(x10, kPointerSize);
-
- Register length = w11;
- Register previous_index_in_bytes = w12;
- Register start = x13;
-
- // Load start of the subject string.
- __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
- // Load the length of the original subject string, which is still on the
- // previous stack frame. We therefore address it relative to fp, which
- // points exactly two pointer sizes below the previous sp: creating the new
- // stack frame pushed the previous fp onto the stack and decremented sp by
- // 2 * kPointerSize.
- __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
-
- // Handle UC16 encoding, two bytes make one character.
- // string_encoding: if ASCII: 0x04
- // if UC16: 0x00
- STATIC_ASSERT(kStringEncodingMask == 0x04);
- __ Ubfx(string_encoding, string_encoding, 2, 1);
- __ Eor(string_encoding, string_encoding, 1);
- // string_encoding: if ASCII: 0
- // if UC16: 1
-
- // Convert string positions from characters to bytes.
- // Previous index is in x1.
- __ Lsl(previous_index_in_bytes, w1, string_encoding);
- __ Lsl(length, length, string_encoding);
- __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
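-
- // Concretely: Ubfx extracts bit 2 of string_encoding (1 for ASCII, 0 for
- // UC16) and the Eor flips it, leaving a shift amount of 0 for ASCII and 1
- // for UC16, so the three Lsl instructions above scale character counts
- // into byte counts for the subject's encoding.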
-
- // Argument 1 (x0): Subject string.
- __ Mov(x0, subject);
-
- // Argument 2 (x1): Previous index, already there.
-
- // Argument 3 (x2): Get the start of input.
- // Start of input = start of string + previous index + substring offset
- // (0 if the string is not sliced).
- __ Add(w10, previous_index_in_bytes, sliced_string_offset);
- __ Add(x2, start, Operand(w10, UXTW));
-
- // Argument 4 (x3):
- // End of input = start of input + (length of input - previous index)
- __ Sub(w10, length, previous_index_in_bytes);
- __ Add(x3, x2, Operand(w10, UXTW));
-
- // Argument 5 (x4): static offsets vector buffer.
- __ Mov(x4,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
-
- // Argument 6 (x5): Set the number of capture registers to zero to force
- // global regexps to behave as non-global. This stub is not used for global
- // regexps.
- __ Mov(x5, 0);
-
- // Argument 7 (x6): Start (high end) of backtracking stack memory area.
- __ Mov(x10, Operand(address_of_regexp_stack_memory_address));
- __ Ldr(x10, MemOperand(x10));
- __ Mov(x11, Operand(address_of_regexp_stack_memory_size));
- __ Ldr(x11, MemOperand(x11));
- __ Add(x6, x10, x11);
-
- // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
- __ Mov(x7, 1);
-
- // Locate the code entry and call it.
- __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
- DirectCEntryStub stub;
- stub.GenerateCall(masm, code_object);
-
- __ LeaveExitFrame(false, x10, true);
-
- // The generated regexp code returns an int32 in w0.
- Label failure, exception;
- __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
- __ CompareAndBranch(w0,
- NativeRegExpMacroAssembler::EXCEPTION,
- eq,
- &exception);
- __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
-
- // Success: process the result from the native regexp code.
- Register number_of_capture_registers = x12;
-
- // Calculate number of capture registers (number_of_captures + 1) * 2
- // and store it in the last match info.
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(regexp_data,
- JSRegExp::kIrregexpCaptureCountOffset));
- __ Add(x10, x10, x10);
- __ Add(number_of_capture_registers, x10, 2);
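- // That is, number_of_capture_registers = (capture_count + 1) * 2, computed
- // here as capture_count * 2 + 2 to avoid a multiplication.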
-
- // Check that the fourth argument is a JSArray object.
- ASSERT(jssp.Is(__ StackPointer()));
- __ Peek(x10, kLastMatchInfoOffset);
- __ JumpIfSmi(x10, &runtime);
- __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
-
- // Check that the JSArray is in the fast case.
- __ Ldr(last_match_info_elements,
- FieldMemOperand(x10, JSArray::kElementsOffset));
- __ Ldr(x10,
- FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
-
- // Check that the last match info has space for the capture registers and the
- // additional information (overhead).
- // (number_of_captures + 1) * 2 + overhead <= last match info size
- // (number_of_captures * 2) + 2 + overhead <= last match info size
- // number_of_capture_registers + overhead <= last match info size
- __ Ldrsw(x10,
- UntagSmiFieldMemOperand(last_match_info_elements,
- FixedArray::kLengthOffset));
- __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
- __ Cmp(x11, x10);
- __ B(gt, &runtime);
-
- // Store the capture count.
- __ SmiTag(x10, number_of_capture_registers);
- __ Str(x10,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ Str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- // Use x10 as the subject string in order to only need
- // one RecordWriteStub.
- __ Mov(x10, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- x10,
- x11,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ Mov(x10, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- x10,
- x11,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- Register last_match_offsets = x13;
- Register offsets_vector_index = x14;
- Register current_offset = x15;
-
- // Get the static offsets vector filled by the native regexp code
- // and fill the last match info.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
- __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector));
-
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // iterates down to zero (inclusive).
- __ Add(last_match_offsets,
- last_match_info_elements,
- RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
- __ Bind(&next_capture);
- __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
- __ B(mi, &done);
- // Read two 32-bit values from the static offsets vector buffer into
- // an X register.
- __ Ldr(current_offset,
- MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex));
- // Store the smi values in the last match info.
- __ SmiTag(x10, current_offset);
- // Clearing the bottom 32 bits gives us a Smi.
- STATIC_ASSERT(kSmiShift == 32);
- __ And(x11, current_offset, ~kWRegMask);
- __ Stp(x10,
- x11,
- MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex));
- __ B(&next_capture);
- __ Bind(&done);
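- // Each iteration above unpacks one X register holding two 32-bit offsets
- // into two smis. A rough C sketch (with kSmiShift == 32, as asserted):
- //   uint64_t pair = *offsets_vector++;      // two packed 32-bit offsets
- //   *match_info++ = pair << 32;             // SmiTag(low offset)
- //   *match_info++ = pair & ~0xffffffffULL;  // high offset, already in place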
-
- // Return last match info.
- __ Peek(x0, kLastMatchInfoOffset);
- __ PopCPURegList(used_callee_saved_registers);
- // Drop the 4 arguments of the stub from the stack.
- __ Drop(4);
- __ Ret();
-
- __ Bind(&exception);
- Register exception_value = x0;
- // A stack overflow (on the backtrack stack) may have occurred in the
- // RegExp code, but no exception has been created yet. If there is no
- // pending exception, handle that case in the runtime system.
- __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
- __ Mov(x11,
- Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ Ldr(exception_value, MemOperand(x11));
- __ Cmp(x10, exception_value);
- __ B(eq, &runtime);
-
- __ Str(x10, MemOperand(x11)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- Label termination_exception;
- __ JumpIfRoot(exception_value,
- Heap::kTerminationExceptionRootIndex,
- &termination_exception);
-
- __ Throw(exception_value, x10, x11, x12, x13);
-
- __ Bind(&termination_exception);
- __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
-
- __ Bind(&failure);
- __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
- __ PopCPURegList(used_callee_saved_registers);
- // Drop the 4 arguments of the stub from the stack.
- __ Drop(4);
- __ Ret();
-
- __ Bind(&runtime);
- __ PopCPURegList(used_callee_saved_registers);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (6) Not a long external string? If yes, go to (8).
- __ Bind(&not_seq_nor_cons);
- // Compare flags are still set.
- __ B(ne, &not_long_external); // Go to (8).
-
- // (7) External string. Make it, offset-wise, look like a sequential string.
- __ Bind(&external_string);
- if (masm->emit_debug_code()) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Tst(x10, kIsIndirectStringMask);
- __ Check(eq, kExternalStringExpectedButNotFound);
- __ And(x10, x10, kStringRepresentationMask);
- __ Cmp(x10, 0);
- __ Check(ne, kExternalStringExpectedButNotFound);
- }
- __ Ldr(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ B(&seq_string); // Go to (5).
-
- // (8) If this is a short external string or not a string, bail out to
- // runtime.
- __ Bind(&not_long_external);
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ TestAndBranchIfAnySet(string_representation,
- kShortExternalStringMask | kIsNotStringMask,
- &runtime);
-
- // (9) Sliced string. Replace subject with parent.
- __ Ldr(sliced_string_offset,
- UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ B(&check_underlying); // Go to (4).
-#endif
-}
-
-
-// TODO(jbramley): Don't use static registers here, but take them as arguments.
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- ASM_LOCATION("GenerateRecordCallTarget");
- // Cache the called function in a feedback vector slot. Cache states are
- // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
- // x0 : number of arguments to the construct function
- // x1 : the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi)
- Label check_array, initialize_array, initialize_non_array, megamorphic, done;
-
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
- Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->null_value());
- Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
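- // Summary of the cache-state transitions handled below (derived from the
- // code that follows):
- //   non-array callee: uninitialized -> premonomorphic -> monomorphic
- //   Array function:   uninitialized/premonomorphic -> monomorphic
- //                     (with an AllocationSite as the state object)
- //   any mismatch:     -> megamorphic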
-
- // Load the cache state.
- __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ Cmp(x4, x1);
- __ B(eq, &done);
- __ JumpIfRoot(x4, kMegamorphicRootIndex, &done);
-
- // Check if we're dealing with the Array function or not.
- __ LoadArrayFunction(x5);
- __ Cmp(x1, x5);
- __ B(eq, &check_array);
-
- // Non-array cache: Check the cache state.
- __ JumpIfRoot(x4, kPremonomorphicRootIndex, &initialize_non_array);
- __ JumpIfNotRoot(x4, kUninitializedRootIndex, &megamorphic);
-
- // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
- // immortal immovable object (null) so no write-barrier is needed.
- __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- __ LoadRoot(x10, kPremonomorphicRootIndex);
- __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ B(&done);
-
- // Array cache: Check the cache state to see if we're in a monomorphic
- // state where the state object is an AllocationSite object.
- __ Bind(&check_array);
- __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset));
- __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex, &done);
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ JumpIfRoot(x4, kUninitializedRootIndex, &initialize_array);
- __ JumpIfRoot(x4, kPremonomorphicRootIndex, &initialize_array);
-
- // Both caches: Monomorphic -> megamorphic. The sentinel is an
- // immortal immovable object (undefined) so no write-barrier is needed.
- __ Bind(&megamorphic);
- __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- __ LoadRoot(x10, kMegamorphicRootIndex);
- __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ B(&done);
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ Bind(&initialize_array);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateAllocationSiteStub create_stub;
-
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(x0);
- __ Push(x0, x1, x2, x3);
-
- __ CallStub(&create_stub);
-
- __ Pop(x3, x2, x1, x0);
- __ SmiUntag(x0);
- }
- __ B(&done);
-
- // Non-array cache: Premonomorphic -> monomorphic.
- __ Bind(&initialize_non_array);
- __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand
- // could be used to avoid this add.
- __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(x1, MemOperand(x4, 0));
-
- __ Push(x4, x2, x1);
- __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Pop(x1, x2, x4);
-
- // TODO(all): Are x4, x2 and x1 outputs? This isn't clear.
- __ Bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
- // x1 : the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if x2 is not undefined)
- Register function = x1;
- Register cache_cell = x2;
- Register slot = x3;
- Register type = x4;
- Label slow, non_function, wrap, cont;
-
- // TODO(jbramley): This function has a lot of unnamed registers. Name them,
- // and tidy things up a bit.
-
- if (NeedsChecks()) {
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &non_function);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
- }
-
- // Fast-case: Invoke the function now.
- // x1 : the (pushed) function
- ParameterCount actual(argc_);
-
- if (CallAsMethod()) {
- if (NeedsChecks()) {
- // Do not transform the receiver for strict mode functions.
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
-
- // Do not transform the receiver for natives (compiler hints already in w4).
- __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
- }
-
- // Compute the receiver in non-strict mode.
- __ Peek(x3, argc_ * kPointerSize);
-
- if (NeedsChecks()) {
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
- } else {
- __ B(&wrap);
- }
-
- __ Bind(&cont);
- }
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
-
- if (NeedsChecks()) {
- // Slow-case: Non-function called.
- __ Bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable object
- // (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
- kPointerSizeLog2));
- __ LoadRoot(x11, Heap::kUndefinedValueRootIndex);
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- }
- // Check for a function proxy.
- // The function type is in 'type' (x4).
- __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
- __ Push(function); // Put the proxy as an additional argument.
- __ Mov(x0, argc_ + 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ Bind(&non_function);
- __ Poke(function, argc_ * kXRegSizeInBytes);
- __ Mov(x0, argc_); // Set up the number of arguments.
- __ Mov(x2, 0);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- }
-
- if (CallAsMethod()) {
- __ Bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Pop(x1);
- }
- __ Poke(x0, argc_ * kPointerSize);
- __ B(&cont);
- }
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallConstructStub::Generate");
- // x0 : number of arguments
- // x1 : the function to call
- // x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if x2 is not undefined)
- Register function = x1;
- Label slow, non_function_call;
-
- // Check that the function is not a smi.
- __ JumpIfSmi(function, &non_function_call);
- // Check that the function is a JSFunction.
- Register object_type = x10;
- __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
- &slow);
-
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
- }
-
- // Jump to the function-specific construct stub.
- Register jump_reg = x4;
- Register shared_func_info = jump_reg;
- Register cons_stub = jump_reg;
- Register cons_stub_code = jump_reg;
- __ Ldr(shared_func_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(cons_stub,
- FieldMemOperand(shared_func_info,
- SharedFunctionInfo::kConstructStubOffset));
- __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
- __ Br(cons_stub_code);
-
- Label do_call;
- __ Bind(&slow);
- __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
- __ B(ne, &non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ B(&do_call);
-
- __ Bind(&non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-
- __ Bind(&do_call);
- // Set expected number of arguments to zero (not changing x0).
- __ Mov(x2, 0);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- // If the receiver is a smi, trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-
- // If the receiver is not a string, trigger the non-string case.
- __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
-
- // If the index is not a smi, trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- __ Bind(&got_smi_index_);
- // Check for index out of range.
- __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
- __ Cmp(result_, Operand::UntagSmi(index_));
- __ B(ls, index_out_of_range_);
-
- __ SmiUntag(index_);
-
- StringCharLoadGenerator::Generate(masm,
- object_,
- index_,
- result_,
- &call_runtime_);
- __ SmiTag(result_);
- __ Bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
-
- __ Bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- // Save object_ on the stack and pass index_ as argument for runtime call.
- __ Push(object_, index_);
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Mov(index_, x0);
- __ Pop(object_);
- // Reload the instance type.
- __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
-
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ B(&got_smi_index_);
-
- // Call the runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ Bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ SmiTag(index_);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Mov(result_, x0);
- call_helper.AfterCall(masm);
- __ B(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
-}
-
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- __ JumpIfNotSmi(code_, &slow_case_);
- __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
- __ B(hi, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point the code register contains the smi-tagged ASCII char code.
- STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
- __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
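- // This Add scales the smi-tagged char code straight into a pointer offset:
- // with kSmiShift == 32 and kPointerSizeLog2 == 3 (the A64 values), the
- // entry offset is (code >> 32) << 3, folded into one shifted-operand add.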
- __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
- __ Bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
- __ Bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Mov(result_, x0);
- call_helper.AfterCall(masm);
- __ B(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- // Inputs are in x0 (lhs) and x1 (rhs).
- ASSERT(state_ == CompareIC::SMI);
- ASM_LOCATION("ICCompareStub[Smis]");
- Label miss;
- // Bail out (to 'miss') unless both x0 and x1 are smis.
- __ JumpIfEitherNotSmi(x0, x1, &miss);
-
- // TODO(jbramley): Why do we only set the flags for EQ?
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- __ Subs(x0, x0, x1);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(x1);
- __ Sub(x0, x1, Operand::UntagSmi(x0));
- }
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
- ASM_LOCATION("ICCompareStub[HeapNumbers]");
-
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss, handle_lhs, values_in_d_regs;
- Label untag_rhs, untag_lhs;
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
- FPRegister rhs_d = d0;
- FPRegister lhs_d = d1;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(lhs, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rhs, &miss);
- }
-
- __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
- __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
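- // The conversions above are speculative: if an operand turns out to be a
- // heap number rather than a smi, its D register is simply overwritten by
- // the corresponding Ldr from HeapNumber::kValueOffset below.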
-
- // Load rhs if it's a heap number.
- __ JumpIfSmi(rhs, &handle_lhs);
- __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
-
- // Load lhs if it's a heap number.
- __ Bind(&handle_lhs);
- __ JumpIfSmi(lhs, &values_in_d_regs);
- __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- __ Bind(&values_in_d_regs);
- __ Fcmp(lhs_d, rhs_d);
- __ B(vs, &unordered); // Overflow flag set if either is NaN.
- STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
- __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
- __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
- __ Ret();
-
- __ Bind(&unordered);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ Bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
- __ JumpIfSmi(lhs, &unordered);
- __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
- __ B(&unordered);
- }
-
- __ Bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
- }
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASM_LOCATION("ICCompareStub[InternalizedStrings]");
- Label miss;
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(lhs, rhs, &miss);
-
- // Check that both operands are internalized strings.
- Register rhs_map = x10;
- Register lhs_map = x11;
- Register rhs_type = x10;
- Register lhs_type = x11;
- __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
- __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
-
- STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
- __ Orr(x12, lhs_type, rhs_type);
- __ TestAndBranchIfAnySet(
- x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
-
- // Internalized strings are compared by identity.
- STATIC_ASSERT(EQUAL == 0);
- __ Cmp(lhs, rhs);
- __ Cset(result, ne);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASM_LOCATION("ICCompareStub[UniqueNames]");
- ASSERT(GetCondition() == eq);
- Label miss;
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
-
- Register lhs_instance_type = w2;
- Register rhs_instance_type = w3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(lhs, rhs, &miss);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in lhs_instance_type and rhs_instance_type.
- __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
-
- // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
- // should have kInternalizedTag set.
- __ JumpIfNotUniqueName(lhs_instance_type, &miss);
- __ JumpIfNotUniqueName(rhs_instance_type, &miss);
-
- // Unique names are compared by identity.
- STATIC_ASSERT(EQUAL == 0);
- __ Cmp(lhs, rhs);
- __ Cset(result, ne);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- ASM_LOCATION("ICCompareStub[Strings]");
-
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(rhs, lhs, &miss);
-
- // Check that both operands are strings.
- Register rhs_map = x10;
- Register lhs_map = x11;
- Register rhs_type = x10;
- Register lhs_type = x11;
- __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
- __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ Orr(x12, lhs_type, rhs_type);
- __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
-
- // Fast check for identical strings.
- Label not_equal;
- __ Cmp(lhs, rhs);
- __ B(ne, &not_equal);
- __ Mov(result, EQUAL);
- __ Ret();
-
- __ Bind(&not_equal);
- // Handle non-identical strings.
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical. We know they are both
- // strings.
- if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag == 0);
- Label not_internalized_strings;
- __ Orr(x12, lhs_type, rhs_type);
- __ TestAndBranchIfAnySet(
- x12, kIsNotInternalizedMask, &not_internalized_strings);
- // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
- __ Ret();
- __ Bind(&not_internalized_strings);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- lhs_type, rhs_type, x12, x13, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, lhs, rhs, x10, x11, x12);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, lhs, rhs, x10, x11, x12, x13);
- }
-
- // Handle more complex cases in runtime.
- __ Bind(&runtime);
- __ Push(lhs, rhs);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- ASM_LOCATION("ICCompareStub[Objects]");
-
- Label miss;
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
-
- __ JumpIfEitherSmi(rhs, lhs, &miss);
-
- __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
- __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
-
- ASSERT(GetCondition() == eq);
- __ Sub(result, rhs, lhs);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[KnownObjects]");
-
- Label miss;
-
- Register result = x0;
- Register rhs = x0;
- Register lhs = x1;
-
- __ JumpIfEitherSmi(rhs, lhs, &miss);
-
- Register rhs_map = x10;
- Register lhs_map = x11;
- __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ Cmp(rhs_map, Operand(known_map_));
- __ B(ne, &miss);
- __ Cmp(lhs_map, Operand(known_map_));
- __ B(ne, &miss);
-
- __ Sub(result, rhs, lhs);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-// This method handles the case where a compare stub had the wrong
-// implementation. It calls a miss handler, which rewrites the stub. All other
-// ICCompareStub::Generate* methods should fall back into this one if their
-// operands were not the expected types.
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("ICCompareStub[Miss]");
-
- Register stub_entry = x11;
- {
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register op = x10;
- Register left = x1;
- Register right = x0;
- // Preserve some caller-saved registers.
- __ Push(x1, x0, lr);
- // Push the arguments.
- __ Mov(op, Operand(Smi::FromInt(op_)));
- __ Push(left, right, op);
-
- // Call the miss handler. This also pops the arguments.
- __ CallExternalReference(miss, 3);
-
- // Compute the entry point of the rewritten stub.
- __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
- // Restore caller-saved registers.
- __ Pop(lr, x0, x1);
- }
-
- // Tail-call to the new stub.
- __ Jump(stub_entry);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- ASSERT(!AreAliased(hash, character));
-
- // hash = seed + character;
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag smi seed and add the character.
- __ Add(hash, character, Operand(hash, LSR, kSmiShift));
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- ASSERT(!AreAliased(hash, character));
-
- // hash += character;
- __ Add(hash, hash, character);
-
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
-
- // hash += hash << 10;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
- // hash ^= hash >> 6;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // Compute hashes modulo 2^32 using a 32-bit W register.
- Register hash_w = hash.W();
- Register scratch_w = scratch.W();
- ASSERT(!AreAliased(hash_w, scratch_w));
-
- // hash += hash << 3;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
- // hash ^= hash >> 11;
- __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
- // hash += hash << 15;
- __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
-
- __ Ands(hash_w, hash_w, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- __ Mov(scratch_w, StringHasher::kZeroHash);
- __ Csel(hash_w, scratch_w, hash_w, eq);
-}
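-// Taken together, the three hash helpers above compute (a C sketch; per the
-// comments above, kZeroHash is 27):
-//   uint32_t hash = seed;
-//   for (each char c) { hash += c; hash += hash << 10; hash ^= hash >> 6; }
-//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
-//   hash &= String::kHashBitMask;
-//   if (hash == 0) hash = kZeroHash;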
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("SubStringStub::Generate");
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // jssp[0]: substring "to" offset
- // jssp[8]: substring "from" offset
- // jssp[16]: pointer to string object
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length (in debug mode).
- // If any of these assumptions fail, we call the runtime system.
-
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
-
- Register to = x0;
- Register from = x15;
- Register input_string = x10;
- Register input_length = x11;
- Register input_type = x12;
- Register result_string = x0;
- Register result_length = x1;
- Register temp = x3;
-
- __ Peek(to, kToOffset);
- __ Peek(from, kFromOffset);
-
- // Check that both from and to are smis. If not, jump to runtime.
- __ JumpIfEitherNotSmi(from, to, &runtime);
- __ SmiUntag(from);
- __ SmiUntag(to);
-
- // Calculate difference between from and to. If to < from, branch to runtime.
- __ Subs(result_length, to, from);
- __ B(mi, &runtime);
-
- // Check from is positive.
- __ Tbnz(from, kWSignBit, &runtime);
-
- // Make sure first argument is a string.
- __ Peek(input_string, kStringOffset);
- __ JumpIfSmi(input_string, &runtime);
- __ IsObjectJSStringType(input_string, input_type, &runtime);
-
- Label single_char;
- __ Cmp(result_length, 1);
- __ B(eq, &single_char);
-
- // Short-cut for the case of a trivial substring.
- Label return_x0;
- __ Ldrsw(input_length,
- UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
-
- __ Cmp(result_length, input_length);
- __ CmovX(x0, input_string, eq);
- // Return original string.
- __ B(eq, &return_x0);
-
- // Longer than original string's length or negative: unsafe arguments.
- __ B(hi, &runtime);
-
- // Shorter than original string's length: an actual substring.
-
- // x0 to substring end character offset
- // x1 result_length length of substring result
- // x10 input_string pointer to input string object
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x15 from substring start character offset
-
- // Deal with different string types: update the index if necessary and put
- // the underlying string into register unpacked_string.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- Label update_instance_type;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
-
- // Test for string types, and branch/fall through to appropriate unpacking
- // code.
- __ Tst(input_type, kIsIndirectStringMask);
- __ B(eq, &seq_or_external_string);
- __ Tst(input_type, kSlicedNotConsMask);
- __ B(ne, &sliced_string);
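- // Dispatch summary for the two tests above:
- //   indirect bit clear             -> sequential or external string
- //   indirect bit set + sliced bit  -> sliced string
- //   indirect bit set, cons         -> fall through to the cons handling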
-
- Register unpacked_string = input_string;
-
- // Cons string. Check whether it is flat, then fetch first part.
- __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
- __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
- __ Ldr(unpacked_string,
- FieldMemOperand(input_string, ConsString::kFirstOffset));
- __ B(&update_instance_type);
-
- __ Bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ Ldrsw(temp,
- UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
- __ Add(from, from, temp);
- __ Ldr(unpacked_string,
- FieldMemOperand(input_string, SlicedString::kParentOffset));
-
- __ Bind(&update_instance_type);
- __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
- __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- // TODO(all): This generates "b #+0x4". Can these be optimised out?
- __ B(&underlying_unpacked);
-
- __ Bind(&seq_or_external_string);
- // Sequential or external string. Registers unpacked_string and input_string
- // alias, so there's nothing to do here.
-
- // x0 result_string pointer to result string object (uninit)
- // x1 result_length length of substring result
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x15 from substring start character offset
- __ Bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- __ Cmp(result_length, SlicedString::kMinLength);
- // Short slice. Copy instead of slicing.
- __ B(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck the encoding
- // the newly created string's parent anyway due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
- __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
- &runtime);
- __ B(&set_slice_header);
-
- __ Bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
- &runtime);
-
- __ Bind(&set_slice_header);
- __ SmiTag(from);
- __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
- __ Str(unpacked_string,
- FieldMemOperand(result_string, SlicedString::kParentOffset));
- __ B(&return_x0);
-
- __ Bind(&copy_routine);
- }
-
- // x0 result_string pointer to result string object (uninit)
- // x1 result_length length of substring result
- // x10 unpacked_string pointer to unpacked string object
- // x11 input_length length of input string
- // x12 input_type instance type of input string
- // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
- // x13 substring_char0 pointer to first char of substring (uninit)
- // x14 result_char0 pointer to first char of result (uninit)
- // x15 from substring start character offset
- Register unpacked_char0 = x13;
- Register substring_char0 = x13;
- Register result_char0 = x14;
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
-
- __ Tst(input_type, kExternalStringTag);
- __ B(eq, &sequential_string);
-
- __ Tst(input_type, kShortExternalStringTag);
- __ B(ne, &runtime);
- __ Ldr(unpacked_char0,
- FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
- // unpacked_char0 points to the first character of the underlying string.
- __ B(&allocate_result);
-
- __ Bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Add(unpacked_char0, unpacked_string,
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- __ Bind(&allocate_result);
- // Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
-
- // Locate first character of substring to copy.
- __ Add(substring_char0, unpacked_char0, from);
-
- // Locate first character of result.
- __ Add(result_char0, result_string,
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
- __ B(&return_x0);
-
- // Allocate and copy the resulting two-byte string.
- __ Bind(&two_byte_sequential);
- __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
-
- // Locate first character of substring to copy.
- __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
-
- // Locate first character of result.
- __ Add(result_char0, result_string,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- __ Add(result_length, result_length, result_length);
- __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
-
- __ Bind(&return_x0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
- __ Drop(3);
- __ Ret();
-
- __ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ Bind(&single_char);
- // x1: result_length
- // x10: input_string
- // x12: input_type
- // x15: from (untagged)
- __ SmiTag(from);
- StringCharAtGenerator generator(
- input_string, from, result_length, x0,
- &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- // TODO(jbramley): Why doesn't this jump to return_x0?
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
- Register result = x0;
- Register left_length = scratch1;
- Register right_length = scratch2;
-
- // Compare lengths. If lengths differ, strings can't be equal. Lengths are
- // smis, and don't need to be untagged.
- Label strings_not_equal, check_zero_length;
- __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
- __ Cmp(left_length, right_length);
- __ B(eq, &check_zero_length);
-
- __ Bind(&strings_not_equal);
- __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero. If so, the strings must be equal (and empty).
- Label compare_chars;
- __ Bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbnz(left_length, &compare_chars);
- __ Mov(result, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- // Compare characters. Falls through if all characters are equal.
- __ Bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
- scratch3, &strings_not_equal);
-
- // Characters in strings are equal.
- __ Mov(result, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
- Label result_not_equal, compare_lengths;
-
- // Find minimum length and length difference.
- Register length_delta = scratch3;
- __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ Subs(length_delta, scratch1, scratch2);
-
- Register min_length = scratch1;
- __ Csel(min_length, scratch2, scratch1, gt);
- __ Cbz(min_length, &compare_lengths);
-
- // Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ Bind(&compare_lengths);
-
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-
- // Use length_delta as result if it's zero.
- Register result = x0;
- __ Subs(result, length_delta, 0);
-
- __ Bind(&result_not_equal);
- Register greater = x10;
- Register less = x11;
- __ Mov(greater, Operand(Smi::FromInt(GREATER)));
- __ Mov(less, Operand(Smi::FromInt(LESS)));
- __ CmovX(result, greater, gt);
- __ CmovX(result, less, lt);
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
- ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
-
- // Change the index to run from -length to -1 by adding length to the
- // string start. This means that the loop ends when the index reaches
- // zero, so no additional compare is needed.
- __ SmiUntag(length);
- __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ Add(left, left, scratch1);
- __ Add(right, right, scratch1);
-
- Register index = length;
- __ Neg(index, length); // index = -length;
-
- // Compare loop
- Label loop;
- __ Bind(&loop);
- __ Ldrb(scratch1, MemOperand(left, index));
- __ Ldrb(scratch2, MemOperand(right, index));
- __ Cmp(scratch1, scratch2);
- __ B(ne, chars_not_equal);
- __ Add(index, index, 1);
- __ Cbnz(index, &loop);
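- // Roughly equivalent C for this loop; biasing both pointers by length lets
- // the Cbnz double as the loop condition, avoiding a separate compare:
- //   left += length; right += length;
- //   for (int64_t i = -length; i != 0; i++) {
- //     if (left[i] != right[i]) goto chars_not_equal;
- //   }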
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[8]: left string
- Register right = x10;
- Register left = x11;
- Register result = x0;
- __ Pop(right, left);
-
- Label not_same;
- __ Subs(result, right, left);
- __ B(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- __ Ret();
-
- __ Bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
-
- // Compare flat ASCII strings natively. Remove arguments from stack first,
- // as this function will generate a return.
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
-
- __ Bind(&runtime);
-
- // Push arguments back on to the stack.
- // sp[0] = right string
- // sp[8] = left string.
- __ Push(left, right);
-
- // Call the runtime.
- // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ArrayPushStub::Generate(MacroAssembler* masm) {
- Register receiver = x0;
-
- int argc = arguments_count();
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- return;
- }
-
- Isolate* isolate = masm->isolate();
-
- if (argc != 1) {
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- Label call_builtin, attempt_to_grow_elements, with_write_barrier;
-
- Register elements_length = x8;
- Register length = x7;
- Register elements = x6;
- Register end_elements = x5;
- Register value = x4;
- // Get the elements array of the object.
- __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- x10,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
- }
-
- // Get the array's length and calculate new length.
- __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ Add(length, length, Operand(Smi::FromInt(argc)));
-
- // Check if we could survive without allocation.
- __ Ldr(elements_length,
- FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(length, elements_length);
-
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-
- if (IsFastSmiOrObjectElementsKind(elements_kind())) {
- __ B(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ Peek(value, (argc - 1) * kPointerSize);
- __ JumpIfNotSmi(value, &with_write_barrier);
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
- } else {
- // TODO(all): ARM has a redundant cmp here.
- __ B(gt, &call_builtin);
-
- __ Peek(value, (argc - 1) * kPointerSize);
- __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
- &call_builtin, argc * kDoubleSize);
- }
-
- // Save new length.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Return length.
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- if (IsFastDoubleElementsKind(elements_kind())) {
- __ Bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
- return;
- }
-
- __ Bind(&with_write_barrier);
-
- if (IsFastSmiElementsKind(elements_kind())) {
- if (FLAG_trace_elements_transitions) {
- __ B(&call_builtin);
- }
-
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfHeapNumber(x10, &call_builtin);
-
- ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
- ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
- __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
- const int header_size = FixedArrayBase::kHeaderSize;
- // Verify that the object can be transitioned in place.
- const int origin_offset = header_size + elements_kind() * kPointerSize;
- __ Ldr(x11, FieldMemOperand(receiver, origin_offset));
- __ Ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
- __ Cmp(x11, x12);
- __ B(ne, &call_builtin);
-
- const int target_offset = header_size + target_kind * kPointerSize;
- __ Ldr(x10, FieldMemOperand(x10, target_offset));
- __ Mov(x11, receiver);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, DONT_TRACK_ALLOCATION_SITE, NULL);
- }
-
- // Save new length.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- __ Bind(&attempt_to_grow_elements);
-
- if (!FLAG_inline_new) {
- __ B(&call_builtin);
- }
-
- Register argument = x2;
- __ Peek(argument, (argc - 1) * kPointerSize);
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- if (IsFastSmiElementsKind(elements_kind())) {
- __ JumpIfNotSmi(argument, &call_builtin);
- }
-
- // We could be lucky and the elements array could be at the top of new-space.
- // In this case we can just grow it in place by moving the allocation pointer
- // up.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- ASSERT(kAllocationDelta >= argc);
- Register allocation_top_addr = x5;
- Register allocation_top = x9;
- // Load top and check if it is the end of elements.
- __ Add(end_elements, elements,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Add(end_elements, end_elements, kEndElementsOffset);
- __ Mov(allocation_top_addr, Operand(new_space_allocation_top));
- __ Ldr(allocation_top, MemOperand(allocation_top_addr));
- __ Cmp(end_elements, allocation_top);
- __ B(ne, &call_builtin);
-
- __ Mov(x10, Operand(new_space_allocation_limit));
- __ Ldr(x10, MemOperand(x10));
- __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
- __ Cmp(allocation_top, x10);
- __ B(hi, &call_builtin);
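- // In effect (a sketch of the fast-path condition checked above):
- //   if (end_of_elements == *new_space_allocation_top &&
- //       *new_space_allocation_top + kAllocationDelta * kPointerSize
- //           <= *new_space_allocation_limit) {
- //     // safe to grow the elements array in place
- //   }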
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ Str(allocation_top, MemOperand(allocation_top_addr));
- // Push the argument.
- __ Str(argument, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- // TODO(all): Try to use stp here.
- __ Str(x10, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Add(elements_length,
- elements_length,
- Operand(Smi::FromInt(kAllocationDelta)));
- __ Str(elements_length,
- FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Mov(x0, length);
- __ Ret();
-
- __ Bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
-}
-
-
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : left
- // -- x0 : right
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- // Load x2 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ AssertNotSmi(x2, kExpectedAllocationSite);
- __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
- kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(state_);
- __ TailCallStub(&stub);
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- // FP registers are always available on A64.
- return true;
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- // We need some extra registers for this stub. They have been allocated,
- // but we need to save them before using them.
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
- __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
-
- __ CheckPageFlagSet(regs_.object(),
- value,
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ Bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- Register address =
- x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.Is(regs_.object()));
- ASSERT(!address.Is(x0));
- __ Mov(address, regs_.address());
- __ Mov(x0, regs_.object());
- __ Mov(x1, address);
- __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- ExternalReference function = (mode == INCREMENTAL_COMPACTION)
- ? ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate())
- : ExternalReference::incremental_marking_record_write_function(
- masm->isolate());
- __ CallCFunction(function, 3, 0);
-
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- Register mem_chunk = regs_.scratch0();
- Register counter = regs_.scratch1();
- __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
- __ Ldr(counter,
- MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
- __ Subs(counter, counter, 1);
- __ Str(counter,
- MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
- __ B(mi, &need_incremental);
-
- // If the object is not black we don't have to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ Bind(&on_black);
- // Get the value from the slot.
- Register value = regs_.scratch0();
- __ Ldr(value, MemOperand(regs_.address()));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlagClear(value,
- regs_.scratch1(),
- MemoryChunk::kEvacuationCandidateMask,
- &ensure_not_white);
-
- __ CheckPageFlagClear(regs_.object(),
- regs_.scratch1(),
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- &need_incremental);
-
- __ Bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the address
- // register temporarily.
- __ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(value,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm); // Restore the extra scratch registers we used.
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ Bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ Bind(&need_incremental);
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // We patch the first two instructions back and forth between a nop and a
- // real branch when we start and stop incremental heap marking.
- // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
- // are generated.
- // See RecordWriteStub::Patch for details.
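- // In STORE_BUFFER_ONLY mode the stub therefore starts with two nops:
- //   adr xzr, <skip_to_incremental_noncompacting>
- //   adr xzr, <skip_to_incremental_compacting>
- // and patching replaces the relevant adr with a real branch, e.g. for
- // INCREMENTAL mode:
- //   b <skip_to_incremental_noncompacting>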
- {
- InstructionAccurateScope scope(masm, 2);
- __ adr(xzr, &skip_to_incremental_noncompacting);
- __ adr(xzr, &skip_to_incremental_compacting);
- }
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ Bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ Bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // TODO(all): Possible optimisations in this function:
- // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map
- // bitfield is loaded only once.
- // 2. Refactor the Ldr/Add sequence at the start of fast_elements and
- // smi_element.
-
- // x0 value element value to store
- // x3 index_smi element index as smi
- // sp[0] array_index_smi array literal index in function as smi
- // sp[1] array array literal
-
- Register value = x0;
- Register index_smi = x3;
-
- Register array = x1;
- Register array_map = x2;
- Register array_index_smi = x4;
- __ PeekPair(array_index_smi, array, 0);
- __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
-
- Label double_elements, smi_element, fast_elements, slow_elements;
- __ CheckFastElements(array_map, x10, &double_elements);
- __ JumpIfSmi(value, &smi_element);
- __ CheckFastSmiElements(array_map, x10, &fast_elements);
-
- // Store into the array literal requires an elements transition. Call into
- // the runtime.
- __ Bind(&slow_elements);
- __ Push(array, index_smi, value);
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- __ Push(x11, array_index_smi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ Bind(&fast_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
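- // x11 is now the untagged address of the element at 'index_smi', i.e.
- // elements + (index * kPointerSize) + FixedArray::kHeaderSize - tag.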
- __ Str(value, MemOperand(x11));
- // Update the write barrier for the array store.
- __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ Bind(&smi_element);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Ret();
-
- __ Bind(&double_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
- &slow_elements);
- __ Ret();
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why?
- CEntryStub ces(1, kSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ Ldr(x1, MemOperand(fp, parameter_count_offset));
- if (function_mode_ == JS_FUNCTION_STUB_MODE) {
- __ Add(x1, x1, 1);
- }
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ Drop(x1);
- // Return to IC Miss stub, continuation still on stack.
- __ Ret();
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
- // TODO(all): This needs to be reliably consistent with
- // kReturnAddressDistanceFromFunctionStart in ::Generate.
- Assembler::BlockConstPoolScope no_const_pools(masm);
- ProfileEntryHookStub stub;
- __ Push(lr);
- __ CallStub(&stub);
- __ Pop(lr);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
- // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
- // a "Push lr" instruction, followed by a call.
- // TODO(jbramley): Verify that this call is always made with relocation.
- static const int kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
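- // The two kInstructionSize slots account for the stack bump and the
- // "Push lr"; kCallSizeWithRelocation covers the call sequence itself.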
-
- // Save all kCallerSaved registers (including lr), since this can be called
- // from anywhere.
- // TODO(jbramley): What about FP registers?
- __ PushCPURegList(kCallerSaved);
- ASSERT(kCallerSaved.IncludesAliasOf(lr));
- const int kNumSavedRegs = kCallerSaved.Count();
-
- // Compute the function's address as the first argument.
- __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
-
-#if V8_HOST_ARCH_A64
- uintptr_t entry_hook =
- reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
- __ Mov(x10, entry_hook);
-#else
- // Under the simulator we need to route the entry hook call through a
- // trampoline function at a known address.
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ Mov(x10, Operand(ExternalReference(&dispatcher,
- ExternalReference::BUILTIN_CALL,
- masm->isolate())));
- // The trampoline additionally takes an isolate as a third parameter.
- __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
-#endif
-
- // The caller's return address is above the saved temporaries.
- // Grab its location for the second argument to the hook.
- __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
-
- {
- // Create a dummy frame, as CallCFunction requires this.
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallCFunction(x10, 2, 0);
- }
-
- __ PopCPURegList(kCallerSaved);
- __ Ret();
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // When calling into C++ code the stack pointer must be csp.
- // Therefore this code must use csp for peek/poke operations when the
- // stub is generated. When the stub is called
- // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
- // and configure the stack pointer *before* doing the call.
- const Register old_stack_pointer = __ StackPointer();
- __ SetStackPointer(csp);
-
- // Put return address on the stack (accessible to GC through exit frame pc).
- __ Poke(lr, 0);
- // Call the C++ function.
- __ Blr(x10);
- // Return to calling code.
- __ Peek(lr, 0);
- __ Ret();
-
- __ SetStackPointer(old_stack_pointer);
-}
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- // Make sure the caller configured the stack pointer (see comment in
- // DirectCEntryStub::Generate).
- ASSERT(csp.Is(__ StackPointer()));
-
- intptr_t code =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
- __ Mov(x10, target);
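- // The stub expects the C++ target address in x10 (see Generate above).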
- // Branch to the stub.
- __ Blr(lr);
-}
-
-
-// Probe the name dictionary in the 'elements' register.
-// Jump to the 'done' label if a property with the given name is found.
-// Jump to the 'miss' label otherwise.
-//
-// If the lookup was successful, 'scratch2' will be equal to elements + 8 * index.
-// 'elements' and 'name' registers are preserved on miss.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- ASSERT(!AreAliased(elements, name, scratch1, scratch2));
-
- // Assert that name contains a string.
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
- __ Sub(scratch1, scratch1, 1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted by the 'And' instruction that follows.
- ASSERT(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
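- // scratch2 now holds the masked probe index:
- // ((hash >> kHashShift) + i + i * i) & (capacity - 1).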
-
- // Scale the index by multiplying by the element size.
- ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
- // TODO(jbramley): We need another scratch here, but some callers can't
- // provide a scratch3 so we have to use Tmp1(). We should find a clean way
- // to make it unavailable to the MacroAssembler for a short time.
- __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
- __ Cmp(name, __ Tmp1());
- __ B(eq, done);
- }
-
- // The inlined probes didn't find the entry.
- // Call the complete stub to scan the whole dictionary.
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
- spill_list.Combine(lr);
- spill_list.Remove(scratch1);
- spill_list.Remove(scratch2);
-
- __ PushCPURegList(spill_list);
-
- if (name.is(x0)) {
- ASSERT(!elements.is(x1));
- __ Mov(x1, name);
- __ Mov(x0, elements);
- } else {
- __ Mov(x0, elements);
- __ Mov(x1, name);
- }
-
- Label not_found;
- NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ Cbz(x0, &not_found);
- __ Mov(scratch2, x2); // Move entry index into scratch2.
- __ PopCPURegList(spill_list);
- __ B(done);
-
- __ Bind(&not_found);
- __ PopCPURegList(spill_list);
- __ B(miss);
-}
-
-
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0) {
- ASSERT(!AreAliased(receiver, properties, scratch0));
- ASSERT(name->IsUniqueName());
- // If the names of the slots probed for this hash value (probes 1 to
- // kProbes - 1) all differ from the given name, and the kProbes-th slot is
- // unused (its name is the undefined value), then the hash table is
- // guaranteed not to contain the property. This holds even if some slots
- // hold deleted properties (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
- __ Sub(index, index, 1);
- __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
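- // The name, and hence its hash, is known at compile time here, so the
- // probed index is computed with an immediate: (hash + offset) & mask.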
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Having undefined at this place means the name is not contained.
- Register tmp = index;
- __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Operand(name));
- __ B(eq, miss);
-
- Label good;
- __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
-
- // Check if the entry name is not a unique name.
- __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ Ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entity_name, miss);
- __ Bind(&good);
- }
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
- spill_list.Combine(lr);
- spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
-
- __ PushCPURegList(spill_list);
-
- __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Mov(x1, Operand(name));
- NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- // Move stub return value to scratch0. Note that scratch0 is not included in
- // spill_list and won't be clobbered by PopCPURegList.
- __ Mov(scratch0, x0);
- __ PopCPURegList(spill_list);
-
- __ Cbz(scratch0, done);
- __ B(miss);
-}
-
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- //
- // Arguments are in x0 and x1:
- // x0: property dictionary.
- // x1: the name of the property we are looking for.
- //
- // The return value is in x0: zero if the lookup failed, non-zero otherwise.
- // If the lookup is successful, x2 will contain the index of the entry.
-
- Register result = x0;
- Register dictionary = x0;
- Register key = x1;
- Register index = x2;
- Register mask = x3;
- Register hash = x4;
- Register undefined = x5;
- Register entry_key = x6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
- __ Sub(mask, mask, 1);
-
- __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted by the 'And' instruction that follows.
- ASSERT(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(index, hash,
- NameDictionary::GetProbeOffset(i) << Name::kHashShift);
- } else {
- __ Mov(index, hash);
- }
- __ And(index, mask, Operand(index, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Having undefined at this place means the name is not contained.
- __ Cmp(entry_key, undefined);
- __ B(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ Cmp(entry_key, key);
- __ B(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a unique name.
- __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
- }
- }
-
- __ Bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup, probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ Mov(result, 0);
- __ Ret();
- }
-
- __ Bind(&in_dictionary);
- __ Mov(result, 1);
- __ Ret();
-
- __ Bind(&not_in_dictionary);
- __ Mov(result, 0);
- __ Ret();
-}
-
-
-template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- ASM_LOCATION("CreateArrayDispatch");
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
-
- } else if (mode == DONT_OVERRIDE) {
- Register kind = x3;
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
- // TODO(jbramley): Is this the best way to handle this? Can we make the
- // tail calls conditional, rather than hopping over each one?
- __ CompareAndBranch(kind, candidate_kind, ne, &next);
- T stub(candidate_kind);
- __ TailCallStub(&stub);
- __ Bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
-
- } else {
- UNREACHABLE();
- }
-}
-
-
-// TODO(jbramley): If this needs to be a special case, make it a proper template
-// specialization, and not a separate function.
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- ASM_LOCATION("CreateArrayDispatchOneArgument");
- // x0 - argc
- // x1 - constructor?
- // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
- // sp[0] - last argument
-
- Register allocation_site = x2;
- Register kind = x3;
-
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // Is the low bit set? If so, the array is holey.
- __ Tbnz(kind, 0, &normal_sequence);
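- // The STATIC_ASSERTs above guarantee that the holey kinds are exactly
- // the odd values, so testing bit 0 of 'kind' is sufficient.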
- }
-
- // Look at the last argument.
- // TODO(jbramley): What does a 0 argument represent?
- __ Peek(x10, 0);
- __ Cbz(x10, &normal_sequence);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(holey_initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
-
- __ Bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
- __ Orr(kind, kind, 1);
-
- if (FLAG_debug_code) {
- __ Ldr(x10, FieldMemOperand(allocation_site, 0));
- __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
- &normal_sequence);
- __ Assert(eq, kExpectedAllocationSite);
- }
-
- // Save the resulting elements kind in type info. We can't just store 'kind'
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field; upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ldr(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
- __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
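- // transition_info is a smi, so adding this smi delta bumps the packed
- // kind to its holey counterpart while leaving the upper bits intact.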
- __ Str(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
-
- __ Bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
- // TODO(jbramley): Is this the best way to handle this? Can we make the
- // tail calls conditional, rather than hopping over each one?
- __ CompareAndBranch(kind, candidate_kind, ne, &next);
- ArraySingleArgumentConstructorStub stub(candidate_kind);
- __ TailCallStub(&stub);
- __ Bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-
-template<class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(kind);
- stub.GetCode(isolate);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate);
- }
- }
-}
-
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few stub variants.
- InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate);
- InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate);
- InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate);
- }
-}
-
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- Register argc = x0;
- if (argument_count_ == ANY) {
- Label zero_case, n_case;
- __ Cbz(argc, &zero_case);
- __ Cmp(argc, 1);
- __ B(ne, &n_case);
-
- // One argument.
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ Bind(&zero_case);
- // No arguments.
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ Bind(&n_case);
- // N arguments.
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("ArrayConstructorStub::Generate");
- // ----------- S t a t e -------------
- // -- x0 : argc (only if argument_count_ == ANY)
- // -- x1 : constructor
- // -- x2 : feedback vector (fixed array or undefined)
- // -- x3 : slot index (if x2 is fixed array)
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
- Register constructor = x1;
- Register feedback_vector = x2;
- Register slot_index = x3;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- Label unexpected_map, map_ok;
- // Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- // The following Smi check catches both a NULL and a Smi.
- __ JumpIfSmi(x10, &unexpected_map);
- __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
- __ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
- __ Bind(&map_ok);
-
- // In feedback_vector, we expect either undefined or a valid fixed array.
- Label okay_here;
- Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here);
- __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset));
- __ Cmp(x10, Operand(fixed_array_map));
- __ Assert(eq, kExpectedFixedArrayInFeedbackVector);
-
- // slot_index should be a smi if we don't have undefined in feedback_vector.
- __ AssertSmi(slot_index);
-
- __ Bind(&okay_here);
- }
-
- Register allocation_site = x2; // Overwrites feedback_vector.
- Register kind = x3;
- Label no_info;
- // Get the elements kind and case on that.
- __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info);
- __ Add(feedback_vector, feedback_vector,
- Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2));
- __ Ldr(allocation_site, FieldMemOperand(feedback_vector,
- FixedArray::kHeaderSize));
-
- // If the feedback vector is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info);
-
- __ Ldrsw(kind,
- UntagSmiFieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
- __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
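- // ElementsKindBits occupies the low bits of transition_info (its kShift
- // is 0, as asserted in CreateArrayDispatchOneArgument above), so masking
- // is enough to extract the kind.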
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ Bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-}
-
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
- Label zero_case, n_case;
- Register argc = x0;
-
- __ Cbz(argc, &zero_case);
- __ CompareAndBranch(argc, 1, ne, &n_case);
-
- // One argument.
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-
- // We might need to create a holey array; look at the first argument.
- __ Peek(x10, 0);
- __ Cbz(x10, &packed_case);
-
- InternalArraySingleArgumentConstructorStub
- stub1_holey(GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
-
- __ Bind(&packed_case);
- }
- InternalArraySingleArgumentConstructorStub stub1(kind);
- __ TailCallStub(&stub1);
-
- __ Bind(&zero_case);
- // No arguments.
- InternalArrayNoArgumentConstructorStub stub0(kind);
- __ TailCallStub(&stub0);
-
- __ Bind(&n_case);
- // N arguments.
- InternalArrayNArgumentsConstructorStub stubN(kind);
- __ TailCallStub(&stubN);
-}
-
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- x1 : constructor
- // -- sp[0] : return address
- // -- sp[4] : last argument
- // -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
-
- Register constructor = x1;
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- Label unexpected_map, map_ok;
- // Initial map for the builtin Array function should be a map.
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- // The following Smi check catches both a NULL and a Smi.
- __ JumpIfSmi(x10, &unexpected_map);
- __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
- __ Bind(&unexpected_map);
- __ Abort(kUnexpectedInitialMapForArrayFunction);
- __ Bind(&map_ok);
- }
-
- Register kind = w3;
- // Figure out the right elements kind
- __ Ldr(x10, FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // TODO(jbramley): Add a helper function to read elements kind from an
- // existing map.
- // Load the map's "bit field 2" into result.
- __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount);
-
- if (FLAG_debug_code) {
- Label done;
- __ Cmp(x3, FAST_ELEMENTS);
- __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
- __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- }
-
- Label fast_elements_case;
- __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
- __ Bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
-}
-
-
-void CallApiFunctionStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : callee
- // -- x4 : call_data
- // -- x2 : holder
- // -- x1 : api_function_address
- // -- cp : context
- // --
- // -- sp[0] : last argument
- // -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[argc * 8] : receiver
- // -----------------------------------
-
- Register callee = x0;
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
- Register context = cp;
-
- int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
- bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
-
- Isolate* isolate = masm->isolate();
-
- // FunctionCallbackArguments: context, callee and call data.
- __ Push(context, callee, call_data);
-
- // Load context from callee
- __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
-
- if (!call_data_undefined) {
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- }
- Register isolate_reg = x5;
- __ Mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate)));
-
- // FunctionCallbackArguments:
- // return value, return value default, isolate, holder.
- __ Push(call_data, call_data, isolate_reg, holder);
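- // Together with the earlier Push(context, callee, call_data) this lays
- // out the seven FCA slots asserted above, with the holder ending up on
- // top of the stack (index 0).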
-
- // Prepare arguments.
- Register args = x6;
- __ Mov(args, masm->StackPointer());
-
- // Allocate the v8::Arguments structure in the arguments' space, since it's
- // not controlled by GC.
- const int kApiStackSpace = 4;
-
- // Allocate space so that CallApiFunctionAndReturn can store some scratch
- // registers on the stack.
- const int kCallApiFunctionSpillSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
-
- // TODO(all): Optimize this with stp and suchlike.
- ASSERT(!AreAliased(x0, api_function_address));
- // x0 = FunctionCallbackInfo&
- // Arguments is after the return address.
- __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
- // FunctionCallbackInfo::implicit_args_
- __ Str(args, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::values_
- __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
- __ Str(x10, MemOperand(x0, 1 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc
- __ Mov(x10, argc);
- __ Str(x10, MemOperand(x0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call = 0
- __ Str(xzr, MemOperand(x0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- AllowExternalCallThatCantCauseGC scope(masm);
- MemOperand context_restore_operand(
- fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- // Stores return the first JS argument.
- int return_value_offset = 0;
- if (is_store) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
-
- const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- return_value_operand,
- &context_restore_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
- // -- ...
- // -- x2 : api_function_address
- // -----------------------------------
-
- Register api_function_address = x2;
-
- __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
- __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
-
- const int kApiStackSpace = 1;
-
- // Allocate space so that CallApiFunctionAndReturn can store some scratch
- // registers on the stack.
- const int kCallApiFunctionSpillSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
-
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // x1 (internal::Object** args_) as the data.
- __ Poke(x1, 1 * kPointerSize);
- __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
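- // x1 now points at the stack slot holding args_, which is also the start
- // of the AccessorInfo structure.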
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
- masm->isolate());
-
- const int spill_offset = 1 + kApiStackSpace;
- __ CallApiFunctionAndReturn(api_function_address,
- thunk_ref,
- kStackUnwindSpace,
- spill_offset,
- MemOperand(fp, 6 * kPointerSize),
- NULL);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/code-stubs-a64.h b/deps/v8/src/a64/code-stubs-a64.h
deleted file mode 100644
index 0709bfc511..0000000000
--- a/deps/v8/src/a64/code-stubs-a64.h
+++ /dev/null
@@ -1,469 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_CODE_STUBS_A64_H_
-#define V8_A64_CODE_STUBS_A64_H_
-
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // TODO(all): These don't seem to be used any more. Delete them.
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- // Stub to record the write of 'value' at 'address' in 'object'.
- // Typically 'address' = 'object' + <some offset>.
- // See MacroAssembler::RecordWriteField() for example.
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static Mode GetMode(Code* stub) {
- // Find the mode depending on the first two instructions.
- Instruction* instr1 =
- reinterpret_cast<Instruction*>(stub->instruction_start());
- Instruction* instr2 = instr1->following();
-
- if (instr1->IsUncondBranchImm()) {
- ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
- return INCREMENTAL;
- }
-
- ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
-
- if (instr2->IsUncondBranchImm()) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(instr2->IsPCRelAddressing());
-
- return STORE_BUFFER_ONLY;
- }
-
- // We patch the first two instructions of the stub back and forth between an
- // adr and a branch when we start and stop incremental heap marking.
- // The branch is
- // b label
- // The adr is
- // adr xzr label
- // so effectively a nop.
- static void Patch(Code* stub, Mode mode) {
- // We are going to patch the first two instructions of the stub.
- PatchingAssembler patcher(
- reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
- Instruction* instr1 = patcher.InstructionAt(0);
- Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
- // Instructions must be either 'adr' or 'b'.
- ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
- ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
- // Retrieve the offsets to the labels.
- int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
- int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
-
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
- patcher.adr(xzr, offset_to_incremental_compacting);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- patcher.adr(xzr, offset_to_incremental_noncompacting);
- patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- }
-
- private:
- // This is a helper class to manage the registers associated with the stub.
- // The 'object' and 'address' registers must be preserved.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch)
- : object_(object),
- address_(address),
- scratch0_(scratch),
- saved_regs_(kCallerSaved) {
- ASSERT(!AreAliased(scratch, object, address));
-
- // We would like to require more scratch registers for this stub,
- // but the number of registers comes down to the ones used in
- // FullCodeGen::SetVar(), which is architecture independent.
- // We allocate 2 extra scratch registers that we'll save on the stack.
- CPURegList pool_available = GetValidRegistersForAllocation();
- CPURegList used_regs(object, address, scratch);
- pool_available.Remove(used_regs);
- scratch1_ = Register(pool_available.PopLowestIndex());
- scratch2_ = Register(pool_available.PopLowestIndex());
-
- // The SaveCallerSaveRegisters method needs to save caller-saved registers,
- // but we don't bother saving ip0 and ip1 because they are used as scratch
- // registers by the MacroAssembler.
- saved_regs_.Remove(ip0);
- saved_regs_.Remove(ip1);
-
- // The scratch registers will be restored by other means so we don't need
- // to save them with the other caller saved registers.
- saved_regs_.Remove(scratch0_);
- saved_regs_.Remove(scratch1_);
- saved_regs_.Remove(scratch2_);
- }
-
- void Save(MacroAssembler* masm) {
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->Push(scratch1_, scratch2_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->Pop(scratch2_, scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- // TODO(all): This can be very expensive, and it is likely that not every
- // register will need to be preserved. Can we improve this?
- masm->PushCPURegList(saved_regs_);
- if (mode == kSaveFPRegs) {
- masm->PushCPURegList(kCallerSavedFP);
- }
- }
-
- void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
- // TODO(all): This can be very expensive, and it is likely that not every
- // register will need to be preserved. Can we improve this?
- if (mode == kSaveFPRegs) {
- masm->PopCPURegList(kCallerSavedFP);
- }
- masm->PopCPURegList(saved_regs_);
- }
-
- Register object() { return object_; }
- Register address() { return address_; }
- Register scratch0() { return scratch0_; }
- Register scratch1() { return scratch1_; }
- Register scratch2() { return scratch2_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- Register scratch2_;
- CPURegList saved_regs_;
-
- // TODO(all): We should consider moving this somewhere else.
- static CPURegList GetValidRegistersForAllocation() {
- // The list of valid registers for allocation is defined as all the
- // registers without those with a special meaning.
- //
- // The default list excludes registers x26 to x31 because they are
- // reserved for the following purpose:
- // - x26 root register
- // - x27 context pointer register
- // - x28 jssp
- // - x29 frame pointer
- // - x30 link register(lr)
- // - x31 xzr/stack pointer
- CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
-
- // We also remove MacroAssembler's scratch registers.
- list.Remove(ip0);
- list.Remove(ip1);
- list.Remove(x8);
- list.Remove(x9);
-
- return list;
- }
-
- friend class RecordWriteStub;
- };
-
- // A list of stub variants which are pregenerated.
- // The variants are stored in the same format as the minor key, so
- // MinorKeyFor() can be used to populate and check this list.
- static const int kAheadOfTime[];
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return MinorKeyFor(object_, value_, address_, remembered_set_action_,
- save_fp_regs_mode_);
- }
-
- static int MinorKeyFor(Register object,
- Register value,
- Register address,
- RememberedSetAction action,
- SaveFPRegsMode fp_mode) {
- ASSERT(object.Is64Bits());
- ASSERT(value.Is64Bits());
- ASSERT(address.Is64Bits());
- return ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 5> {};
- class ValueBits: public BitField<int, 5, 5> {};
- class AddressBits: public BitField<int, 10, 5> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
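- // The minor key thus packs the object register code in bits 0-4, the
- // value register code in bits 5-9, the address register code in bits
- // 10-14, the remembered set action in bit 15 and the FP mode in bit 16.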
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
-// Helper to call C++ functions from generated code. The caller must prepare
-// the exit frame before doing the call with GenerateCall.
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<Name> name,
- Register scratch0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return NameDictionaryLookup; }
-
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- LookupMode mode_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in x0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compare two flat ASCII strings for equality and returns result
- // in x0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
-};
-
-
-struct PlatformCallInterfaceDescriptor {
- explicit PlatformCallInterfaceDescriptor(
- TargetAddressStorageMode storage_mode)
- : storage_mode_(storage_mode) { }
-
- TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
- TargetAddressStorageMode storage_mode_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_A64_CODE_STUBS_A64_H_
diff --git a/deps/v8/src/a64/codegen-a64.cc b/deps/v8/src/a64/codegen-a64.cc
deleted file mode 100644
index 3f0e2295df..0000000000
--- a/deps/v8/src/a64/codegen-a64.cc
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-a64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_a64_machine_code = NULL;
-double fast_exp_simulator(double x) {
- Simulator * simulator = Simulator::current(Isolate::Current());
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(x),
- Simulator::CallArgument::End()
- };
- return simulator->CallDouble(fast_exp_a64_machine_code, args);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
-
- // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
- // an AAPCS64-compliant exp() function. This will be faster than the C
- // library's exp() function, but probably less accurate.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
-
- ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- masm.SetStackPointer(csp);
-
- // The argument will be in d0 on entry.
- DoubleRegister input = d0;
- // Use other caller-saved registers for all other values.
- DoubleRegister result = d1;
- DoubleRegister double_temp1 = d2;
- DoubleRegister double_temp2 = d3;
- Register temp1 = x10;
- Register temp2 = x11;
- Register temp3 = x12;
-
- MathExpGenerator::EmitMathExp(&masm, input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3);
- // Move the result to the return register.
- masm.Fmov(d0, result);
- masm.Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_a64_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
- return &std::sqrt;
-}
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-// -------------------------------------------------------------------------
-// Code generators
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_memento_found) {
- // ----------- S t a t e -------------
- // -- x2 : receiver
- // -- x3 : target map
- // -----------------------------------
- Register receiver = x2;
- Register map = x3;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
- allocation_memento_found);
- }
-
- // Set transitioned map.
- __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- map,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- x3 : target map, scratch for subsequent call
- // -----------------------------------
- Register receiver = x2;
- Register target_map = x3;
-
- Label gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- Register elements = x4;
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
- __ Push(lr);
- Register length = x5;
- __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
- FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- Register array_size = x6;
- Register array = x7;
- __ Lsl(array_size, length, kDoubleSizeLog2);
- __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
- __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
- // Register array is non-tagged heap object.
-
- // Set the destination FixedDoubleArray's length and map.
- Register map_root = x6;
- __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
- __ SmiTag(x11, length);
- __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
-
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Add(x10, array, kHeapObjectTag);
- __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
- x6, kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- Register src_elements = x10;
- Register dst_elements = x11;
- Register dst_end = x12;
- __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
- __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
-
- FPRegister nan_d = d1;
- __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
-
- Label entry, done;
- __ B(&entry);
-
- __ Bind(&only_change_map);
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ B(&done);
-
- // Call into runtime if GC is required.
- __ Bind(&gc_required);
- __ Pop(lr);
- __ B(fail);
-
-  // Iterate over the array, copying and converting smis to doubles. If an
-  // element is non-smi, write a hole to the destination.
- {
- Label loop;
- __ Bind(&loop);
- __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
- __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
- __ Tst(x13, kSmiTagMask);
- __ Fcsel(d0, d0, nan_d, eq);
- __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
-
- __ Bind(&entry);
- __ Cmp(dst_elements, dst_end);
- __ B(lt, &loop);
- }
-
- __ Pop(lr);
- __ Bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -- x3 : target map, scratch for subsequent call
- // -- x4 : scratch (elements)
- // -----------------------------------
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
- Register target_map = x3;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- Label only_change_map;
- Register elements = x4;
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
- __ Push(lr);
- // TODO(all): These registers may not need to be pushed. Examine
- // RecordWriteStub and check whether it's needed.
- __ Push(target_map, receiver, key, value);
- Register length = x5;
- __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
- FixedArray::kLengthOffset));
-
- // Allocate new FixedArray.
- Register array_size = x6;
- Register array = x7;
- Label gc_required;
- __ Mov(array_size, FixedDoubleArray::kHeaderSize);
- __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
- __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set the destination FixedArray's length and map.
- Register map_root = x6;
- __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
- __ SmiTag(x11, length);
- __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- Register src_elements = x10;
- Register dst_elements = x11;
- Register dst_end = x12;
- __ Add(src_elements, elements,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedArray::kHeaderSize);
- __ Add(array, array, kHeapObjectTag);
- __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
-
- Register the_hole = x14;
- Register heap_num_map = x15;
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
-
- Label entry;
- __ B(&entry);
-
- // Call into runtime if GC is required.
- __ Bind(&gc_required);
- __ Pop(value, key, receiver, target_map);
- __ Pop(lr);
- __ B(fail);
-
- {
- Label loop, convert_hole;
- __ Bind(&loop);
- __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
- __ Cmp(x13, kHoleNanInt64);
- __ B(eq, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- Register heap_num = x5;
- __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
- __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
- __ Mov(x13, dst_elements);
- __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
- __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- __ B(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ Bind(&convert_hole);
- __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
-
- __ Bind(&entry);
- __ Cmp(dst_elements, dst_end);
- __ B(lt, &loop);
- }
-
- __ Pop(value, key, receiver, target_map);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Pop(lr);
-
- __ Bind(&only_change_map);
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- return MacroAssembler::IsYoungSequence(sequence);
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- byte* target = sequence + kCodeAgeStubEntryOffset;
- Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
- if (age == kNoAgeCodeAge) {
- MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
- } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
- MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
- }
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ Ldrsw(result,
- UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
- __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ Add(index, index, result);
- __ B(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ Bind(&cons_string);
- __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
- // Get the first of the two strings and load its instance type.
- __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ Bind(&indirect_string_loaded);
- __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ Bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
-
-  // Prepare sequential strings.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ B(&check_encoding);
-
- // Handle external strings.
- __ Bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ Tst(result, kIsIndirectStringMask);
- __ Assert(eq, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
-  // TestAndBranchIfAnySet can emit Tbnz, which has a limited branch range.
-  // Do not use it here because call_runtime can be bound far away in
-  // deferred code, beyond that range.
- __ Tst(result, kShortExternalStringMask);
- __ B(ne, call_runtime);
- __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label ascii, done;
- __ Bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
- // Two-byte string.
- __ Ldrh(result, MemOperand(string, index, LSL, 1));
- __ B(&done);
- __ Bind(&ascii);
- // Ascii string.
- __ Ldrb(result, MemOperand(string, index));
- __ Bind(&done);
-}
-
-
-static MemOperand ExpConstant(Register base, int index) {
- return MemOperand(base, index * kDoubleSize);
-}
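-
-// For example, ExpConstant(constants, 3) is MemOperand(constants, 24): the
-// fourth double in the constants table.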
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_temp1,
- DoubleRegister double_temp2,
- Register temp1,
- Register temp2,
- Register temp3) {
- // TODO(jbramley): There are several instances where fnmsub could be used
- // instead of fmul and fsub. Doing this changes the result, but since this is
- // an estimation anyway, does it matter?
-
- ASSERT(!AreAliased(input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
- DoubleRegister double_temp3 = result;
- Register constants = temp3;
-
- // The algorithm used relies on some magic constants which are initialized in
- // ExternalReference::InitializeMathExpData().
-
- // Load the address of the start of the array.
- __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
-
- // We have to do a four-way split here:
- // - If input <= about -708.4, the output always rounds to zero.
- // - If input >= about 709.8, the output always rounds to +infinity.
- // - If the input is NaN, the output is NaN.
- // - Otherwise, the result needs to be calculated.
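-  // (The thresholds are approximately ln(DBL_MIN) and ln(DBL_MAX); outside
-  // this range, exp(input) is no longer representable as a finite, normal
-  // double.)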
- Label result_is_finite_non_zero;
- // Assert that we can load offset 0 (the small input threshold) and offset 1
- // (the large input threshold) with a single ldp.
- ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
- ExpConstant(constants, 0).offset()));
- __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
-
- __ Fcmp(input, double_temp1);
- __ Fccmp(input, double_temp2, NoFlag, hi);
- // At this point, the condition flags can be in one of five states:
- // NZCV
- // 1000 -708.4 < input < 709.8 result = exp(input)
- // 0110 input == 709.8 result = +infinity
- // 0010 input > 709.8 result = +infinity
- // 0011 input is NaN result = input
- // 0000 input <= -708.4 result = +0.0
-
- // Continue the common case first. 'mi' tests N == 1.
- __ B(&result_is_finite_non_zero, mi);
-
- // TODO(jbramley): Add (and use) a zero D register for A64.
- // TODO(jbramley): Consider adding a +infinity register for A64.
- __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
- __ Fsub(double_temp1, double_temp1, double_temp1); // Synthesize +0.0.
-
- // Select between +0.0 and +infinity. 'lo' tests C == 0.
- __ Fcsel(result, double_temp1, double_temp2, lo);
- // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
- __ Fcsel(result, result, input, vc);
- __ B(&done);
-
- // The rest is magic, as described in InitializeMathExpData().
- __ Bind(&result_is_finite_non_zero);
-
- // Assert that we can load offset 3 and offset 4 with a single ldp.
- ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
- ExpConstant(constants, 3).offset()));
- __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
- __ Fmadd(double_temp1, double_temp1, input, double_temp3);
- __ Fmov(temp2.W(), double_temp1.S());
- __ Fsub(double_temp1, double_temp1, double_temp3);
-
- // Assert that we can load offset 5 and offset 6 with a single ldp.
- ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
- ExpConstant(constants, 5).offset()));
- __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
- // TODO(jbramley): Consider using Fnmsub here.
- __ Fmul(double_temp1, double_temp1, double_temp2);
- __ Fsub(double_temp1, double_temp1, input);
-
- __ Fmul(double_temp2, double_temp1, double_temp1);
- __ Fsub(double_temp3, double_temp3, double_temp1);
- __ Fmul(double_temp3, double_temp3, double_temp2);
-
- __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
-
- __ Ldr(double_temp2, ExpConstant(constants, 7));
- // TODO(jbramley): Consider using Fnmsub here.
- __ Fmul(double_temp3, double_temp3, double_temp2);
- __ Fsub(double_temp3, double_temp3, double_temp1);
-
- // The 8th constant is 1.0, so use an immediate move rather than a load.
- // We can't generate a runtime assertion here as we would need to call Abort
- // in the runtime and we don't have an Isolate when we generate this code.
- __ Fmov(double_temp2, 1.0);
- __ Fadd(double_temp3, double_temp3, double_temp2);
-
- __ And(temp2, temp2, 0x7ff);
- __ Add(temp1, temp1, 0x3ff);
-
- // Do the final table lookup.
- __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-
- __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
- __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
- __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
- __ Bfi(temp2, temp1, 32, 32);
- __ Fmov(double_temp1, temp2);
-
- __ Fmul(result, double_temp3, double_temp1);
-
- __ Bind(&done);
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/codegen-a64.h b/deps/v8/src/a64/codegen-a64.h
deleted file mode 100644
index d66bd34a93..0000000000
--- a/deps/v8/src/a64/codegen-a64.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_CODEGEN_A64_H_
-#define V8_A64_CODEGEN_A64_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_A64_CODEGEN_A64_H_
diff --git a/deps/v8/src/a64/constants-a64.h b/deps/v8/src/a64/constants-a64.h
deleted file mode 100644
index 4f43f13537..0000000000
--- a/deps/v8/src/a64/constants-a64.h
+++ /dev/null
@@ -1,1262 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_CONSTANTS_A64_H_
-#define V8_A64_CONSTANTS_A64_H_
-
-
-// Assert that this is an LP64 system.
-STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
-STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
-
-
-// Get the standard printf format macros for C99 stdint types.
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-
-
-namespace v8 {
-namespace internal {
-
-
-const unsigned kInstructionSize = 4;
-const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLiteralEntrySize = 4;
-const unsigned kLiteralEntrySizeLog2 = 2;
-const unsigned kMaxLoadLiteralRange = 1 * MB;
-
-const unsigned kNumberOfRegisters = 32;
-const unsigned kNumberOfFPRegisters = 32;
-// Callee saved registers are x19-x30 (lr).
-const int kNumberOfCalleeSavedRegisters = 11;
-const int kFirstCalleeSavedRegisterIndex = 19;
-// Callee saved FP registers are d8-d15.
-const int kNumberOfCalleeSavedFPRegisters = 8;
-const int kFirstCalleeSavedFPRegisterIndex = 8;
-// Callee saved registers with no specific purpose in JS are x19-x25.
-const unsigned kJSCalleeSavedRegList = 0x03f80000;
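-// (Bits 19-25 inclusive: (1 << 26) - (1 << 19) == 0x03f80000.)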
-// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
-const unsigned kWRegSize = 32;
-const unsigned kWRegSizeLog2 = 5;
-const unsigned kWRegSizeInBytes = kWRegSize >> 3;
-const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
-const unsigned kXRegSize = 64;
-const unsigned kXRegSizeLog2 = 6;
-const unsigned kXRegSizeInBytes = kXRegSize >> 3;
-const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
-const unsigned kSRegSize = 32;
-const unsigned kSRegSizeLog2 = 5;
-const unsigned kSRegSizeInBytes = kSRegSize >> 3;
-const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
-const unsigned kDRegSize = 64;
-const unsigned kDRegSizeLog2 = 6;
-const unsigned kDRegSizeInBytes = kDRegSize >> 3;
-const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
-const int64_t kWRegMask = 0x00000000ffffffffL;
-const int64_t kXRegMask = 0xffffffffffffffffL;
-const int64_t kSRegMask = 0x00000000ffffffffL;
-const int64_t kDRegMask = 0xffffffffffffffffL;
-// TODO(all) check if the expression below works on all compilers or if it
-// triggers an overflow error.
-const int64_t kDSignMask = 0x1L << 63;
-const int64_t kDSignBit = 63;
-const int64_t kXSignMask = 0x1L << 63;
-const int64_t kXSignBit = 63;
-const int64_t kWSignMask = 0x1L << 31;
-const int64_t kWSignBit = 31;
-const int64_t kByteMask = 0xffL;
-const int64_t kHalfWordMask = 0xffffL;
-const int64_t kWordMask = 0xffffffffL;
-const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
-const uint64_t kWMaxUInt = 0xffffffffUL;
-const int64_t kXMaxInt = 0x7fffffffffffffffL;
-const int64_t kXMinInt = 0x8000000000000000L;
-const int32_t kWMaxInt = 0x7fffffff;
-const int32_t kWMinInt = 0x80000000;
-const unsigned kFramePointerRegCode = 29;
-const unsigned kLinkRegCode = 30;
-const unsigned kZeroRegCode = 31;
-const unsigned kJSSPCode = 28;
-const unsigned kSPRegInternalCode = 63;
-const unsigned kRegCodeMask = 0x1f;
-// Standard machine types defined by AAPCS64.
-const unsigned kByteSize = 8;
-const unsigned kByteSizeInBytes = kByteSize >> 3;
-const unsigned kHalfWordSize = 16;
-const unsigned kHalfWordSizeLog2 = 4;
-const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
-const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
-const unsigned kWordSize = 32;
-const unsigned kWordSizeLog2 = 5;
-const unsigned kWordSizeInBytes = kWordSize >> 3;
-const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
-const unsigned kDoubleWordSize = 64;
-const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
-const unsigned kQuadWordSize = 128;
-const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
-// AArch64 floating-point specifics. These match IEEE-754.
-const unsigned kDoubleMantissaBits = 52;
-const unsigned kDoubleExponentBits = 11;
-const unsigned kFloatMantissaBits = 23;
-const unsigned kFloatExponentBits = 8;
-
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
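-
-// A hypothetical use (names are illustrative, not from this codebase):
-//   #define REG_NAME(N) "x" #N,
-//   static const char* const kRegNames[] = { REGISTER_CODE_LIST(REG_NAME) };
-//   #undef REG_NAME
-// expands to a 32-entry table of register names "x0" ... "x31".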
-
-#define INSTRUCTION_FIELDS_LIST(V_) \
-/* Register fields */ \
-V_(Rd, 4, 0, Bits) /* Destination register. */ \
-V_(Rn, 9, 5, Bits) /* First source register. */ \
-V_(Rm, 20, 16, Bits) /* Second source register. */ \
-V_(Ra, 14, 10, Bits) /* Third source register. */ \
-V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
-V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
- /* store second source. */ \
-V_(PrefetchMode, 4, 0, Bits) \
- \
-/* Common bits */ \
-V_(SixtyFourBits, 31, 31, Bits) \
-V_(FlagsUpdate, 29, 29, Bits) \
- \
-/* PC relative addressing */ \
-V_(ImmPCRelHi, 23, 5, SignedBits) \
-V_(ImmPCRelLo, 30, 29, Bits) \
- \
-/* Add/subtract/logical shift register */ \
-V_(ShiftDP, 23, 22, Bits) \
-V_(ImmDPShift, 15, 10, Bits) \
- \
-/* Add/subtract immediate */ \
-V_(ImmAddSub, 21, 10, Bits) \
-V_(ShiftAddSub, 23, 22, Bits) \
- \
-/* Add/subtract extend */                                     \
-V_(ImmExtendShift, 12, 10, Bits) \
-V_(ExtendMode, 15, 13, Bits) \
- \
-/* Move wide */ \
-V_(ImmMoveWide, 20, 5, Bits) \
-V_(ShiftMoveWide, 22, 21, Bits) \
- \
-/* Logical immediate, bitfield and extract */ \
-V_(BitN, 22, 22, Bits) \
-V_(ImmRotate, 21, 16, Bits) \
-V_(ImmSetBits, 15, 10, Bits) \
-V_(ImmR, 21, 16, Bits) \
-V_(ImmS, 15, 10, Bits) \
- \
-/* Test and branch immediate */ \
-V_(ImmTestBranch, 18, 5, SignedBits) \
-V_(ImmTestBranchBit40, 23, 19, Bits) \
-V_(ImmTestBranchBit5, 31, 31, Bits) \
- \
-/* Conditionals */ \
-V_(Condition, 15, 12, Bits) \
-V_(ConditionBranch, 3, 0, Bits) \
-V_(Nzcv, 3, 0, Bits) \
-V_(ImmCondCmp, 20, 16, Bits) \
-V_(ImmCondBranch, 23, 5, SignedBits) \
- \
-/* Floating point */ \
-V_(FPType, 23, 22, Bits) \
-V_(ImmFP, 20, 13, Bits) \
-V_(FPScale, 15, 10, Bits) \
- \
-/* Load Store */ \
-V_(ImmLS, 20, 12, SignedBits) \
-V_(ImmLSUnsigned, 21, 10, Bits) \
-V_(ImmLSPair, 21, 15, SignedBits) \
-V_(SizeLS, 31, 30, Bits) \
-V_(ImmShiftLS, 12, 12, Bits) \
- \
-/* Other immediates */ \
-V_(ImmUncondBranch, 25, 0, SignedBits) \
-V_(ImmCmpBranch, 23, 5, SignedBits) \
-V_(ImmLLiteral, 23, 5, SignedBits) \
-V_(ImmException, 20, 5, Bits) \
-V_(ImmHint, 11, 5, Bits) \
-V_(ImmBarrierDomain, 11, 10, Bits) \
-V_(ImmBarrierType, 9, 8, Bits) \
- \
-/* System (MRS, MSR) */ \
-V_(ImmSystemRegister, 19, 5, Bits) \
-V_(SysO0, 19, 19, Bits) \
-V_(SysOp1, 18, 16, Bits) \
-V_(SysOp2, 7, 5, Bits) \
-V_(CRn, 15, 12, Bits) \
-V_(CRm, 11, 8, Bits) \
-
-
-#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
-/* NZCV */ \
-V_(Flags, 31, 28, Bits) \
-V_(N, 31, 31, Bits) \
-V_(Z, 30, 30, Bits) \
-V_(C, 29, 29, Bits) \
-V_(V, 28, 28, Bits) \
-M_(NZCV, Flags_mask) \
- \
-/* FPCR */ \
-V_(AHP, 26, 26, Bits) \
-V_(DN, 25, 25, Bits) \
-V_(FZ, 24, 24, Bits) \
-V_(RMode, 23, 22, Bits) \
-M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
-
-
-// Fields offsets.
-#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
-const int Name##_offset = LowBit; \
-const int Name##_width = HighBit - LowBit + 1; \
-const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
-#define NOTHING(A, B)
-INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
-SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
-#undef NOTHING
-#undef DECLARE_FIELDS_OFFSETS
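-
-// For example, the V_(Rd, 4, 0, Bits) entry above expands to:
-//   const int Rd_offset = 0;
-//   const int Rd_width = 5;
-//   const uint32_t Rd_mask = 0x0000001f;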
-
-// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
-// from ImmPCRelLo and ImmPCRelHi.
-const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
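-
-// A sketch of reassembling the full 21-bit immediate, assuming the signed
-// field accessors that instructions-a64.h generates from the same list:
-//   int imm21 = (instr->ImmPCRelHi() << ImmPCRelLo_width) | instr->ImmPCRelLo();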
-
-// Condition codes.
-enum Condition {
- eq = 0,
- ne = 1,
- hs = 2,
- lo = 3,
- mi = 4,
- pl = 5,
- vs = 6,
- vc = 7,
- hi = 8,
- ls = 9,
- ge = 10,
- lt = 11,
- gt = 12,
- le = 13,
- al = 14,
- nv = 15 // Behaves as always/al.
-};
-
-inline Condition InvertCondition(Condition cond) {
- // Conditions al and nv behave identically, as "always true". They can't be
- // inverted, because there is no never condition.
- ASSERT((cond != al) && (cond != nv));
- return static_cast<Condition>(cond ^ 1);
-}
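-
-// For example, eq (0) inverts to ne (1) and ge (10) to lt (11): the encoding
-// arranges conditions in even/odd pairs of inverses.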
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseConditionForCmp(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- case eq:
- return eq;
- default:
- // In practice this function is only used with a condition coming from
- // TokenToCondition in lithium-codegen-a64.cc. Any other condition is
-      // invalid as it doesn't necessarily make sense to reverse it (consider
- // 'mi' for instance).
- UNREACHABLE();
- return nv;
- };
-}
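-
-// For example, (a < b) holds exactly when (b > a) does, which is why lt maps
-// to gt above.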
-
-enum FlagsUpdate {
- SetFlags = 1,
- LeaveFlags = 0
-};
-
-enum StatusFlags {
- NoFlag = 0,
-
- // Derive the flag combinations from the system register bit descriptions.
- NFlag = N_mask,
- ZFlag = Z_mask,
- CFlag = C_mask,
- VFlag = V_mask,
- NZFlag = NFlag | ZFlag,
- NCFlag = NFlag | CFlag,
- NVFlag = NFlag | VFlag,
- ZCFlag = ZFlag | CFlag,
- ZVFlag = ZFlag | VFlag,
- CVFlag = CFlag | VFlag,
- NZCFlag = NFlag | ZFlag | CFlag,
- NZVFlag = NFlag | ZFlag | VFlag,
- NCVFlag = NFlag | CFlag | VFlag,
- ZCVFlag = ZFlag | CFlag | VFlag,
- NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
-
- // Floating-point comparison results.
- FPEqualFlag = ZCFlag,
- FPLessThanFlag = NFlag,
- FPGreaterThanFlag = CFlag,
- FPUnorderedFlag = CVFlag
-};
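-
-// With the NZCV bit positions above, these resolve to concrete values such as
-// NFlag = 0x80000000 and NZCVFlag = 0xF0000000.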
-
-enum Shift {
- NO_SHIFT = -1,
- LSL = 0x0,
- LSR = 0x1,
- ASR = 0x2,
- ROR = 0x3
-};
-
-enum Extend {
- NO_EXTEND = -1,
- UXTB = 0,
- UXTH = 1,
- UXTW = 2,
- UXTX = 3,
- SXTB = 4,
- SXTH = 5,
- SXTW = 6,
- SXTX = 7
-};
-
-enum SystemHint {
- NOP = 0,
- YIELD = 1,
- WFE = 2,
- WFI = 3,
- SEV = 4,
- SEVL = 5
-};
-
-enum BarrierDomain {
- OuterShareable = 0,
- NonShareable = 1,
- InnerShareable = 2,
- FullSystem = 3
-};
-
-enum BarrierType {
- BarrierOther = 0,
- BarrierReads = 1,
- BarrierWrites = 2,
- BarrierAll = 3
-};
-
-// System/special register names.
-// This information is not encoded as one field but as the concatenation of
-// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
-enum SystemRegister {
- NZCV = ((0x1 << SysO0_offset) |
- (0x3 << SysOp1_offset) |
- (0x4 << CRn_offset) |
- (0x2 << CRm_offset) |
- (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
- FPCR = ((0x1 << SysO0_offset) |
- (0x3 << SysOp1_offset) |
- (0x4 << CRn_offset) |
- (0x4 << CRm_offset) |
- (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
-};
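-
-// Given the field offsets above, these encodings evaluate to NZCV = 0x5A10
-// and FPCR = 0x5A20.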
-
-// Instruction enumerations.
-//
-// These are the masks that define a class of instructions, and the list of
-// instructions within each class. Each enumeration has a Fixed, FMask and
-// Mask value.
-//
-// Fixed: The fixed bits in this instruction class.
-// FMask: The mask used to extract the fixed bits in the class.
-// Mask: The mask used to identify the instructions within a class.
-//
-// The enumerations can be used like this:
-//
-// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
-// switch(instr->Mask(PCRelAddressingMask)) {
-// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
-// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
-// default: printf("Unknown instruction\n");
-// }
-
-
-// Generic fields.
-enum GenericInstrField {
- SixtyFourBits = 0x80000000,
- ThirtyTwoBits = 0x00000000,
- FP32 = 0x00000000,
- FP64 = 0x00400000
-};
-
-// PC relative addressing.
-enum PCRelAddressingOp {
- PCRelAddressingFixed = 0x10000000,
- PCRelAddressingFMask = 0x1F000000,
- PCRelAddressingMask = 0x9F000000,
- ADR = PCRelAddressingFixed | 0x00000000,
- ADRP = PCRelAddressingFixed | 0x80000000
-};
-
-// Add/sub (immediate, shifted and extended).
-const int kSFOffset = 31;
-enum AddSubOp {
- AddSubOpMask = 0x60000000,
- AddSubSetFlagsBit = 0x20000000,
- ADD = 0x00000000,
- ADDS = ADD | AddSubSetFlagsBit,
- SUB = 0x40000000,
- SUBS = SUB | AddSubSetFlagsBit
-};
-
-#define ADD_SUB_OP_LIST(V) \
- V(ADD), \
- V(ADDS), \
- V(SUB), \
- V(SUBS)
-
-enum AddSubImmediateOp {
- AddSubImmediateFixed = 0x11000000,
- AddSubImmediateFMask = 0x1F000000,
- AddSubImmediateMask = 0xFF000000,
- #define ADD_SUB_IMMEDIATE(A) \
- A##_w_imm = AddSubImmediateFixed | A, \
- A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
- ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
- #undef ADD_SUB_IMMEDIATE
-};
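-
-// For example, the expansion above yields ADD_w_imm = 0x11000000,
-// ADD_x_imm = 0x91000000, SUBS_w_imm = 0x71000000 and SUBS_x_imm = 0xF1000000.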
-
-enum AddSubShiftedOp {
- AddSubShiftedFixed = 0x0B000000,
- AddSubShiftedFMask = 0x1F200000,
- AddSubShiftedMask = 0xFF200000,
- #define ADD_SUB_SHIFTED(A) \
- A##_w_shift = AddSubShiftedFixed | A, \
- A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
- ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
- #undef ADD_SUB_SHIFTED
-};
-
-enum AddSubExtendedOp {
- AddSubExtendedFixed = 0x0B200000,
- AddSubExtendedFMask = 0x1F200000,
- AddSubExtendedMask = 0xFFE00000,
- #define ADD_SUB_EXTENDED(A) \
- A##_w_ext = AddSubExtendedFixed | A, \
- A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
- ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
- #undef ADD_SUB_EXTENDED
-};
-
-// Add/sub with carry.
-enum AddSubWithCarryOp {
- AddSubWithCarryFixed = 0x1A000000,
- AddSubWithCarryFMask = 0x1FE00000,
- AddSubWithCarryMask = 0xFFE0FC00,
- ADC_w = AddSubWithCarryFixed | ADD,
- ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
- ADC = ADC_w,
- ADCS_w = AddSubWithCarryFixed | ADDS,
- ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
- SBC_w = AddSubWithCarryFixed | SUB,
- SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
- SBC = SBC_w,
- SBCS_w = AddSubWithCarryFixed | SUBS,
- SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
-};
-
-
-// Logical (immediate and shifted register).
-enum LogicalOp {
- LogicalOpMask = 0x60200000,
- NOT = 0x00200000,
- AND = 0x00000000,
- BIC = AND | NOT,
- ORR = 0x20000000,
- ORN = ORR | NOT,
- EOR = 0x40000000,
- EON = EOR | NOT,
- ANDS = 0x60000000,
- BICS = ANDS | NOT
-};
-
-// Logical immediate.
-enum LogicalImmediateOp {
- LogicalImmediateFixed = 0x12000000,
- LogicalImmediateFMask = 0x1F800000,
- LogicalImmediateMask = 0xFF800000,
- AND_w_imm = LogicalImmediateFixed | AND,
- AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
- ORR_w_imm = LogicalImmediateFixed | ORR,
- ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
- EOR_w_imm = LogicalImmediateFixed | EOR,
- EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
- ANDS_w_imm = LogicalImmediateFixed | ANDS,
- ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
-};
-
-// Logical shifted register.
-enum LogicalShiftedOp {
- LogicalShiftedFixed = 0x0A000000,
- LogicalShiftedFMask = 0x1F000000,
- LogicalShiftedMask = 0xFF200000,
- AND_w = LogicalShiftedFixed | AND,
- AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
- AND_shift = AND_w,
- BIC_w = LogicalShiftedFixed | BIC,
- BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
- BIC_shift = BIC_w,
- ORR_w = LogicalShiftedFixed | ORR,
- ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
- ORR_shift = ORR_w,
- ORN_w = LogicalShiftedFixed | ORN,
- ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
- ORN_shift = ORN_w,
- EOR_w = LogicalShiftedFixed | EOR,
- EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
- EOR_shift = EOR_w,
- EON_w = LogicalShiftedFixed | EON,
- EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
- EON_shift = EON_w,
- ANDS_w = LogicalShiftedFixed | ANDS,
- ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
- ANDS_shift = ANDS_w,
- BICS_w = LogicalShiftedFixed | BICS,
- BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
- BICS_shift = BICS_w
-};
-
-// Move wide immediate.
-enum MoveWideImmediateOp {
- MoveWideImmediateFixed = 0x12800000,
- MoveWideImmediateFMask = 0x1F800000,
- MoveWideImmediateMask = 0xFF800000,
- MOVN = 0x00000000,
- MOVZ = 0x40000000,
- MOVK = 0x60000000,
- MOVN_w = MoveWideImmediateFixed | MOVN,
- MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
- MOVZ_w = MoveWideImmediateFixed | MOVZ,
- MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
- MOVK_w = MoveWideImmediateFixed | MOVK,
- MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
-};
-
-// Bitfield.
-const int kBitfieldNOffset = 22;
-enum BitfieldOp {
- BitfieldFixed = 0x13000000,
- BitfieldFMask = 0x1F800000,
- BitfieldMask = 0xFF800000,
- SBFM_w = BitfieldFixed | 0x00000000,
- SBFM_x = BitfieldFixed | 0x80000000,
- SBFM = SBFM_w,
- BFM_w = BitfieldFixed | 0x20000000,
- BFM_x = BitfieldFixed | 0xA0000000,
- BFM = BFM_w,
- UBFM_w = BitfieldFixed | 0x40000000,
- UBFM_x = BitfieldFixed | 0xC0000000,
- UBFM = UBFM_w
- // Bitfield N field.
-};
-
-// Extract.
-enum ExtractOp {
- ExtractFixed = 0x13800000,
- ExtractFMask = 0x1F800000,
- ExtractMask = 0xFFA00000,
- EXTR_w = ExtractFixed | 0x00000000,
- EXTR_x = ExtractFixed | 0x80000000,
- EXTR = EXTR_w
-};
-
-// Unconditional branch.
-enum UnconditionalBranchOp {
- UnconditionalBranchFixed = 0x14000000,
- UnconditionalBranchFMask = 0x7C000000,
- UnconditionalBranchMask = 0xFC000000,
- B = UnconditionalBranchFixed | 0x00000000,
- BL = UnconditionalBranchFixed | 0x80000000
-};
-
-// Unconditional branch to register.
-enum UnconditionalBranchToRegisterOp {
- UnconditionalBranchToRegisterFixed = 0xD6000000,
- UnconditionalBranchToRegisterFMask = 0xFE000000,
- UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
- BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
- BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
- RET = UnconditionalBranchToRegisterFixed | 0x005F0000
-};
-
-// Compare and branch.
-enum CompareBranchOp {
- CompareBranchFixed = 0x34000000,
- CompareBranchFMask = 0x7E000000,
- CompareBranchMask = 0xFF000000,
- CBZ_w = CompareBranchFixed | 0x00000000,
- CBZ_x = CompareBranchFixed | 0x80000000,
- CBZ = CBZ_w,
- CBNZ_w = CompareBranchFixed | 0x01000000,
- CBNZ_x = CompareBranchFixed | 0x81000000,
- CBNZ = CBNZ_w
-};
-
-// Test and branch.
-enum TestBranchOp {
- TestBranchFixed = 0x36000000,
- TestBranchFMask = 0x7E000000,
- TestBranchMask = 0x7F000000,
- TBZ = TestBranchFixed | 0x00000000,
- TBNZ = TestBranchFixed | 0x01000000
-};
-
-// Conditional branch.
-enum ConditionalBranchOp {
- ConditionalBranchFixed = 0x54000000,
- ConditionalBranchFMask = 0xFE000000,
- ConditionalBranchMask = 0xFF000010,
- B_cond = ConditionalBranchFixed | 0x00000000
-};
-
-// System.
-// System instruction encoding is complicated because some instructions use op
-// and CR fields to encode parameters. To handle this cleanly, the system
-// instructions are split into more than one enum.
-
-enum SystemOp {
- SystemFixed = 0xD5000000,
- SystemFMask = 0xFFC00000
-};
-
-enum SystemSysRegOp {
- SystemSysRegFixed = 0xD5100000,
- SystemSysRegFMask = 0xFFD00000,
- SystemSysRegMask = 0xFFF00000,
- MRS = SystemSysRegFixed | 0x00200000,
- MSR = SystemSysRegFixed | 0x00000000
-};
-
-enum SystemHintOp {
- SystemHintFixed = 0xD503201F,
- SystemHintFMask = 0xFFFFF01F,
- SystemHintMask = 0xFFFFF01F,
- HINT = SystemHintFixed | 0x00000000
-};
-
-// Exception.
-enum ExceptionOp {
- ExceptionFixed = 0xD4000000,
- ExceptionFMask = 0xFF000000,
- ExceptionMask = 0xFFE0001F,
- HLT = ExceptionFixed | 0x00400000,
- BRK = ExceptionFixed | 0x00200000,
- SVC = ExceptionFixed | 0x00000001,
- HVC = ExceptionFixed | 0x00000002,
- SMC = ExceptionFixed | 0x00000003,
- DCPS1 = ExceptionFixed | 0x00A00001,
- DCPS2 = ExceptionFixed | 0x00A00002,
- DCPS3 = ExceptionFixed | 0x00A00003
-};
-// Code used to spot hlt instructions that should not be hit.
-const int kHltBadCode = 0xbad;
-
-enum MemBarrierOp {
- MemBarrierFixed = 0xD503309F,
- MemBarrierFMask = 0xFFFFF09F,
- MemBarrierMask = 0xFFFFF0FF,
- DSB = MemBarrierFixed | 0x00000000,
- DMB = MemBarrierFixed | 0x00000020,
- ISB = MemBarrierFixed | 0x00000040
-};
-
-// Any load or store (including pair).
-enum LoadStoreAnyOp {
- LoadStoreAnyFMask = 0x0a000000,
- LoadStoreAnyFixed = 0x08000000
-};
-
-// Any load pair or store pair.
-enum LoadStorePairAnyOp {
- LoadStorePairAnyFMask = 0x3a000000,
- LoadStorePairAnyFixed = 0x28000000
-};
-
-#define LOAD_STORE_PAIR_OP_LIST(V) \
- V(STP, w, 0x00000000), \
- V(LDP, w, 0x00400000), \
- V(LDPSW, x, 0x40400000), \
- V(STP, x, 0x80000000), \
- V(LDP, x, 0x80400000), \
- V(STP, s, 0x04000000), \
- V(LDP, s, 0x04400000), \
- V(STP, d, 0x44000000), \
- V(LDP, d, 0x44400000)
-
-// Load/store pair (post, pre and offset).
-enum LoadStorePairOp {
- LoadStorePairMask = 0xC4400000,
- LoadStorePairLBit = 1 << 22,
- #define LOAD_STORE_PAIR(A, B, C) \
- A##_##B = C
- LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
- #undef LOAD_STORE_PAIR
-};
-
-enum LoadStorePairPostIndexOp {
- LoadStorePairPostIndexFixed = 0x28800000,
- LoadStorePairPostIndexFMask = 0x3B800000,
- LoadStorePairPostIndexMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
- A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
- LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
- #undef LOAD_STORE_PAIR_POST_INDEX
-};
-
-enum LoadStorePairPreIndexOp {
- LoadStorePairPreIndexFixed = 0x29800000,
- LoadStorePairPreIndexFMask = 0x3B800000,
- LoadStorePairPreIndexMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
- A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
- LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
- #undef LOAD_STORE_PAIR_PRE_INDEX
-};
-
-enum LoadStorePairOffsetOp {
- LoadStorePairOffsetFixed = 0x29000000,
- LoadStorePairOffsetFMask = 0x3B800000,
- LoadStorePairOffsetMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
- A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
- LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
- #undef LOAD_STORE_PAIR_OFFSET
-};
-
-enum LoadStorePairNonTemporalOp {
- LoadStorePairNonTemporalFixed = 0x28000000,
- LoadStorePairNonTemporalFMask = 0x3B800000,
- LoadStorePairNonTemporalMask = 0xFFC00000,
- STNP_w = LoadStorePairNonTemporalFixed | STP_w,
- LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
- STNP_x = LoadStorePairNonTemporalFixed | STP_x,
- LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
- STNP_s = LoadStorePairNonTemporalFixed | STP_s,
- LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
- STNP_d = LoadStorePairNonTemporalFixed | STP_d,
- LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
-};
-
-// Load literal.
-enum LoadLiteralOp {
- LoadLiteralFixed = 0x18000000,
- LoadLiteralFMask = 0x3B000000,
- LoadLiteralMask = 0xFF000000,
- LDR_w_lit = LoadLiteralFixed | 0x00000000,
- LDR_x_lit = LoadLiteralFixed | 0x40000000,
- LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
- PRFM_lit = LoadLiteralFixed | 0xC0000000,
- LDR_s_lit = LoadLiteralFixed | 0x04000000,
- LDR_d_lit = LoadLiteralFixed | 0x44000000
-};
-
-#define LOAD_STORE_OP_LIST(V) \
- V(ST, RB, w, 0x00000000), \
- V(ST, RH, w, 0x40000000), \
- V(ST, R, w, 0x80000000), \
- V(ST, R, x, 0xC0000000), \
- V(LD, RB, w, 0x00400000), \
- V(LD, RH, w, 0x40400000), \
- V(LD, R, w, 0x80400000), \
- V(LD, R, x, 0xC0400000), \
- V(LD, RSB, x, 0x00800000), \
- V(LD, RSH, x, 0x40800000), \
- V(LD, RSW, x, 0x80800000), \
- V(LD, RSB, w, 0x00C00000), \
- V(LD, RSH, w, 0x40C00000), \
- V(ST, R, s, 0x84000000), \
- V(ST, R, d, 0xC4000000), \
- V(LD, R, s, 0x84400000), \
- V(LD, R, d, 0xC4400000)
-
-
-// Load/store unscaled offset.
-enum LoadStoreUnscaledOffsetOp {
- LoadStoreUnscaledOffsetFixed = 0x38000000,
- LoadStoreUnscaledOffsetFMask = 0x3B200C00,
- LoadStoreUnscaledOffsetMask = 0xFFE00C00,
- #define LOAD_STORE_UNSCALED(A, B, C, D) \
- A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
- LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
- #undef LOAD_STORE_UNSCALED
-};
-
-// Load/store (post, pre, offset and unsigned).
-enum LoadStoreOp {
- LoadStoreOpMask = 0xC4C00000,
- #define LOAD_STORE(A, B, C, D) \
- A##B##_##C = D
- LOAD_STORE_OP_LIST(LOAD_STORE),
- #undef LOAD_STORE
- PRFM = 0xC0800000
-};
-
-// Load/store post index.
-enum LoadStorePostIndex {
- LoadStorePostIndexFixed = 0x38000400,
- LoadStorePostIndexFMask = 0x3B200C00,
- LoadStorePostIndexMask = 0xFFE00C00,
- #define LOAD_STORE_POST_INDEX(A, B, C, D) \
- A##B##_##C##_post = LoadStorePostIndexFixed | D
- LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
- #undef LOAD_STORE_POST_INDEX
-};
-
-// Load/store pre index.
-enum LoadStorePreIndex {
- LoadStorePreIndexFixed = 0x38000C00,
- LoadStorePreIndexFMask = 0x3B200C00,
- LoadStorePreIndexMask = 0xFFE00C00,
- #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
- A##B##_##C##_pre = LoadStorePreIndexFixed | D
- LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
- #undef LOAD_STORE_PRE_INDEX
-};
-
-// Load/store unsigned offset.
-enum LoadStoreUnsignedOffset {
- LoadStoreUnsignedOffsetFixed = 0x39000000,
- LoadStoreUnsignedOffsetFMask = 0x3B000000,
- LoadStoreUnsignedOffsetMask = 0xFFC00000,
- PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
- #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
- A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
- LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
- #undef LOAD_STORE_UNSIGNED_OFFSET
-};
-
-// Load/store register offset.
-enum LoadStoreRegisterOffset {
- LoadStoreRegisterOffsetFixed = 0x38200800,
- LoadStoreRegisterOffsetFMask = 0x3B200C00,
- LoadStoreRegisterOffsetMask = 0xFFE00C00,
- PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
- #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
- A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
- LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
- #undef LOAD_STORE_REGISTER_OFFSET
-};
-
-// Conditional compare.
-enum ConditionalCompareOp {
- ConditionalCompareMask = 0x60000000,
- CCMN = 0x20000000,
- CCMP = 0x60000000
-};
-
-// Conditional compare register.
-enum ConditionalCompareRegisterOp {
- ConditionalCompareRegisterFixed = 0x1A400000,
- ConditionalCompareRegisterFMask = 0x1FE00800,
- ConditionalCompareRegisterMask = 0xFFE00C10,
- CCMN_w = ConditionalCompareRegisterFixed | CCMN,
- CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
- CCMP_w = ConditionalCompareRegisterFixed | CCMP,
- CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
-};
-
-// Conditional compare immediate.
-enum ConditionalCompareImmediateOp {
- ConditionalCompareImmediateFixed = 0x1A400800,
- ConditionalCompareImmediateFMask = 0x1FE00800,
- ConditionalCompareImmediateMask = 0xFFE00C10,
- CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
- CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
- CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
- CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
-};
-
-// Conditional select.
-enum ConditionalSelectOp {
- ConditionalSelectFixed = 0x1A800000,
- ConditionalSelectFMask = 0x1FE00000,
- ConditionalSelectMask = 0xFFE00C00,
- CSEL_w = ConditionalSelectFixed | 0x00000000,
- CSEL_x = ConditionalSelectFixed | 0x80000000,
- CSEL = CSEL_w,
- CSINC_w = ConditionalSelectFixed | 0x00000400,
- CSINC_x = ConditionalSelectFixed | 0x80000400,
- CSINC = CSINC_w,
- CSINV_w = ConditionalSelectFixed | 0x40000000,
- CSINV_x = ConditionalSelectFixed | 0xC0000000,
- CSINV = CSINV_w,
- CSNEG_w = ConditionalSelectFixed | 0x40000400,
- CSNEG_x = ConditionalSelectFixed | 0xC0000400,
- CSNEG = CSNEG_w
-};
-
-// Data processing 1 source.
-enum DataProcessing1SourceOp {
- DataProcessing1SourceFixed = 0x5AC00000,
- DataProcessing1SourceFMask = 0x5FE00000,
- DataProcessing1SourceMask = 0xFFFFFC00,
- RBIT = DataProcessing1SourceFixed | 0x00000000,
- RBIT_w = RBIT,
- RBIT_x = RBIT | SixtyFourBits,
- REV16 = DataProcessing1SourceFixed | 0x00000400,
- REV16_w = REV16,
- REV16_x = REV16 | SixtyFourBits,
- REV = DataProcessing1SourceFixed | 0x00000800,
- REV_w = REV,
- REV32_x = REV | SixtyFourBits,
- REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
- CLZ = DataProcessing1SourceFixed | 0x00001000,
- CLZ_w = CLZ,
- CLZ_x = CLZ | SixtyFourBits,
- CLS = DataProcessing1SourceFixed | 0x00001400,
- CLS_w = CLS,
- CLS_x = CLS | SixtyFourBits
-};
-
-// Data processing 2 source.
-enum DataProcessing2SourceOp {
- DataProcessing2SourceFixed = 0x1AC00000,
- DataProcessing2SourceFMask = 0x5FE00000,
- DataProcessing2SourceMask = 0xFFE0FC00,
- UDIV_w = DataProcessing2SourceFixed | 0x00000800,
- UDIV_x = DataProcessing2SourceFixed | 0x80000800,
- UDIV = UDIV_w,
- SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
- SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
- SDIV = SDIV_w,
- LSLV_w = DataProcessing2SourceFixed | 0x00002000,
- LSLV_x = DataProcessing2SourceFixed | 0x80002000,
- LSLV = LSLV_w,
- LSRV_w = DataProcessing2SourceFixed | 0x00002400,
- LSRV_x = DataProcessing2SourceFixed | 0x80002400,
- LSRV = LSRV_w,
- ASRV_w = DataProcessing2SourceFixed | 0x00002800,
- ASRV_x = DataProcessing2SourceFixed | 0x80002800,
- ASRV = ASRV_w,
- RORV_w = DataProcessing2SourceFixed | 0x00002C00,
- RORV_x = DataProcessing2SourceFixed | 0x80002C00,
- RORV = RORV_w,
- CRC32B = DataProcessing2SourceFixed | 0x00004000,
- CRC32H = DataProcessing2SourceFixed | 0x00004400,
- CRC32W = DataProcessing2SourceFixed | 0x00004800,
- CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
- CRC32CB = DataProcessing2SourceFixed | 0x00005000,
- CRC32CH = DataProcessing2SourceFixed | 0x00005400,
- CRC32CW = DataProcessing2SourceFixed | 0x00005800,
- CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
-};
-
-// Data processing 3 source.
-enum DataProcessing3SourceOp {
- DataProcessing3SourceFixed = 0x1B000000,
- DataProcessing3SourceFMask = 0x1F000000,
- DataProcessing3SourceMask = 0xFFE08000,
- MADD_w = DataProcessing3SourceFixed | 0x00000000,
- MADD_x = DataProcessing3SourceFixed | 0x80000000,
- MADD = MADD_w,
- MSUB_w = DataProcessing3SourceFixed | 0x00008000,
- MSUB_x = DataProcessing3SourceFixed | 0x80008000,
- MSUB = MSUB_w,
- SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
- SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
- SMULH_x = DataProcessing3SourceFixed | 0x80400000,
- UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
- UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
- UMULH_x = DataProcessing3SourceFixed | 0x80C00000
-};
-
-// Floating point compare.
-enum FPCompareOp {
- FPCompareFixed = 0x1E202000,
- FPCompareFMask = 0x5F203C00,
- FPCompareMask = 0xFFE0FC1F,
- FCMP_s = FPCompareFixed | 0x00000000,
- FCMP_d = FPCompareFixed | FP64 | 0x00000000,
- FCMP = FCMP_s,
- FCMP_s_zero = FPCompareFixed | 0x00000008,
- FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
- FCMP_zero = FCMP_s_zero,
- FCMPE_s = FPCompareFixed | 0x00000010,
- FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
- FCMPE_s_zero = FPCompareFixed | 0x00000018,
- FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
-};
-
-// Floating point conditional compare.
-enum FPConditionalCompareOp {
- FPConditionalCompareFixed = 0x1E200400,
- FPConditionalCompareFMask = 0x5F200C00,
- FPConditionalCompareMask = 0xFFE00C10,
- FCCMP_s = FPConditionalCompareFixed | 0x00000000,
- FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
- FCCMP = FCCMP_s,
- FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
- FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
- FCCMPE = FCCMPE_s
-};
-
-// Floating point conditional select.
-enum FPConditionalSelectOp {
- FPConditionalSelectFixed = 0x1E200C00,
- FPConditionalSelectFMask = 0x5F200C00,
- FPConditionalSelectMask = 0xFFE00C00,
- FCSEL_s = FPConditionalSelectFixed | 0x00000000,
- FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
- FCSEL = FCSEL_s
-};
-
-// Floating point immediate.
-enum FPImmediateOp {
- FPImmediateFixed = 0x1E201000,
- FPImmediateFMask = 0x5F201C00,
- FPImmediateMask = 0xFFE01C00,
- FMOV_s_imm = FPImmediateFixed | 0x00000000,
- FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
-};
-
-// Floating point data processing 1 source.
-enum FPDataProcessing1SourceOp {
- FPDataProcessing1SourceFixed = 0x1E204000,
- FPDataProcessing1SourceFMask = 0x5F207C00,
- FPDataProcessing1SourceMask = 0xFFFFFC00,
- FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
- FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
- FMOV = FMOV_s,
- FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
- FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
- FABS = FABS_s,
- FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
- FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
- FNEG = FNEG_s,
- FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
- FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
- FSQRT = FSQRT_s,
- FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
- FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
- FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
- FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
- FRINTN = FRINTN_s,
- FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
- FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
- FRINTP = FRINTP_s,
- FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
- FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
- FRINTM = FRINTM_s,
- FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
- FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
- FRINTZ = FRINTZ_s,
- FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
- FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
- FRINTA = FRINTA_s,
- FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
- FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
- FRINTX = FRINTX_s,
- FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
- FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
- FRINTI = FRINTI_s
-};
-
-// Floating point data processing 2 source.
-enum FPDataProcessing2SourceOp {
- FPDataProcessing2SourceFixed = 0x1E200800,
- FPDataProcessing2SourceFMask = 0x5F200C00,
- FPDataProcessing2SourceMask = 0xFFE0FC00,
- FMUL = FPDataProcessing2SourceFixed | 0x00000000,
- FMUL_s = FMUL,
- FMUL_d = FMUL | FP64,
- FDIV = FPDataProcessing2SourceFixed | 0x00001000,
- FDIV_s = FDIV,
- FDIV_d = FDIV | FP64,
- FADD = FPDataProcessing2SourceFixed | 0x00002000,
- FADD_s = FADD,
- FADD_d = FADD | FP64,
- FSUB = FPDataProcessing2SourceFixed | 0x00003000,
- FSUB_s = FSUB,
- FSUB_d = FSUB | FP64,
- FMAX = FPDataProcessing2SourceFixed | 0x00004000,
- FMAX_s = FMAX,
- FMAX_d = FMAX | FP64,
- FMIN = FPDataProcessing2SourceFixed | 0x00005000,
- FMIN_s = FMIN,
- FMIN_d = FMIN | FP64,
- FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
- FMAXNM_s = FMAXNM,
- FMAXNM_d = FMAXNM | FP64,
- FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
- FMINNM_s = FMINNM,
- FMINNM_d = FMINNM | FP64,
- FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
- FNMUL_s = FNMUL,
- FNMUL_d = FNMUL | FP64
-};
-
-// Floating point data processing 3 source.
-enum FPDataProcessing3SourceOp {
- FPDataProcessing3SourceFixed = 0x1F000000,
- FPDataProcessing3SourceFMask = 0x5F000000,
- FPDataProcessing3SourceMask = 0xFFE08000,
- FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
- FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
- FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
- FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
- FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
- FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
- FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
- FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
-};
-
-// Conversion between floating point and integer.
-enum FPIntegerConvertOp {
- FPIntegerConvertFixed = 0x1E200000,
- FPIntegerConvertFMask = 0x5F20FC00,
- FPIntegerConvertMask = 0xFFFFFC00,
- FCVTNS = FPIntegerConvertFixed | 0x00000000,
- FCVTNS_ws = FCVTNS,
- FCVTNS_xs = FCVTNS | SixtyFourBits,
- FCVTNS_wd = FCVTNS | FP64,
- FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
- FCVTNU = FPIntegerConvertFixed | 0x00010000,
- FCVTNU_ws = FCVTNU,
- FCVTNU_xs = FCVTNU | SixtyFourBits,
- FCVTNU_wd = FCVTNU | FP64,
- FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
- FCVTPS = FPIntegerConvertFixed | 0x00080000,
- FCVTPS_ws = FCVTPS,
- FCVTPS_xs = FCVTPS | SixtyFourBits,
- FCVTPS_wd = FCVTPS | FP64,
- FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
- FCVTPU = FPIntegerConvertFixed | 0x00090000,
- FCVTPU_ws = FCVTPU,
- FCVTPU_xs = FCVTPU | SixtyFourBits,
- FCVTPU_wd = FCVTPU | FP64,
- FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
- FCVTMS = FPIntegerConvertFixed | 0x00100000,
- FCVTMS_ws = FCVTMS,
- FCVTMS_xs = FCVTMS | SixtyFourBits,
- FCVTMS_wd = FCVTMS | FP64,
- FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
- FCVTMU = FPIntegerConvertFixed | 0x00110000,
- FCVTMU_ws = FCVTMU,
- FCVTMU_xs = FCVTMU | SixtyFourBits,
- FCVTMU_wd = FCVTMU | FP64,
- FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
- FCVTZS = FPIntegerConvertFixed | 0x00180000,
- FCVTZS_ws = FCVTZS,
- FCVTZS_xs = FCVTZS | SixtyFourBits,
- FCVTZS_wd = FCVTZS | FP64,
- FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
- FCVTZU = FPIntegerConvertFixed | 0x00190000,
- FCVTZU_ws = FCVTZU,
- FCVTZU_xs = FCVTZU | SixtyFourBits,
- FCVTZU_wd = FCVTZU | FP64,
- FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
- SCVTF = FPIntegerConvertFixed | 0x00020000,
- SCVTF_sw = SCVTF,
- SCVTF_sx = SCVTF | SixtyFourBits,
- SCVTF_dw = SCVTF | FP64,
- SCVTF_dx = SCVTF | SixtyFourBits | FP64,
- UCVTF = FPIntegerConvertFixed | 0x00030000,
- UCVTF_sw = UCVTF,
- UCVTF_sx = UCVTF | SixtyFourBits,
- UCVTF_dw = UCVTF | FP64,
- UCVTF_dx = UCVTF | SixtyFourBits | FP64,
- FCVTAS = FPIntegerConvertFixed | 0x00040000,
- FCVTAS_ws = FCVTAS,
- FCVTAS_xs = FCVTAS | SixtyFourBits,
- FCVTAS_wd = FCVTAS | FP64,
- FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
- FCVTAU = FPIntegerConvertFixed | 0x00050000,
- FCVTAU_ws = FCVTAU,
- FCVTAU_xs = FCVTAU | SixtyFourBits,
- FCVTAU_wd = FCVTAU | FP64,
- FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
- FMOV_ws = FPIntegerConvertFixed | 0x00060000,
- FMOV_sw = FPIntegerConvertFixed | 0x00070000,
- FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
- FMOV_dx = FMOV_sw | SixtyFourBits | FP64
-};
-
-// Conversion between fixed point and floating point.
-enum FPFixedPointConvertOp {
- FPFixedPointConvertFixed = 0x1E000000,
- FPFixedPointConvertFMask = 0x5F200000,
- FPFixedPointConvertMask = 0xFFFF0000,
- FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
- FCVTZS_ws_fixed = FCVTZS_fixed,
- FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
- FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
- FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
- FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
- FCVTZU_ws_fixed = FCVTZU_fixed,
- FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
- FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
- FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
- SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
- SCVTF_sw_fixed = SCVTF_fixed,
- SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
- SCVTF_dw_fixed = SCVTF_fixed | FP64,
- SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
- UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
- UCVTF_sw_fixed = UCVTF_fixed,
- UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
- UCVTF_dw_fixed = UCVTF_fixed | FP64,
- UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
-};
-
-// Unimplemented and unallocated instructions. These are defined to make
-// fixed-bit assertions easier.
-enum UnimplementedOp {
- UnimplementedFixed = 0x00000000,
- UnimplementedFMask = 0x00000000
-};
-
-enum UnallocatedOp {
- UnallocatedFixed = 0x00000000,
- UnallocatedFMask = 0x00000000
-};
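-
-// These Fixed/FMask pairs are consumed by checks of the form
-//   instr->Mask(OpFMask) == OpFixed
-// (see DEFINE_VISITOR_CALLERS in decoder-a64.cc): FMask selects the bits
-// that are constant for an instruction class, and Fixed gives their
-// required values. With both masks zero, as here, the check holds for any
-// bit pattern, which is what makes the fixed-bit assertions trivial for
-// unimplemented and unallocated instructions.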
-
-} } // namespace v8::internal
-
-#endif // V8_A64_CONSTANTS_A64_H_
diff --git a/deps/v8/src/a64/cpu-a64.cc b/deps/v8/src/a64/cpu-a64.cc
deleted file mode 100644
index 6dd5e52ae2..0000000000
--- a/deps/v8/src/a64/cpu-a64.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for a64 independent of OS goes here.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "a64/cpu-a64.h"
-#include "a64/utils-a64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
-unsigned CpuFeatures::cross_compile_ = 0;
-
-// Initialise to smallest possible cache size.
-unsigned CpuFeatures::dcache_line_size_ = 1;
-unsigned CpuFeatures::icache_line_size_ = 1;
-
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true;
-}
-
-
-void CPU::FlushICache(void* address, size_t length) {
- if (length == 0) {
- return;
- }
-
-#ifdef USE_SIMULATOR
- // TODO(all): consider doing some cache simulation to ensure every address
- // run has been synced.
- USE(address);
- USE(length);
-#else
- // The code below assumes user space cache operations are allowed. The goal
- // of this routine is to make sure the code generated is visible to the I
- // side of the CPU.
-
- uintptr_t start = reinterpret_cast<uintptr_t>(address);
- // The sizes are cast to uintptr_t so the alignment masks cover a pointer.
- uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
- uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
- // Cache line sizes are always a power of 2.
- ASSERT(CountSetBits(dsize, 64) == 1);
- ASSERT(CountSetBits(isize, 64) == 1);
- uintptr_t dstart = start & ~(dsize - 1);
- uintptr_t istart = start & ~(isize - 1);
- uintptr_t end = start + length;
-
- __asm__ __volatile__ ( // NOLINT
- // Clean every line of the D cache containing the target data.
- "0: \n\t"
- // dc : Data Cache maintenance
- // c : Clean
- // va : by (Virtual) Address
- // u : to the point of Unification
- // The point of unification for a processor is the point by which the
- // instruction and data caches are guaranteed to see the same copy of a
- // memory location. See ARM DDI 0406B page B2-12 for more information.
- "dc cvau, %[dline] \n\t"
- "add %[dline], %[dline], %[dsize] \n\t"
- "cmp %[dline], %[end] \n\t"
- "b.lt 0b \n\t"
- // Barrier to make sure the effect of the code above is visible to the rest
- // of the world.
- // dsb : Data Synchronisation Barrier
- // ish : Inner SHareable domain
- // The point of unification for an Inner Shareable shareability domain is
- // the point by which the instruction and data caches of all the processors
- // in that Inner Shareable shareability domain are guaranteed to see the
- // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
- // information.
- "dsb ish \n\t"
- // Invalidate every line of the I cache containing the target data.
- "1: \n\t"
- // ic : instruction cache maintenance
- // i : invalidate
- // va : by address
- // u : to the point of unification
- "ic ivau, %[iline] \n\t"
- "add %[iline], %[iline], %[isize] \n\t"
- "cmp %[iline], %[end] \n\t"
- "b.lt 1b \n\t"
- // Barrier to make sure the effect of the code above is visible to the rest
- // of the world.
- "dsb ish \n\t"
- // Barrier to ensure that any instructions prefetched before this point
- // are discarded.
- // isb : Instruction Synchronisation Barrier
- "isb \n\t"
- : [dline] "+r" (dstart),
- [iline] "+r" (istart)
- : [dsize] "r" (dsize),
- [isize] "r" (isize),
- [end] "r" (end)
- // This code does not write to memory but without the dependency gcc might
- // move this code before the code is generated.
- : "cc", "memory"
- ); // NOLINT
-#endif
-}
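-
-// A minimal sketch of the line-alignment arithmetic used above, with a
-// hypothetical 64-byte D-cache line size:
-//
-//   uintptr_t dsize  = 64;                     // from dcache_line_size()
-//   uintptr_t start  = 0x12345ABC;
-//   uintptr_t dstart = start & ~(dsize - 1);   // 0x12345A80
-//
-// Because line sizes are powers of two, clearing the low bits rounds the
-// address down to the start of its cache line; stepping by dsize until
-// past (start + length) then covers every line overlapping the range.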
-
-
-void CpuFeatures::Probe() {
- // Compute I and D cache line size. The cache type register holds
- // information about the caches.
- uint32_t cache_type_register = GetCacheType();
-
- static const int kDCacheLineSizeShift = 16;
- static const int kICacheLineSizeShift = 0;
- static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
- static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
-
- // The cache type register holds the size of the I and D caches as a power of
- // two.
- uint32_t dcache_line_size_power_of_two =
- (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
- uint32_t icache_line_size_power_of_two =
- (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
-
- dcache_line_size_ = 1 << dcache_line_size_power_of_two;
- icache_line_size_ = 1 << icache_line_size_power_of_two;
-
- // AArch64 has no configuration options, so no further probing is required.
- supported_ = 0;
-
-#ifdef DEBUG
- initialized_ = true;
-#endif
-}
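-
-// A worked example with a hypothetical cache type register value: if
-// CTR_EL0 reads 0x00044004, then
-//   dcache_line_size_power_of_two = (0x00044004 & 0x000f0000) >> 16 = 4
-//   icache_line_size_power_of_two = (0x00044004 & 0x0000000f) >> 0  = 4
-// so both line sizes are set to 1 << 4 = 16 bytes. The values are only
-// used as step sizes by CPU::FlushICache, so underestimating the true
-// line size is safe; the flush loops just take more iterations.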
-
-
-unsigned CpuFeatures::dcache_line_size() {
- ASSERT(initialized_);
- return dcache_line_size_;
-}
-
-
-unsigned CpuFeatures::icache_line_size() {
- ASSERT(initialized_);
- return icache_line_size_;
-}
-
-
-uint32_t CpuFeatures::GetCacheType() {
-#ifdef USE_SIMULATOR
- // This will lead to a cache with 1-byte lines, which is fine since the
- // simulator will not need this information.
- return 0;
-#else
- uint32_t cache_type_register;
- // Copy the content of the cache type register to a core register.
- __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
- : [ctr] "=r" (cache_type_register));
- return cache_type_register;
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/cpu-a64.h b/deps/v8/src/a64/cpu-a64.h
deleted file mode 100644
index 969312b3c4..0000000000
--- a/deps/v8/src/a64/cpu-a64.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_CPU_A64_H_
-#define V8_A64_CPU_A64_H_
-
-#include <stdio.h>
-#include "serialize.h"
-#include "cpu.h"
-
-namespace v8 {
-namespace internal {
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a CpuFeatureScope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- // There are no optional features for A64.
- return false;
- }
-
- static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
- ASSERT(initialized_);
- // There are no optional features for A64.
- return false;
- }
-
- static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
- }
-
- // I and D cache line size in bytes.
- static unsigned dcache_line_size();
- static unsigned icache_line_size();
-
- static unsigned supported_;
-
- static bool VerifyCrossCompiling() {
- // There are no optional features for A64.
- ASSERT(cross_compile_ == 0);
- return true;
- }
-
- static bool VerifyCrossCompiling(CpuFeature f) {
- // There are no optional features for A64.
- USE(f);
- ASSERT(cross_compile_ == 0);
- return true;
- }
-
- private:
- // Return the content of the cache type register.
- static uint32_t GetCacheType();
-
- // I and D cache line size in bytes.
- static unsigned icache_line_size_;
- static unsigned dcache_line_size_;
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
-
- // This isn't used (and is always 0), but it is required by V8.
- static unsigned found_by_runtime_probing_only_;
-
- static unsigned cross_compile_;
-
- friend class PlatformFeatureScope;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
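-
-// A usage sketch for the contract documented above (hypothetical, since
-// IsSupported() currently returns false for every feature on A64, and
-// SOME_FEATURE is a placeholder name):
-//
-//   if (CpuFeatures::IsSupported(SOME_FEATURE)) {
-//     CpuFeatureScope scope(masm, SOME_FEATURE);
-//     // ... emit code that relies on SOME_FEATURE ...
-//   }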
-
-} } // namespace v8::internal
-
-#endif // V8_A64_CPU_A64_H_
diff --git a/deps/v8/src/a64/debug-a64.cc b/deps/v8/src/a64/debug-a64.cc
deleted file mode 100644
index d8711650c1..0000000000
--- a/deps/v8/src/a64/debug-a64.cc
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "codegen.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
- // the return from JS function sequence from
- // mov sp, fp
- // ldp fp, lr, [sp], #16
- // ldr ip0, [pc, #(3 * kInstructionSize)]
- // add sp, sp, ip0
- // ret
- // <number of parameters ...
- // ... plus one (64 bits)>
- // to a call to the debug break return code.
- // ldr ip0, [pc, #(3 * kInstructionSize)]
- // blr ip0
- // hlt kHltBadCode @ code should not return, catch if it does.
- // <debug break return code ...
- // ... entry point address (64 bits)>
-
- // The patching code must not overflow the space occupied by the return
- // sequence.
- STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
- byte* entry =
- debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
-
- // The first instruction of a patched return sequence must be a load literal
- // loading the address of the debug break return code.
- patcher.LoadLiteral(ip0, 3 * kInstructionSize);
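- // The offset is relative to this load: the 64-bit literal sits
- // 3 * kInstructionSize bytes ahead, past the blr and hlt emitted below,
- // matching the patched sequence shown at the top of this function.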
- // TODO(all): check the following is correct.
- // The debug break return code will push a frame and call statically compiled
- // code. By using blr, even though control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
- // iterate over the frames to find the call to the debug break return code.
- patcher.blr(ip0);
- patcher.hlt(kHltBadCode);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- // Reset the code emitted by EmitReturnSequence to its original state.
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSRetSequenceInstructions);
-}
-
-
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- // Patch the code emitted by Debug::GenerateSlots, changing the debug break
- // slot code from
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // to a call to the debug slot code.
- // ldr ip0, [pc, #(2 * kInstructionSize)]
- // blr ip0
- // <debug break slot code ...
- // ... entry point address (64 bits)>
-
- // TODO(all): consider adding a hlt instruction after the blr as we don't
- // expect control to return here. This implies increasing
- // kDebugBreakSlotInstructions to 5 instructions.
-
- // The patching code must not overflow the space occupied by the debug
- // break slot sequence.
- STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
- byte* entry =
- debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
-
- // The first instruction of a patched debug break slot must be a load literal
- // loading the address of the debug break slot code.
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
- // TODO(all): check the following is correct.
- // The debug break slot code will push a frame and call statically compiled
- // code. By using blr, even though control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
- // iterate over the frames to find the call to the debug break slot code.
- patcher.blr(ip0);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- Register scratch) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Any live values (object_regs and non_object_regs) in caller-saved
- // registers (or lr) need to be stored on the stack so that their values are
- // safely preserved for a call into C code.
- //
- // Also:
- // * object_regs may be modified during the C code by the garbage
- // collector. Every object register must be a valid tagged pointer or
- // SMI.
- //
- // * non_object_regs will be converted to SMIs so that the garbage
- // collector doesn't try to interpret them as pointers.
- //
- // TODO(jbramley): Why can't this handle callee-saved registers?
- ASSERT((~kCallerSaved.list() & object_regs) == 0);
- ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- ASSERT((scratch.Bit() & object_regs) == 0);
- ASSERT((scratch.Bit() & non_object_regs) == 0);
- ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
- ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
- STATIC_ASSERT(kSmiValueSize == 32);
-
- CPURegList non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Store each non-object register as two SMIs.
- Register reg = Register(non_object_list.PopLowestIndex());
- __ Push(reg);
- __ Poke(wzr, 0);
- __ Push(reg.W(), wzr);
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
- }
-
- if (object_regs != 0) {
- __ PushXRegList(object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Mov(x0, 0); // No arguments.
- __ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Restore the register values from the expression stack.
- if (object_regs != 0) {
- __ PopXRegList(object_regs);
- }
-
- non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Load each non-object register from two SMIs.
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- Register reg = Register(non_object_list.PopHighestIndex());
- __ Pop(scratch, reg);
- __ Bfxil(reg, scratch, 32, 32);
- }
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller, which was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
- masm->isolate());
- __ Mov(scratch, Operand(after_break_target));
- __ Ldr(scratch, MemOperand(scratch));
- __ Br(scratch);
-}
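-
-// A worked example of the non-object register round trip above, for a
-// hypothetical live value x5 = 0x0123456789ABCDEF. With kSmiShift == 32, a
-// 64-bit word whose low 32 bits are zero is a valid SMI, so the two stack
-// slots hold:
-//   [jssp + 8]: 0x0123456700000000 (high half of x5, tagged)
-//   [jssp + 0]: 0x89ABCDEF00000000 (low half of x5, tagged)
-// On restore, Pop(scratch, reg) gives scratch = 0x89ABCDEF00000000 and
-// reg = 0x0123456700000000; Bfxil(reg, scratch, 32, 32) then copies bits
-// [63:32] of scratch into bits [31:0] of reg, reconstructing
-// 0x0123456789ABCDEF.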
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-a64.cc).
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers x0 and x2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-a64.cc).
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
- // Registers x0, x1, and x2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
- // Register state for CompareNil IC
- // ----------- S t a t e -------------
- // -- x0 : value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-a64.cc)
- // ----------- S t a t e -------------
- // -- x2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that x0 is TOS, which
- // is an object - this is not generally the case, so this should be used
- // with care.
- Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-a64.cc).
- // ----------- S t a t e -------------
- // -- x1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-a64.cc).
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- x2 : feedback array
- // -- x3 : slot in feedback array
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-a64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-a64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -- x2 : feedback array
- // -- x3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nops to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
-
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(Assembler::DEBUG_BREAK_NOP);
- }
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted, no registers can
- // contain object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, x10);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
-}
-
-const bool Debug::kFrameDropperSupported = false;
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/debugger-a64.cc b/deps/v8/src/a64/debugger-a64.cc
deleted file mode 100644
index 5bccc39776..0000000000
--- a/deps/v8/src/a64/debugger-a64.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#if V8_TARGET_ARCH_A64
-
-#if defined(USE_SIMULATOR)
-
-#include "a64/debugger-a64.h"
-
-namespace v8 {
-namespace internal {
-
-
-void Debugger::VisitException(Instruction* instr) {
- switch (instr->Mask(ExceptionMask)) {
- case HLT: {
- if (instr->ImmException() == kImmExceptionIsDebug) {
- // Read the arguments encoded inline in the instruction stream.
- uint32_t code;
- uint32_t parameters;
- char const * message;
-
- ASSERT(sizeof(*pc_) == 1);
- memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
- memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
- message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
-
- if (message[0] == '\0') {
- fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
- } else {
- fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
- }
-
- // Apply the tracing directives encoded in the parameters.
- switch (parameters & kDebuggerTracingDirectivesMask) {
- case TRACE_ENABLE:
- set_log_parameters(log_parameters() | parameters);
- break;
- case TRACE_DISABLE:
- set_log_parameters(log_parameters() & ~parameters);
- break;
- case TRACE_OVERRIDE:
- set_log_parameters(parameters);
- break;
- default:
- // We don't support a one-shot LOG_DISASM.
- ASSERT((parameters & LOG_DISASM) == 0);
- // Don't print information that is already being traced.
- parameters &= ~log_parameters();
- // Print the requested information.
- if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
- if (parameters & LOG_REGS) PrintRegisters(true);
- if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
- }
-
- // Check if the debugger should break.
- if (parameters & BREAK) OS::DebugBreak();
-
- // The stop parameters are inlined in the code. Skip them:
- // - Skip to the end of the message string.
- pc_ += kDebugMessageOffset + strlen(message) + 1;
- // - Advance to the next aligned location.
- pc_ = AlignUp(pc_, kInstructionSize);
- // - Verify that the unreachable marker is present.
- ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
- ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
- kImmExceptionIsUnreachable);
- // - Skip past the unreachable marker.
- pc_ += kInstructionSize;
- pc_modified_ = true;
- } else {
- Simulator::VisitException(instr);
- }
- break;
- }
-
- default:
- UNIMPLEMENTED();
- }
-}
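-
-// For reference, the in-stream layout consumed above, reconstructed from
-// the offsets this handler reads (the emitting side is not in this file):
-//   hlt kImmExceptionIsDebug
-//   [pc_ + kDebugCodeOffset]    uint32_t code
-//   [pc_ + kDebugParamsOffset]  uint32_t parameters
-//   [pc_ + kDebugMessageOffset] NUL-terminated message string
-//   <padding up to the next kInstructionSize boundary>
-//   hlt kImmExceptionIsUnreachable
-// which is why the code skips past the string, aligns up, checks for the
-// unreachable marker, and then steps over one final instruction.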
-
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/debugger-a64.h b/deps/v8/src/a64/debugger-a64.h
deleted file mode 100644
index 1317b5f37d..0000000000
--- a/deps/v8/src/a64/debugger-a64.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_DEBUGGER_A64_H_
-#define V8_A64_DEBUGGER_A64_H_
-
-#if defined(USE_SIMULATOR)
-
-#include "globals.h"
-#include "utils.h"
-#include "a64/constants-a64.h"
-#include "a64/simulator-a64.h"
-
-namespace v8 {
-namespace internal {
-
-
-class Debugger : public Simulator {
- public:
- Debugger(Decoder* decoder, FILE* stream = stderr)
- : Simulator(decoder, NULL, stream) {}
-
- // Overridden functions.
- void VisitException(Instruction* instr);
-};
-
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_A64_DEBUGGER_A64_H_
diff --git a/deps/v8/src/a64/decoder-a64.cc b/deps/v8/src/a64/decoder-a64.cc
deleted file mode 100644
index e7383d446a..0000000000
--- a/deps/v8/src/a64/decoder-a64.cc
+++ /dev/null
@@ -1,726 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "globals.h"
-#include "utils.h"
-#include "a64/decoder-a64.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Top-level instruction decode function.
-void Decoder::Decode(Instruction *instr) {
- if (instr->Bits(28, 27) == 0) {
- VisitUnallocated(instr);
- } else {
- switch (instr->Bits(27, 24)) {
- // 0: PC relative addressing.
- case 0x0: DecodePCRelAddressing(instr); break;
-
- // 1: Add/sub immediate.
- case 0x1: DecodeAddSubImmediate(instr); break;
-
- // A: Logical shifted register.
- // Add/sub with carry.
- // Conditional compare register.
- // Conditional compare immediate.
- // Conditional select.
- // Data processing 1 source.
- // Data processing 2 source.
- // B: Add/sub shifted register.
- // Add/sub extended register.
- // Data processing 3 source.
- case 0xA:
- case 0xB: DecodeDataProcessing(instr); break;
-
- // 2: Logical immediate.
- // Move wide immediate.
- case 0x2: DecodeLogical(instr); break;
-
- // 3: Bitfield.
- // Extract.
- case 0x3: DecodeBitfieldExtract(instr); break;
-
- // 4: Unconditional branch immediate.
- // Exception generation.
- // Compare and branch immediate.
- // 5: Compare and branch immediate.
- // Conditional branch.
- // System.
- // 6,7: Unconditional branch.
- // Test and branch immediate.
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7: DecodeBranchSystemException(instr); break;
-
- // 8,9: Load/store register pair post-index.
- // Load register literal.
- // Load/store register unscaled immediate.
- // Load/store register immediate post-index.
- // Load/store register immediate pre-index.
- // Load/store register offset.
- // C,D: Load/store register pair offset.
- // Load/store register pair pre-index.
- // Load/store register unsigned immediate.
- // Advanced SIMD.
- case 0x8:
- case 0x9:
- case 0xC:
- case 0xD: DecodeLoadStore(instr); break;
-
- // E: FP fixed point conversion.
- // FP integer conversion.
- // FP data processing 1 source.
- // FP compare.
- // FP immediate.
- // FP data processing 2 source.
- // FP conditional compare.
- // FP conditional select.
- // Advanced SIMD.
- // F: FP data processing 3 source.
- // Advanced SIMD.
- case 0xE:
- case 0xF: DecodeFP(instr); break;
- }
- }
-}
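-
-// A worked example of one path through this tree (hand-decoded, so treat
-// the encoding as illustrative only): "add x2, x3, #5" assembles to
-// 0x91001462. Bits 28:27 are 0b10, so the instruction is not rejected as
-// unallocated; bits 27:24 are 0x1, selecting DecodeAddSubImmediate; there
-// bit 23 is 0, so VisitAddSubImmediate is invoked on every registered
-// visitor.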
-
-
-void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
- visitors_.remove(new_visitor);
- visitors_.push_back(new_visitor);
-}
-
-
-void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
- visitors_.remove(new_visitor);
- visitors_.push_front(new_visitor);
-}
-
-
-void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
- DecoderVisitor* registered_visitor) {
- visitors_.remove(new_visitor);
- std::list<DecoderVisitor*>::iterator it;
- for (it = visitors_.begin(); it != visitors_.end(); it++) {
- if (*it == registered_visitor) {
- visitors_.insert(it, new_visitor);
- return;
- }
- }
- // We reached the end of the list without finding registered_visitor, so
- // insert new_visitor at the end. (Dereferencing the end iterator to check
- // would be undefined behaviour.)
- visitors_.insert(it, new_visitor);
-}
-
-
-void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
- DecoderVisitor* registered_visitor) {
- visitors_.remove(new_visitor);
- std::list<DecoderVisitor*>::iterator it;
- for (it = visitors_.begin(); it != visitors_.end(); it++) {
- if (*it == registered_visitor) {
- it++;
- visitors_.insert(it, new_visitor);
- return;
- }
- }
- // We reached the end of the list without finding registered_visitor, so
- // append new_visitor at the end. (Dereferencing the end iterator to check
- // would be undefined behaviour.)
- visitors_.push_back(new_visitor);
-}
-
-
-void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
- visitors_.remove(visitor);
-}
-
-
-void Decoder::DecodePCRelAddressing(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x0);
- // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
- // decode.
- ASSERT(instr->Bit(28) == 0x1);
- VisitPCRelAddressing(instr);
-}
-
-
-void Decoder::DecodeBranchSystemException(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x4) ||
- (instr->Bits(27, 24) == 0x5) ||
- (instr->Bits(27, 24) == 0x6) ||
- (instr->Bits(27, 24) == 0x7) );
-
- switch (instr->Bits(31, 29)) {
- case 0:
- case 4: {
- VisitUnconditionalBranch(instr);
- break;
- }
- case 1:
- case 5: {
- if (instr->Bit(25) == 0) {
- VisitCompareBranch(instr);
- } else {
- VisitTestBranch(instr);
- }
- break;
- }
- case 2: {
- if (instr->Bit(25) == 0) {
- if ((instr->Bit(24) == 0x1) ||
- (instr->Mask(0x01000010) == 0x00000010)) {
- VisitUnallocated(instr);
- } else {
- VisitConditionalBranch(instr);
- }
- } else {
- VisitUnallocated(instr);
- }
- break;
- }
- case 6: {
- if (instr->Bit(25) == 0) {
- if (instr->Bit(24) == 0) {
- if ((instr->Bits(4, 2) != 0) ||
- (instr->Mask(0x00E0001D) == 0x00200001) ||
- (instr->Mask(0x00E0001D) == 0x00400001) ||
- (instr->Mask(0x00E0001E) == 0x00200002) ||
- (instr->Mask(0x00E0001E) == 0x00400002) ||
- (instr->Mask(0x00E0001C) == 0x00600000) ||
- (instr->Mask(0x00E0001C) == 0x00800000) ||
- (instr->Mask(0x00E0001F) == 0x00A00000) ||
- (instr->Mask(0x00C0001C) == 0x00C00000)) {
- VisitUnallocated(instr);
- } else {
- VisitException(instr);
- }
- } else {
- if (instr->Bits(23, 22) == 0) {
- const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
- if ((instr->Bits(21, 19) == 0x4) ||
- (masked_003FF0E0 == 0x00033000) ||
- (masked_003FF0E0 == 0x003FF020) ||
- (masked_003FF0E0 == 0x003FF060) ||
- (masked_003FF0E0 == 0x003FF0E0) ||
- (instr->Mask(0x00388000) == 0x00008000) ||
- (instr->Mask(0x0038E000) == 0x00000000) ||
- (instr->Mask(0x0039E000) == 0x00002000) ||
- (instr->Mask(0x003AE000) == 0x00002000) ||
- (instr->Mask(0x003CE000) == 0x00042000) ||
- (instr->Mask(0x003FFFC0) == 0x000320C0) ||
- (instr->Mask(0x003FF100) == 0x00032100) ||
- (instr->Mask(0x003FF200) == 0x00032200) ||
- (instr->Mask(0x003FF400) == 0x00032400) ||
- (instr->Mask(0x003FF800) == 0x00032800) ||
- (instr->Mask(0x0038F000) == 0x00005000) ||
- (instr->Mask(0x0038E000) == 0x00006000)) {
- VisitUnallocated(instr);
- } else {
- VisitSystem(instr);
- }
- } else {
- VisitUnallocated(instr);
- }
- }
- } else {
- if ((instr->Bit(24) == 0x1) ||
- (instr->Bits(20, 16) != 0x1F) ||
- (instr->Bits(15, 10) != 0) ||
- (instr->Bits(4, 0) != 0) ||
- (instr->Bits(24, 21) == 0x3) ||
- (instr->Bits(24, 22) == 0x3)) {
- VisitUnallocated(instr);
- } else {
- VisitUnconditionalBranchToRegister(instr);
- }
- }
- break;
- }
- case 3:
- case 7: {
- VisitUnallocated(instr);
- break;
- }
- }
-}
-
-
-void Decoder::DecodeLoadStore(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0x8) ||
- (instr->Bits(27, 24) == 0x9) ||
- (instr->Bits(27, 24) == 0xC) ||
- (instr->Bits(27, 24) == 0xD) );
-
- if (instr->Bit(24) == 0) {
- if (instr->Bit(28) == 0) {
- if (instr->Bit(29) == 0) {
- if (instr->Bit(26) == 0) {
- // TODO(all): VisitLoadStoreExclusive.
- VisitUnimplemented(instr);
- } else {
- DecodeAdvSIMDLoadStore(instr);
- }
- } else {
- if ((instr->Bits(31, 30) == 0x3) ||
- (instr->Mask(0xC4400000) == 0x40000000)) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(23) == 0) {
- if (instr->Mask(0xC4400000) == 0xC0400000) {
- VisitUnallocated(instr);
- } else {
- VisitLoadStorePairNonTemporal(instr);
- }
- } else {
- VisitLoadStorePairPostIndex(instr);
- }
- }
- }
- } else {
- if (instr->Bit(29) == 0) {
- if (instr->Mask(0xC4000000) == 0xC4000000) {
- VisitUnallocated(instr);
- } else {
- VisitLoadLiteral(instr);
- }
- } else {
- if ((instr->Mask(0x84C00000) == 0x80C00000) ||
- (instr->Mask(0x44800000) == 0x44800000) ||
- (instr->Mask(0x84800000) == 0x84800000)) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(21) == 0) {
- switch (instr->Bits(11, 10)) {
- case 0: {
- VisitLoadStoreUnscaledOffset(instr);
- break;
- }
- case 1: {
- if (instr->Mask(0xC4C00000) == 0xC0800000) {
- VisitUnallocated(instr);
- } else {
- VisitLoadStorePostIndex(instr);
- }
- break;
- }
- case 2: {
- // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
- VisitUnimplemented(instr);
- break;
- }
- case 3: {
- if (instr->Mask(0xC4C00000) == 0xC0800000) {
- VisitUnallocated(instr);
- } else {
- VisitLoadStorePreIndex(instr);
- }
- break;
- }
- }
- } else {
- if (instr->Bits(11, 10) == 0x2) {
- if (instr->Bit(14) == 0) {
- VisitUnallocated(instr);
- } else {
- VisitLoadStoreRegisterOffset(instr);
- }
- } else {
- VisitUnallocated(instr);
- }
- }
- }
- }
- }
- } else {
- if (instr->Bit(28) == 0) {
- if (instr->Bit(29) == 0) {
- VisitUnallocated(instr);
- } else {
- if ((instr->Bits(31, 30) == 0x3) ||
- (instr->Mask(0xC4400000) == 0x40000000)) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(23) == 0) {
- VisitLoadStorePairOffset(instr);
- } else {
- VisitLoadStorePairPreIndex(instr);
- }
- }
- }
- } else {
- if (instr->Bit(29) == 0) {
- VisitUnallocated(instr);
- } else {
- if ((instr->Mask(0x84C00000) == 0x80C00000) ||
- (instr->Mask(0x44800000) == 0x44800000) ||
- (instr->Mask(0x84800000) == 0x84800000)) {
- VisitUnallocated(instr);
- } else {
- VisitLoadStoreUnsignedOffset(instr);
- }
- }
- }
- }
-}
-
-
-void Decoder::DecodeLogical(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x2);
-
- if (instr->Mask(0x80400000) == 0x00400000) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(23) == 0) {
- VisitLogicalImmediate(instr);
- } else {
- if (instr->Bits(30, 29) == 0x1) {
- VisitUnallocated(instr);
- } else {
- VisitMoveWideImmediate(instr);
- }
- }
- }
-}
-
-
-void Decoder::DecodeBitfieldExtract(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x3);
-
- if ((instr->Mask(0x80400000) == 0x80000000) ||
- (instr->Mask(0x80400000) == 0x00400000) ||
- (instr->Mask(0x80008000) == 0x00008000)) {
- VisitUnallocated(instr);
- } else if (instr->Bit(23) == 0) {
- if ((instr->Mask(0x80200000) == 0x00200000) ||
- (instr->Mask(0x60000000) == 0x60000000)) {
- VisitUnallocated(instr);
- } else {
- VisitBitfield(instr);
- }
- } else {
- if ((instr->Mask(0x60200000) == 0x00200000) ||
- (instr->Mask(0x60000000) != 0x00000000)) {
- VisitUnallocated(instr);
- } else {
- VisitExtract(instr);
- }
- }
-}
-
-
-void Decoder::DecodeAddSubImmediate(Instruction* instr) {
- ASSERT(instr->Bits(27, 24) == 0x1);
- if (instr->Bit(23) == 1) {
- VisitUnallocated(instr);
- } else {
- VisitAddSubImmediate(instr);
- }
-}
-
-
-void Decoder::DecodeDataProcessing(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xA) ||
- (instr->Bits(27, 24) == 0xB) );
-
- if (instr->Bit(24) == 0) {
- if (instr->Bit(28) == 0) {
- if (instr->Mask(0x80008000) == 0x00008000) {
- VisitUnallocated(instr);
- } else {
- VisitLogicalShifted(instr);
- }
- } else {
- switch (instr->Bits(23, 21)) {
- case 0: {
- if (instr->Mask(0x0000FC00) != 0) {
- VisitUnallocated(instr);
- } else {
- VisitAddSubWithCarry(instr);
- }
- break;
- }
- case 2: {
- if ((instr->Bit(29) == 0) ||
- (instr->Mask(0x00000410) != 0)) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(11) == 0) {
- VisitConditionalCompareRegister(instr);
- } else {
- VisitConditionalCompareImmediate(instr);
- }
- }
- break;
- }
- case 4: {
- if (instr->Mask(0x20000800) != 0x00000000) {
- VisitUnallocated(instr);
- } else {
- VisitConditionalSelect(instr);
- }
- break;
- }
- case 6: {
- if (instr->Bit(29) == 0x1) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bit(30) == 0) {
- if ((instr->Bit(15) == 0x1) ||
- (instr->Bits(15, 11) == 0) ||
- (instr->Bits(15, 12) == 0x1) ||
- (instr->Bits(15, 12) == 0x3) ||
- (instr->Bits(15, 13) == 0x3) ||
- (instr->Mask(0x8000EC00) == 0x00004C00) ||
- (instr->Mask(0x8000E800) == 0x80004000) ||
- (instr->Mask(0x8000E400) == 0x80004000)) {
- VisitUnallocated(instr);
- } else {
- VisitDataProcessing2Source(instr);
- }
- } else {
- if ((instr->Bit(13) == 1) ||
- (instr->Bits(20, 16) != 0) ||
- (instr->Bits(15, 14) != 0) ||
- (instr->Mask(0xA01FFC00) == 0x00000C00) ||
- (instr->Mask(0x201FF800) == 0x00001800)) {
- VisitUnallocated(instr);
- } else {
- VisitDataProcessing1Source(instr);
- }
- }
- }
- break;
- }
- case 1:
- case 3:
- case 5:
- case 7: VisitUnallocated(instr); break;
- }
- }
- } else {
- if (instr->Bit(28) == 0) {
- if (instr->Bit(21) == 0) {
- if ((instr->Bits(23, 22) == 0x3) ||
- (instr->Mask(0x80008000) == 0x00008000)) {
- VisitUnallocated(instr);
- } else {
- VisitAddSubShifted(instr);
- }
- } else {
- if ((instr->Mask(0x00C00000) != 0x00000000) ||
- (instr->Mask(0x00001400) == 0x00001400) ||
- (instr->Mask(0x00001800) == 0x00001800)) {
- VisitUnallocated(instr);
- } else {
- VisitAddSubExtended(instr);
- }
- }
- } else {
- if ((instr->Bit(30) == 0x1) ||
- (instr->Bits(30, 29) == 0x1) ||
- (instr->Mask(0xE0600000) == 0x00200000) ||
- (instr->Mask(0xE0608000) == 0x00400000) ||
- (instr->Mask(0x60608000) == 0x00408000) ||
- (instr->Mask(0x60E00000) == 0x00E00000) ||
- (instr->Mask(0x60E00000) == 0x00800000) ||
- (instr->Mask(0x60E00000) == 0x00600000)) {
- VisitUnallocated(instr);
- } else {
- VisitDataProcessing3Source(instr);
- }
- }
- }
-}
-
-
-void Decoder::DecodeFP(Instruction* instr) {
- ASSERT((instr->Bits(27, 24) == 0xE) ||
- (instr->Bits(27, 24) == 0xF) );
-
- if (instr->Bit(28) == 0) {
- DecodeAdvSIMDDataProcessing(instr);
- } else {
- if (instr->Bit(29) == 1) {
- VisitUnallocated(instr);
- } else {
- if (instr->Bits(31, 30) == 0x3) {
- VisitUnallocated(instr);
- } else if (instr->Bits(31, 30) == 0x1) {
- DecodeAdvSIMDDataProcessing(instr);
- } else {
- if (instr->Bit(24) == 0) {
- if (instr->Bit(21) == 0) {
- if ((instr->Bit(23) == 1) ||
- (instr->Bit(18) == 1) ||
- (instr->Mask(0x80008000) == 0x00000000) ||
- (instr->Mask(0x000E0000) == 0x00000000) ||
- (instr->Mask(0x000E0000) == 0x000A0000) ||
- (instr->Mask(0x00160000) == 0x00000000) ||
- (instr->Mask(0x00160000) == 0x00120000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPFixedPointConvert(instr);
- }
- } else {
- if (instr->Bits(15, 10) == 32) {
- VisitUnallocated(instr);
- } else if (instr->Bits(15, 10) == 0) {
- if ((instr->Bits(23, 22) == 0x3) ||
- (instr->Mask(0x000E0000) == 0x000A0000) ||
- (instr->Mask(0x000E0000) == 0x000C0000) ||
- (instr->Mask(0x00160000) == 0x00120000) ||
- (instr->Mask(0x00160000) == 0x00140000) ||
- (instr->Mask(0x20C40000) == 0x00800000) ||
- (instr->Mask(0x20C60000) == 0x00840000) ||
- (instr->Mask(0xA0C60000) == 0x80060000) ||
- (instr->Mask(0xA0C60000) == 0x00860000) ||
- (instr->Mask(0xA0C60000) == 0x00460000) ||
- (instr->Mask(0xA0CE0000) == 0x80860000) ||
- (instr->Mask(0xA0CE0000) == 0x804E0000) ||
- (instr->Mask(0xA0CE0000) == 0x000E0000) ||
- (instr->Mask(0xA0D60000) == 0x00160000) ||
- (instr->Mask(0xA0D60000) == 0x80560000) ||
- (instr->Mask(0xA0D60000) == 0x80960000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPIntegerConvert(instr);
- }
- } else if (instr->Bits(14, 10) == 16) {
- const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
- if ((instr->Mask(0x80180000) != 0) ||
- (masked_A0DF8000 == 0x00020000) ||
- (masked_A0DF8000 == 0x00030000) ||
- (masked_A0DF8000 == 0x00068000) ||
- (masked_A0DF8000 == 0x00428000) ||
- (masked_A0DF8000 == 0x00430000) ||
- (masked_A0DF8000 == 0x00468000) ||
- (instr->Mask(0xA0D80000) == 0x00800000) ||
- (instr->Mask(0xA0DE0000) == 0x00C00000) ||
- (instr->Mask(0xA0DF0000) == 0x00C30000) ||
- (instr->Mask(0xA0DC0000) == 0x00C40000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPDataProcessing1Source(instr);
- }
- } else if (instr->Bits(13, 10) == 8) {
- if ((instr->Bits(15, 14) != 0) ||
- (instr->Bits(2, 0) != 0) ||
- (instr->Mask(0x80800000) != 0x00000000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPCompare(instr);
- }
- } else if (instr->Bits(12, 10) == 4) {
- if ((instr->Bits(9, 5) != 0) ||
- (instr->Mask(0x80800000) != 0x00000000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPImmediate(instr);
- }
- } else {
- if (instr->Mask(0x80800000) != 0x00000000) {
- VisitUnallocated(instr);
- } else {
- switch (instr->Bits(11, 10)) {
- case 1: {
- VisitFPConditionalCompare(instr);
- break;
- }
- case 2: {
- if ((instr->Bits(15, 14) == 0x3) ||
- (instr->Mask(0x00009000) == 0x00009000) ||
- (instr->Mask(0x0000A000) == 0x0000A000)) {
- VisitUnallocated(instr);
- } else {
- VisitFPDataProcessing2Source(instr);
- }
- break;
- }
- case 3: {
- VisitFPConditionalSelect(instr);
- break;
- }
- default: UNREACHABLE();
- }
- }
- }
- }
- } else {
- // Bit 30 == 1 has been handled earlier.
- ASSERT(instr->Bit(30) == 0);
- if (instr->Mask(0xA0800000) != 0) {
- VisitUnallocated(instr);
- } else {
- VisitFPDataProcessing3Source(instr);
- }
- }
- }
- }
- }
-}
-
-
-void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
- // TODO(all): Implement Advanced SIMD load/store instruction decode.
- ASSERT(instr->Bits(29, 25) == 0x6);
- VisitUnimplemented(instr);
-}
-
-
-void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
- // TODO(all): Implement Advanced SIMD data processing instruction decode.
- ASSERT(instr->Bits(27, 25) == 0x7);
- VisitUnimplemented(instr);
-}
-
-
-#define DEFINE_VISITOR_CALLERS(A) \
- void Decoder::Visit##A(Instruction *instr) { \
- ASSERT(instr->Mask(A##FMask) == A##Fixed); \
- std::list<DecoderVisitor*>::iterator it; \
- for (it = visitors_.begin(); it != visitors_.end(); it++) { \
- (*it)->Visit##A(instr); \
- } \
- }
-VISITOR_LIST(DEFINE_VISITOR_CALLERS)
-#undef DEFINE_VISITOR_CALLERS
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/decoder-a64.h b/deps/v8/src/a64/decoder-a64.h
deleted file mode 100644
index 0f53c34e88..0000000000
--- a/deps/v8/src/a64/decoder-a64.h
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_DECODER_A64_H_
-#define V8_A64_DECODER_A64_H_
-
-#include <list>
-
-#include "globals.h"
-#include "a64/instructions-a64.h"
-
-namespace v8 {
-namespace internal {
-
-
-// List macro containing all visitors needed by the decoder class.
-
-#define VISITOR_LIST(V) \
- V(PCRelAddressing) \
- V(AddSubImmediate) \
- V(LogicalImmediate) \
- V(MoveWideImmediate) \
- V(Bitfield) \
- V(Extract) \
- V(UnconditionalBranch) \
- V(UnconditionalBranchToRegister) \
- V(CompareBranch) \
- V(TestBranch) \
- V(ConditionalBranch) \
- V(System) \
- V(Exception) \
- V(LoadStorePairPostIndex) \
- V(LoadStorePairOffset) \
- V(LoadStorePairPreIndex) \
- V(LoadStorePairNonTemporal) \
- V(LoadLiteral) \
- V(LoadStoreUnscaledOffset) \
- V(LoadStorePostIndex) \
- V(LoadStorePreIndex) \
- V(LoadStoreRegisterOffset) \
- V(LoadStoreUnsignedOffset) \
- V(LogicalShifted) \
- V(AddSubShifted) \
- V(AddSubExtended) \
- V(AddSubWithCarry) \
- V(ConditionalCompareRegister) \
- V(ConditionalCompareImmediate) \
- V(ConditionalSelect) \
- V(DataProcessing1Source) \
- V(DataProcessing2Source) \
- V(DataProcessing3Source) \
- V(FPCompare) \
- V(FPConditionalCompare) \
- V(FPConditionalSelect) \
- V(FPImmediate) \
- V(FPDataProcessing1Source) \
- V(FPDataProcessing2Source) \
- V(FPDataProcessing3Source) \
- V(FPIntegerConvert) \
- V(FPFixedPointConvert) \
- V(Unallocated) \
- V(Unimplemented)
-
-// The Visitor interface. Disassembler and simulator (and other tools)
-// must provide implementations for all of these functions.
-class DecoderVisitor {
- public:
- #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
- VISITOR_LIST(DECLARE)
- #undef DECLARE
-
- virtual ~DecoderVisitor() {}
-
- private:
- // Visitors are registered in a list.
- std::list<DecoderVisitor*> visitors_;
-
- friend class Decoder;
-};
-
-
-class Decoder: public DecoderVisitor {
- public:
- Decoder() {}
-
- // Top-level instruction decoder function. Decodes an instruction and calls
- // the visitor functions registered with the Decoder class.
- void Decode(Instruction *instr);
-
- // Register a new visitor class with the decoder.
- // Decode() will call the corresponding visitor method from all registered
- // visitor classes when decoding reaches the leaf node of the instruction
- // decode tree.
- // Visitors are called in list order, from front to back.
- // A visitor can only be registered once.
- // Registering an already registered visitor will update its position.
- //
- // d.AppendVisitor(V1);
- // d.AppendVisitor(V2);
- // d.PrependVisitor(V2); // Move V2 to the start of the list.
- // d.InsertVisitorBefore(V3, V2);
- // d.AppendVisitor(V4);
- // d.AppendVisitor(V4); // No effect.
- //
- // d.Decode(i);
- //
- // will call the visitor methods of V3, V2, V1 and V4, in that order.
- void AppendVisitor(DecoderVisitor* visitor);
- void PrependVisitor(DecoderVisitor* visitor);
- void InsertVisitorBefore(DecoderVisitor* new_visitor,
- DecoderVisitor* registered_visitor);
- void InsertVisitorAfter(DecoderVisitor* new_visitor,
- DecoderVisitor* registered_visitor);
-
- // Remove a previously registered visitor class from the list of visitors
- // stored by the decoder.
- void RemoveVisitor(DecoderVisitor* visitor);
-
- #define DECLARE(A) void Visit##A(Instruction* instr);
- VISITOR_LIST(DECLARE)
- #undef DECLARE
-
- private:
- // Decode the PC relative addressing instruction, and call the corresponding
- // visitors.
- // On entry, instruction bits 27:24 = 0x0.
- void DecodePCRelAddressing(Instruction* instr);
-
- // Decode the add/subtract immediate instruction, and call the corresponding
- // visitors.
- // On entry, instruction bits 27:24 = 0x1.
- void DecodeAddSubImmediate(Instruction* instr);
-
- // Decode the branch, system command, and exception generation parts of
- // the instruction tree, and call the corresponding visitors.
- // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
- void DecodeBranchSystemException(Instruction* instr);
-
- // Decode the load and store parts of the instruction tree, and call
- // the corresponding visitors.
- // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
- void DecodeLoadStore(Instruction* instr);
-
- // Decode the logical immediate and move wide immediate parts of the
- // instruction tree, and call the corresponding visitors.
- // On entry, instruction bits 27:24 = 0x2.
- void DecodeLogical(Instruction* instr);
-
- // Decode the bitfield and extraction parts of the instruction tree,
- // and call the corresponding visitors.
- // On entry, instruction bits 27:24 = 0x3.
- void DecodeBitfieldExtract(Instruction* instr);
-
- // Decode the data processing parts of the instruction tree, and call the
- // corresponding visitors.
- // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
- void DecodeDataProcessing(Instruction* instr);
-
- // Decode the floating point parts of the instruction tree, and call the
- // corresponding visitors.
- // On entry, instruction bits 27:24 = {0xE, 0xF}.
- void DecodeFP(Instruction* instr);
-
- // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
- // and call the corresponding visitors.
- // On entry, instruction bits 29:25 = 0x6.
- void DecodeAdvSIMDLoadStore(Instruction* instr);
-
- // Decode the Advanced SIMD (NEON) data processing part of the instruction
- // tree, and call the corresponding visitors.
- // On entry, instruction bits 27:25 = 0x7.
- void DecodeAdvSIMDDataProcessing(Instruction* instr);
-};
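-
- // Typical wiring (a sketch; Disassembler is one DecoderVisitor
- // implementation, declared in a64/disasm-a64.h):
- //
- // Decoder decoder;
- // Disassembler disasm;
- // decoder.AppendVisitor(&disasm);
- // decoder.Decode(reinterpret_cast<Instruction*>(pc));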
-
-
-} } // namespace v8::internal
-
-#endif // V8_A64_DECODER_A64_H_
diff --git a/deps/v8/src/a64/deoptimizer-a64.cc b/deps/v8/src/a64/deoptimizer-a64.cc
deleted file mode 100644
index 660feb2394..0000000000
--- a/deps/v8/src/a64/deoptimizer-a64.cc
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-int Deoptimizer::patch_size() {
- // Size of the code used to patch lazy bailout points.
- // Patching is done by Deoptimizer::DeoptimizeFunction.
- return 4 * kInstructionSize;
-}
-
-
-void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
- // Invalidate the relocation information, as it will be made invalid by the
- // code patching below and is no longer needed.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- Address code_start_address = code->instruction_start();
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
-
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
-
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
-
- PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
- patcher.blr(ip0);
- patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
-
- ASSERT((prev_call_address == NULL) ||
- (call_address >= prev_call_address + patch_size()));
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-}
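-
- // The patched sequence occupies patch_size() bytes and has this shape
- // (a sketch; the 64-bit entry address fills two instruction slots):
- //
- // ldr ip0, #+8 ; load deopt_entry from the literal below
- // blr ip0 ; call the lazy deoptimization entry
- // dc64 deopt_entry ; 8-byte literal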
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee-saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values, though.
- for (int i = 0; i < Register::NumRegisters(); i++) {
- input_->SetRegister(i, 0);
- }
-
- // TODO(all): Do we also need to set a value to csp?
- input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
-
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- // There is no dynamic alignment padding on A64 in the input frame.
- return false;
-}
-
-
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler_);
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(x0.code(), params);
- output_frame->SetRegister(x1.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
-
-Code* Deoptimizer::NotifyStubFailureBuiltin() {
- return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- // TODO(all): This code needs to be revisited. We probably only need to save
- // caller-saved registers here. Callee-saved registers can be stored directly
- // in the input frame.
-
- // Save all allocatable floating point registers.
- CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
- 0, FPRegister::NumAllocatableRegisters() - 1);
- __ PushCPURegList(saved_fp_registers);
-
- // We save all the registers except jssp, sp and lr.
- CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
- saved_registers.Combine(fp);
- __ PushCPURegList(saved_registers);
-
- const int kSavedRegistersAreaSize =
- (saved_registers.Count() * kXRegSizeInBytes) +
- (saved_fp_registers.Count() * kDRegSizeInBytes);
-
- // Floating point registers are saved on the stack above core registers.
- const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
-
- // Get the bailout id from the stack.
- Register bailout_id = x2;
- __ Peek(bailout_id, kSavedRegistersAreaSize);
-
- Register code_object = x3;
- Register fp_to_sp = x4;
- // Get the address of the location in the code object. This is the return
- // address for lazy deoptimization.
- __ Mov(code_object, lr);
- // Compute the fp-to-sp delta, adjusting by one word for the bailout id.
- __ Add(fp_to_sp, masm()->StackPointer(),
- kSavedRegistersAreaSize + (1 * kPointerSize));
- __ Sub(fp_to_sp, fp, fp_to_sp);
-
- // Allocate a new deoptimizer object.
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(x1, type());
- // The following arguments are already loaded:
- // - x2: bailout id
- // - x3: code object address
- // - x4: fp-to-sp delta
- __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
-
- {
- // Call Deoptimizer::New().
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
- }
-
- // Preserve "deoptimizer" object in register x0.
- Register deoptimizer = x0;
-
- // Get the input frame descriptor pointer.
- __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
-
- // Copy core registers into the input frame.
- CPURegList copy_to_input = saved_registers;
- for (int i = 0; i < saved_registers.Count(); i++) {
- // TODO(all): Look for opportunities to optimize this by using ldp/stp.
- __ Peek(x2, i * kPointerSize);
- CPURegister current_reg = copy_to_input.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Str(x2, MemOperand(x1, offset));
- }
-
- // Copy FP registers to the input frame.
- for (int i = 0; i < saved_fp_registers.Count(); i++) {
- // TODO(all): Look for opportunities to optimize this by using ldp/stp.
- int dst_offset = FrameDescription::double_registers_offset() +
- (i * kDoubleSize);
- int src_offset = kFPRegistersOffset + (i * kDoubleSize);
- __ Peek(x2, src_offset);
- __ Str(x2, MemOperand(x1, dst_offset));
- }
-
- // Remove the bailout id and the saved registers from the stack.
- __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
-
- // Compute a pointer to the unwinding limit in register x2; that is
- // the first stack slot not part of the input frame.
- Register unwind_limit = x2;
- __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
- __ Add(unwind_limit, unwind_limit, __ StackPointer());
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Add(x3, x1, FrameDescription::frame_content_offset());
- Label pop_loop;
- Label pop_loop_header;
- __ B(&pop_loop_header);
- __ Bind(&pop_loop);
- __ Pop(x4);
- __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
- __ Bind(&pop_loop_header);
- __ Cmp(unwind_limit, __ StackPointer());
- __ B(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ Push(x0); // Preserve deoptimizer object across call.
-
- {
- // Call Deoptimizer::ComputeOutputFrames().
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate()), 1);
- }
- __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
- __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
- __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
- __ B(&outer_loop_header);
-
- __ Bind(&outer_push_loop);
- Register current_frame = x2;
- __ Ldr(current_frame, MemOperand(x0, 0));
- __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ B(&inner_loop_header);
-
- __ Bind(&inner_push_loop);
- __ Sub(x3, x3, kPointerSize);
- __ Add(x6, current_frame, x3);
- __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
- __ Push(x7);
- __ Bind(&inner_loop_header);
- __ Cbnz(x3, &inner_push_loop);
-
- __ Add(x0, x0, kPointerSize);
- __ Bind(&outer_loop_header);
- __ Cmp(x0, x1);
- __ B(lt, &outer_push_loop);
-
- __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
- !saved_fp_registers.IncludesAliasOf(fp_zero) &&
- !saved_fp_registers.IncludesAliasOf(fp_scratch));
- int src_offset = FrameDescription::double_registers_offset();
- while (!saved_fp_registers.IsEmpty()) {
- const CPURegister reg = saved_fp_registers.PopLowestIndex();
- __ Ldr(reg, MemOperand(x1, src_offset));
- src_offset += kDoubleSize;
- }
-
- // Push state from the last output frame.
- __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
- __ Push(x6);
-
- // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
- // stack, then pops it all into registers. Here, we try to load it directly
- // into the relevant registers. Is this correct? If so, we should improve the
- // ARM code.
-
- // TODO(all): This code needs to be revisited. We probably don't need to
- // restore all the registers, as fullcodegen does not keep live values in
- // registers (note that at least fp must be restored, though).
-
- // Restore registers from the last output frame.
- // Note that lr is not in the list of saved_registers and will be restored
- // later. We can use it to hold the address of last output frame while
- // reloading the other registers.
- ASSERT(!saved_registers.IncludesAliasOf(lr));
- Register last_output_frame = lr;
- __ Mov(last_output_frame, current_frame);
-
- // We don't need to restore x7 as it will be clobbered later to hold the
- // continuation address.
- Register continuation = x7;
- saved_registers.Remove(continuation);
-
- while (!saved_registers.IsEmpty()) {
- // TODO(all): Look for opportunities to optimize this by using ldp.
- CPURegister current_reg = saved_registers.PopLowestIndex();
- int offset = (current_reg.code() * kPointerSize) +
- FrameDescription::registers_offset();
- __ Ldr(current_reg, MemOperand(last_output_frame, offset));
- }
-
- __ Ldr(continuation, MemOperand(last_output_frame,
- FrameDescription::continuation_offset()));
- __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
- __ InitializeRootRegister();
- __ Br(continuation);
-}
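-
- // For reference (a sketch derived from the pushes above), the stack layout
- // while the saved registers are live, from high to low addresses:
- //
- // | bailout id | <- sp + kSavedRegistersAreaSize
- // | saved FP registers | <- sp + kFPRegistersOffset
- // | saved core registers | <- sp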
-
-
- // Size of an entry of the second-level deopt table.
- // This is the code size generated by GeneratePrologue for one entry.
-const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- // Note that registers are still live when jumping to an entry.
- Label done;
- {
- InstructionAccurateScope scope(masm());
-
- // The number of entries will never exceed kMaxNumberOfEntries.
- // As long as kMaxNumberOfEntries is a valid 16-bit immediate, we can use
- // a movz instruction to load the entry id.
- ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
-
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ movz(masm()->Tmp0(), i);
- __ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- }
- __ Bind(&done);
- // TODO(all): We need to add some kind of assertion to verify that Tmp0()
- // is not clobbered by Push.
- __ Push(masm()->Tmp0());
-}
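-
- // The generated table is a sequence of two-instruction entries, matching
- // table_entry_size_ (a sketch):
- //
- // entry 0: movz <Tmp0>, #0
- // b done
- // entry 1: movz <Tmp0>, #1
- // b done
- // ...
- // done: <Push Tmp0>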
-
-
-void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
- SetFrameSlot(offset, value);
-}
-
-
-void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
- SetFrameSlot(offset, value);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/a64/disasm-a64.cc b/deps/v8/src/a64/disasm-a64.cc
deleted file mode 100644
index 5ef75d55e2..0000000000
--- a/deps/v8/src/a64/disasm-a64.cc
+++ /dev/null
@@ -1,1854 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "disasm.h"
-#include "a64/disasm-a64.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-Disassembler::Disassembler() {
- buffer_size_ = 256;
- buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
- buffer_pos_ = 0;
- own_buffer_ = true;
-}
-
-
-Disassembler::Disassembler(char* text_buffer, int buffer_size) {
- buffer_size_ = buffer_size;
- buffer_ = text_buffer;
- buffer_pos_ = 0;
- own_buffer_ = false;
-}
-
-
-Disassembler::~Disassembler() {
- if (own_buffer_) {
- free(buffer_);
- }
-}
-
-
-char* Disassembler::GetOutput() {
- return buffer_;
-}
-
-
-void Disassembler::VisitAddSubImmediate(Instruction* instr) {
- bool rd_is_zr = RdIsZROrSP(instr);
- bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
- (instr->ImmAddSub() == 0);
- const char *mnemonic = "";
- const char *form = "'Rds, 'Rns, 'IAddSub";
- const char *form_cmp = "'Rns, 'IAddSub";
- const char *form_mov = "'Rds, 'Rns";
-
- switch (instr->Mask(AddSubImmediateMask)) {
- case ADD_w_imm:
- case ADD_x_imm: {
- mnemonic = "add";
- if (stack_op) {
- mnemonic = "mov";
- form = form_mov;
- }
- break;
- }
- case ADDS_w_imm:
- case ADDS_x_imm: {
- mnemonic = "adds";
- if (rd_is_zr) {
- mnemonic = "cmn";
- form = form_cmp;
- }
- break;
- }
- case SUB_w_imm:
- case SUB_x_imm: mnemonic = "sub"; break;
- case SUBS_w_imm:
- case SUBS_x_imm: {
- mnemonic = "subs";
- if (rd_is_zr) {
- mnemonic = "cmp";
- form = form_cmp;
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubShifted(Instruction* instr) {
- bool rd_is_zr = RdIsZROrSP(instr);
- bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm'HDP";
- const char *form_cmp = "'Rn, 'Rm'HDP";
- const char *form_neg = "'Rd, 'Rm'HDP";
-
- switch (instr->Mask(AddSubShiftedMask)) {
- case ADD_w_shift:
- case ADD_x_shift: mnemonic = "add"; break;
- case ADDS_w_shift:
- case ADDS_x_shift: {
- mnemonic = "adds";
- if (rd_is_zr) {
- mnemonic = "cmn";
- form = form_cmp;
- }
- break;
- }
- case SUB_w_shift:
- case SUB_x_shift: {
- mnemonic = "sub";
- if (rn_is_zr) {
- mnemonic = "neg";
- form = form_neg;
- }
- break;
- }
- case SUBS_w_shift:
- case SUBS_x_shift: {
- mnemonic = "subs";
- if (rd_is_zr) {
- mnemonic = "cmp";
- form = form_cmp;
- } else if (rn_is_zr) {
- mnemonic = "negs";
- form = form_neg;
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubExtended(Instruction* instr) {
- bool rd_is_zr = RdIsZROrSP(instr);
- const char *mnemonic = "";
- Extend mode = static_cast<Extend>(instr->ExtendMode());
- const char *form = ((mode == UXTX) || (mode == SXTX)) ?
- "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
- const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
- "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
-
- switch (instr->Mask(AddSubExtendedMask)) {
- case ADD_w_ext:
- case ADD_x_ext: mnemonic = "add"; break;
- case ADDS_w_ext:
- case ADDS_x_ext: {
- mnemonic = "adds";
- if (rd_is_zr) {
- mnemonic = "cmn";
- form = form_cmp;
- }
- break;
- }
- case SUB_w_ext:
- case SUB_x_ext: mnemonic = "sub"; break;
- case SUBS_w_ext:
- case SUBS_x_ext: {
- mnemonic = "subs";
- if (rd_is_zr) {
- mnemonic = "cmp";
- form = form_cmp;
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
- bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm";
- const char *form_neg = "'Rd, 'Rm";
-
- switch (instr->Mask(AddSubWithCarryMask)) {
- case ADC_w:
- case ADC_x: mnemonic = "adc"; break;
- case ADCS_w:
- case ADCS_x: mnemonic = "adcs"; break;
- case SBC_w:
- case SBC_x: {
- mnemonic = "sbc";
- if (rn_is_zr) {
- mnemonic = "ngc";
- form = form_neg;
- }
- break;
- }
- case SBCS_w:
- case SBCS_x: {
- mnemonic = "sbcs";
- if (rn_is_zr) {
- mnemonic = "ngcs";
- form = form_neg;
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLogicalImmediate(Instruction* instr) {
- bool rd_is_zr = RdIsZROrSP(instr);
- bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rds, 'Rn, 'ITri";
-
- if (instr->ImmLogical() == 0) {
- // The immediate encoded in the instruction is not in the expected format.
- Format(instr, "unallocated", "(LogicalImmediate)");
- return;
- }
-
- switch (instr->Mask(LogicalImmediateMask)) {
- case AND_w_imm:
- case AND_x_imm: mnemonic = "and"; break;
- case ORR_w_imm:
- case ORR_x_imm: {
- mnemonic = "orr";
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
- : kWRegSize;
- if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
- mnemonic = "mov";
- form = "'Rds, 'ITri";
- }
- break;
- }
- case EOR_w_imm:
- case EOR_x_imm: mnemonic = "eor"; break;
- case ANDS_w_imm:
- case ANDS_x_imm: {
- mnemonic = "ands";
- if (rd_is_zr) {
- mnemonic = "tst";
- form = "'Rn, 'ITri";
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
- ASSERT((reg_size == kXRegSize) ||
- ((reg_size == kWRegSize) && (value <= 0xffffffff)));
-
- // Test for movz: 16-bits set at positions 0, 16, 32 or 48.
- if (((value & 0xffffffffffff0000UL) == 0UL) ||
- ((value & 0xffffffff0000ffffUL) == 0UL) ||
- ((value & 0xffff0000ffffffffUL) == 0UL) ||
- ((value & 0x0000ffffffffffffUL) == 0UL)) {
- return true;
- }
-
- // Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
- if ((reg_size == kXRegSize) &&
- (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
- ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
- ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
- ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
- return true;
- }
- if ((reg_size == kWRegSize) &&
- (((value & 0xffff0000) == 0xffff0000) ||
- ((value & 0x0000ffff) == 0x0000ffff))) {
- return true;
- }
- return false;
-}
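-
- // For example (illustrative values): 0x0000000012340000 is encodable by a
- // single movz (16 bits at position 16) and 0xffffffffffff1234 by a single
- // movn, so an orr from xzr with either immediate keeps the "orr" mnemonic;
- // only immediates that movz/movn cannot encode print as the "mov" alias.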
-
-
-void Disassembler::VisitLogicalShifted(Instruction* instr) {
- bool rd_is_zr = RdIsZROrSP(instr);
- bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm'HLo";
-
- switch (instr->Mask(LogicalShiftedMask)) {
- case AND_w:
- case AND_x: mnemonic = "and"; break;
- case BIC_w:
- case BIC_x: mnemonic = "bic"; break;
- case EOR_w:
- case EOR_x: mnemonic = "eor"; break;
- case EON_w:
- case EON_x: mnemonic = "eon"; break;
- case BICS_w:
- case BICS_x: mnemonic = "bics"; break;
- case ANDS_w:
- case ANDS_x: {
- mnemonic = "ands";
- if (rd_is_zr) {
- mnemonic = "tst";
- form = "'Rn, 'Rm'HLo";
- }
- break;
- }
- case ORR_w:
- case ORR_x: {
- mnemonic = "orr";
- if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
- mnemonic = "mov";
- form = "'Rd, 'Rm";
- }
- break;
- }
- case ORN_w:
- case ORN_x: {
- mnemonic = "orn";
- if (rn_is_zr) {
- mnemonic = "mvn";
- form = "'Rd, 'Rm'HLo";
- }
- break;
- }
- default: UNREACHABLE();
- }
-
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
-
- switch (instr->Mask(ConditionalCompareRegisterMask)) {
- case CCMN_w:
- case CCMN_x: mnemonic = "ccmn"; break;
- case CCMP_w:
- case CCMP_x: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
-
- switch (instr->Mask(ConditionalCompareImmediateMask)) {
- case CCMN_w_imm:
- case CCMN_x_imm: mnemonic = "ccmn"; break;
- case CCMP_w_imm:
- case CCMP_x_imm: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalSelect(Instruction* instr) {
- bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
- bool rn_is_rm = (instr->Rn() == instr->Rm());
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
- const char *form_test = "'Rd, 'CInv";
- const char *form_update = "'Rd, 'Rn, 'CInv";
-
- Condition cond = static_cast<Condition>(instr->Condition());
- bool invertible_cond = (cond != al) && (cond != nv);
-
- switch (instr->Mask(ConditionalSelectMask)) {
- case CSEL_w:
- case CSEL_x: mnemonic = "csel"; break;
- case CSINC_w:
- case CSINC_x: {
- mnemonic = "csinc";
- if (rnm_is_zr && invertible_cond) {
- mnemonic = "cset";
- form = form_test;
- } else if (rn_is_rm && invertible_cond) {
- mnemonic = "cinc";
- form = form_update;
- }
- break;
- }
- case CSINV_w:
- case CSINV_x: {
- mnemonic = "csinv";
- if (rnm_is_zr && invertible_cond) {
- mnemonic = "csetm";
- form = form_test;
- } else if (rn_is_rm && invertible_cond) {
- mnemonic = "cinv";
- form = form_update;
- }
- break;
- }
- case CSNEG_w:
- case CSNEG_x: {
- mnemonic = "csneg";
- if (rn_is_rm && invertible_cond) {
- mnemonic = "cneg";
- form = form_update;
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
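-
- // For example (illustrative): "csinc w0, wzr, wzr, ne" has both sources
- // equal to zr and an invertible condition, so it is printed as the alias
- // "cset w0, eq".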
-
-
-void Disassembler::VisitBitfield(Instruction* instr) {
- unsigned s = instr->ImmS();
- unsigned r = instr->ImmR();
- unsigned rd_size_minus_1 =
- ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
- const char *mnemonic = "";
- const char *form = "";
- const char *form_shift_right = "'Rd, 'Rn, 'IBr";
- const char *form_extend = "'Rd, 'Wn";
- const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
- const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
- const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
-
- switch (instr->Mask(BitfieldMask)) {
- case SBFM_w:
- case SBFM_x: {
- mnemonic = "sbfx";
- form = form_bfx;
- if (r == 0) {
- form = form_extend;
- if (s == 7) {
- mnemonic = "sxtb";
- } else if (s == 15) {
- mnemonic = "sxth";
- } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
- mnemonic = "sxtw";
- } else {
- form = form_bfx;
- }
- } else if (s == rd_size_minus_1) {
- mnemonic = "asr";
- form = form_shift_right;
- } else if (s < r) {
- mnemonic = "sbfiz";
- form = form_bfiz;
- }
- break;
- }
- case UBFM_w:
- case UBFM_x: {
- mnemonic = "ubfx";
- form = form_bfx;
- if (r == 0) {
- form = form_extend;
- if (s == 7) {
- mnemonic = "uxtb";
- } else if (s == 15) {
- mnemonic = "uxth";
- } else {
- form = form_bfx;
- }
- }
- if (s == rd_size_minus_1) {
- mnemonic = "lsr";
- form = form_shift_right;
- } else if (r == s + 1) {
- mnemonic = "lsl";
- form = form_lsl;
- } else if (s < r) {
- mnemonic = "ubfiz";
- form = form_bfiz;
- }
- break;
- }
- case BFM_w:
- case BFM_x: {
- mnemonic = "bfxil";
- form = form_bfx;
- if (s < r) {
- mnemonic = "bfi";
- form = form_bfiz;
- }
- }
- }
- Format(instr, mnemonic, form);
-}
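-
- // For example (illustrative): a 32-bit UBFM with r = 24 and
- // s = 31 = rd_size_minus_1 is printed as the alias "lsr w0, w1, #24".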
-
-
-void Disassembler::VisitExtract(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
-
- switch (instr->Mask(ExtractMask)) {
- case EXTR_w:
- case EXTR_x: {
- if (instr->Rn() == instr->Rm()) {
- mnemonic = "ror";
- form = "'Rd, 'Rn, 'IExtract";
- } else {
- mnemonic = "extr";
- }
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitPCRelAddressing(Instruction* instr) {
- switch (instr->Mask(PCRelAddressingMask)) {
- case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
- // ADRP is not implemented.
- default: Format(instr, "unimplemented", "(PCRelAddressing)");
- }
-}
-
-
-void Disassembler::VisitConditionalBranch(Instruction* instr) {
- switch (instr->Mask(ConditionalBranchMask)) {
- case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
- default: UNREACHABLE();
- }
-}
-
-
-void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Xn";
-
- switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
- case BR: mnemonic = "br"; break;
- case BLR: mnemonic = "blr"; break;
- case RET: {
- mnemonic = "ret";
- if (instr->Rn() == kLinkRegCode) {
- form = NULL;
- }
- break;
- }
- default: form = "(UnconditionalBranchToRegister)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'BImmUncn";
-
- switch (instr->Mask(UnconditionalBranchMask)) {
- case B: mnemonic = "b"; break;
- case BL: mnemonic = "bl"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn";
-
- switch (instr->Mask(DataProcessing1SourceMask)) {
- #define FORMAT(A, B) \
- case A##_w: \
- case A##_x: mnemonic = B; break;
- FORMAT(RBIT, "rbit");
- FORMAT(REV16, "rev16");
- FORMAT(REV, "rev");
- FORMAT(CLZ, "clz");
- FORMAT(CLS, "cls");
- #undef FORMAT
- case REV32_x: mnemonic = "rev32"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Rd, 'Rn, 'Rm";
-
- switch (instr->Mask(DataProcessing2SourceMask)) {
- #define FORMAT(A, B) \
- case A##_w: \
- case A##_x: mnemonic = B; break;
- FORMAT(UDIV, "udiv");
- FORMAT(SDIV, "sdiv");
- FORMAT(LSLV, "lsl");
- FORMAT(LSRV, "lsr");
- FORMAT(ASRV, "asr");
- FORMAT(RORV, "ror");
- #undef FORMAT
- default: form = "(DataProcessing2Source)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
- bool ra_is_zr = RaIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
- const char *form_rrr = "'Rd, 'Rn, 'Rm";
- const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
- const char *form_xww = "'Xd, 'Wn, 'Wm";
- const char *form_xxx = "'Xd, 'Xn, 'Xm";
-
- switch (instr->Mask(DataProcessing3SourceMask)) {
- case MADD_w:
- case MADD_x: {
- mnemonic = "madd";
- form = form_rrrr;
- if (ra_is_zr) {
- mnemonic = "mul";
- form = form_rrr;
- }
- break;
- }
- case MSUB_w:
- case MSUB_x: {
- mnemonic = "msub";
- form = form_rrrr;
- if (ra_is_zr) {
- mnemonic = "mneg";
- form = form_rrr;
- }
- break;
- }
- case SMADDL_x: {
- mnemonic = "smaddl";
- if (ra_is_zr) {
- mnemonic = "smull";
- form = form_xww;
- }
- break;
- }
- case SMSUBL_x: {
- mnemonic = "smsubl";
- if (ra_is_zr) {
- mnemonic = "smnegl";
- form = form_xww;
- }
- break;
- }
- case UMADDL_x: {
- mnemonic = "umaddl";
- if (ra_is_zr) {
- mnemonic = "umull";
- form = form_xww;
- }
- break;
- }
- case UMSUBL_x: {
- mnemonic = "umsubl";
- if (ra_is_zr) {
- mnemonic = "umnegl";
- form = form_xww;
- }
- break;
- }
- case SMULH_x: {
- mnemonic = "smulh";
- form = form_xxx;
- break;
- }
- case UMULH_x: {
- mnemonic = "umulh";
- form = form_xxx;
- break;
- }
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitCompareBranch(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rt, 'BImmCmpa";
-
- switch (instr->Mask(CompareBranchMask)) {
- case CBZ_w:
- case CBZ_x: mnemonic = "cbz"; break;
- case CBNZ_w:
- case CBNZ_x: mnemonic = "cbnz"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitTestBranch(Instruction* instr) {
- const char *mnemonic = "";
- // If the top bit of the immediate is clear, the tested register is
- // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
- // encoded in bit 31 of the instruction, we can reuse the Rt form, which
- // uses bit 31 (normally "sf") to choose the register size.
- const char *form = "'Rt, 'IS, 'BImmTest";
-
- switch (instr->Mask(TestBranchMask)) {
- case TBZ: mnemonic = "tbz"; break;
- case TBNZ: mnemonic = "tbnz"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'IMoveImm";
-
- // Print the shift separately for movk, to make it clear which halfword will
- // be overwritten. Movn and movz print the computed immediate, which already
- // includes the shift.
- switch (instr->Mask(MoveWideImmediateMask)) {
- case MOVN_w:
- case MOVN_x: mnemonic = "movn"; break;
- case MOVZ_w:
- case MOVZ_x: mnemonic = "movz"; break;
- case MOVK_w:
- case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
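-
- // For example (illustrative): movz prints "movz x0, #0x12340000", folding
- // the shift into the value, while movk prints "movk x0, #0x1234, lsl #16".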
-
-
-#define LOAD_STORE_LIST(V) \
- V(STRB_w, "strb", "'Wt") \
- V(STRH_w, "strh", "'Wt") \
- V(STR_w, "str", "'Wt") \
- V(STR_x, "str", "'Xt") \
- V(LDRB_w, "ldrb", "'Wt") \
- V(LDRH_w, "ldrh", "'Wt") \
- V(LDR_w, "ldr", "'Wt") \
- V(LDR_x, "ldr", "'Xt") \
- V(LDRSB_x, "ldrsb", "'Xt") \
- V(LDRSH_x, "ldrsh", "'Xt") \
- V(LDRSW_x, "ldrsw", "'Xt") \
- V(LDRSB_w, "ldrsb", "'Wt") \
- V(LDRSH_w, "ldrsh", "'Wt") \
- V(STR_s, "str", "'St") \
- V(STR_d, "str", "'Dt") \
- V(LDR_s, "ldr", "'St") \
- V(LDR_d, "ldr", "'Dt")
-
-void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePreIndex)";
-
- switch (instr->Mask(LoadStorePreIndexMask)) {
- #define LS_PREINDEX(A, B, C) \
- case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
- LOAD_STORE_LIST(LS_PREINDEX)
- #undef LS_PREINDEX
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePostIndex)";
-
- switch (instr->Mask(LoadStorePostIndexMask)) {
- #define LS_POSTINDEX(A, B, C) \
- case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
- LOAD_STORE_LIST(LS_POSTINDEX)
- #undef LS_POSTINDEX
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStoreUnsignedOffset)";
-
- switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
- #define LS_UNSIGNEDOFFSET(A, B, C) \
- case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
- LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
- #undef LS_UNSIGNEDOFFSET
- case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStoreRegisterOffset)";
-
- switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
- #define LS_REGISTEROFFSET(A, B, C) \
- case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
- LOAD_STORE_LIST(LS_REGISTEROFFSET)
- #undef LS_REGISTEROFFSET
- case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Wt, ['Xns'ILS]";
- const char *form_x = "'Xt, ['Xns'ILS]";
- const char *form_s = "'St, ['Xns'ILS]";
- const char *form_d = "'Dt, ['Xns'ILS]";
-
- switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
- case STURB_w: mnemonic = "sturb"; break;
- case STURH_w: mnemonic = "sturh"; break;
- case STUR_w: mnemonic = "stur"; break;
- case STUR_x: mnemonic = "stur"; form = form_x; break;
- case STUR_s: mnemonic = "stur"; form = form_s; break;
- case STUR_d: mnemonic = "stur"; form = form_d; break;
- case LDURB_w: mnemonic = "ldurb"; break;
- case LDURH_w: mnemonic = "ldurh"; break;
- case LDUR_w: mnemonic = "ldur"; break;
- case LDUR_x: mnemonic = "ldur"; form = form_x; break;
- case LDUR_s: mnemonic = "ldur"; form = form_s; break;
- case LDUR_d: mnemonic = "ldur"; form = form_d; break;
- case LDURSB_x: form = form_x; // Fall through.
- case LDURSB_w: mnemonic = "ldursb"; break;
- case LDURSH_x: form = form_x; // Fall through.
- case LDURSH_w: mnemonic = "ldursh"; break;
- case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
- default: form = "(LoadStoreUnscaledOffset)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadLiteral(Instruction* instr) {
- const char *mnemonic = "ldr";
- const char *form = "(LoadLiteral)";
-
- switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
- case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
- case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
- case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
- default: mnemonic = "unimplemented";
- }
- Format(instr, mnemonic, form);
-}
-
-
-#define LOAD_STORE_PAIR_LIST(V) \
- V(STP_w, "stp", "'Wt, 'Wt2", "4") \
- V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
- V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
- V(STP_x, "stp", "'Xt, 'Xt2", "8") \
- V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
- V(STP_s, "stp", "'St, 'St2", "4") \
- V(LDP_s, "ldp", "'St, 'St2", "4") \
- V(STP_d, "stp", "'Dt, 'Dt2", "8") \
- V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
-
-void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairPostIndex)";
-
- switch (instr->Mask(LoadStorePairPostIndexMask)) {
- #define LSP_POSTINDEX(A, B, C, D) \
- case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
- LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
- #undef LSP_POSTINDEX
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairPreIndex)";
-
- switch (instr->Mask(LoadStorePairPreIndexMask)) {
- #define LSP_PREINDEX(A, B, C, D) \
- case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
- LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
- #undef LSP_PREINDEX
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairOffset)";
-
- switch (instr->Mask(LoadStorePairOffsetMask)) {
- #define LSP_OFFSET(A, B, C, D) \
- case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
- LOAD_STORE_PAIR_LIST(LSP_OFFSET)
- #undef LSP_OFFSET
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form;
-
- switch (instr->Mask(LoadStorePairNonTemporalMask)) {
- case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- default: form = "(LoadStorePairNonTemporal)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPCompare(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fn, 'Fm";
- const char *form_zero = "'Fn, #0.0";
-
- switch (instr->Mask(FPCompareMask)) {
- case FCMP_s_zero:
- case FCMP_d_zero: form = form_zero; // Fall through.
- case FCMP_s:
- case FCMP_d: mnemonic = "fcmp"; break;
- default: form = "(FPCompare)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
-
- switch (instr->Mask(FPConditionalCompareMask)) {
- case FCCMP_s:
- case FCCMP_d: mnemonic = "fccmp"; break;
- case FCCMPE_s:
- case FCCMPE_d: mnemonic = "fccmpe"; break;
- default: form = "(FPConditionalCompare)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
-
- switch (instr->Mask(FPConditionalSelectMask)) {
- case FCSEL_s:
- case FCSEL_d: mnemonic = "fcsel"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fd, 'Fn";
-
- switch (instr->Mask(FPDataProcessing1SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
- FORMAT(FMOV, "fmov");
- FORMAT(FABS, "fabs");
- FORMAT(FNEG, "fneg");
- FORMAT(FSQRT, "fsqrt");
- FORMAT(FRINTN, "frintn");
- FORMAT(FRINTP, "frintp");
- FORMAT(FRINTM, "frintm");
- FORMAT(FRINTZ, "frintz");
- FORMAT(FRINTA, "frinta");
- FORMAT(FRINTX, "frintx");
- FORMAT(FRINTI, "frinti");
- #undef FORMAT
- case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
- case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
- default: form = "(FPDataProcessing1Source)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm";
-
- switch (instr->Mask(FPDataProcessing2SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
- FORMAT(FMUL, "fmul");
- FORMAT(FDIV, "fdiv");
- FORMAT(FADD, "fadd");
- FORMAT(FSUB, "fsub");
- FORMAT(FMAX, "fmax");
- FORMAT(FMIN, "fmin");
- FORMAT(FMAXNM, "fmaxnm");
- FORMAT(FMINNM, "fminnm");
- FORMAT(FNMUL, "fnmul");
- #undef FORMAT
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
-
- switch (instr->Mask(FPDataProcessing3SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
- FORMAT(FMADD, "fmadd");
- FORMAT(FMSUB, "fmsub");
- FORMAT(FNMADD, "fnmadd");
- FORMAT(FNMSUB, "fnmsub");
- #undef FORMAT
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPImmediate(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "(FPImmediate)";
-
- switch (instr->Mask(FPImmediateMask)) {
- case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
- case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
- default: UNREACHABLE();
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(FPIntegerConvert)";
- const char *form_rf = "'Rd, 'Fn";
- const char *form_fr = "'Fd, 'Rn";
-
- switch (instr->Mask(FPIntegerConvertMask)) {
- case FMOV_ws:
- case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
- case FMOV_sw:
- case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
- case FCVTAS_ws:
- case FCVTAS_xs:
- case FCVTAS_wd:
- case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
- case FCVTAU_ws:
- case FCVTAU_xs:
- case FCVTAU_wd:
- case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
- case FCVTMS_ws:
- case FCVTMS_xs:
- case FCVTMS_wd:
- case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
- case FCVTMU_ws:
- case FCVTMU_xs:
- case FCVTMU_wd:
- case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
- case FCVTNS_ws:
- case FCVTNS_xs:
- case FCVTNS_wd:
- case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
- case FCVTNU_ws:
- case FCVTNU_xs:
- case FCVTNU_wd:
- case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
- case FCVTZU_xd:
- case FCVTZU_ws:
- case FCVTZU_wd:
- case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
- case FCVTZS_xd:
- case FCVTZS_wd:
- case FCVTZS_xs:
- case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
- case SCVTF_sw:
- case SCVTF_sx:
- case SCVTF_dw:
- case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
- case UCVTF_sw:
- case UCVTF_sx:
- case UCVTF_dw:
- case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Fn, 'IFPFBits";
- const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
-
- switch (instr->Mask(FPFixedPointConvertMask)) {
- case FCVTZS_ws_fixed:
- case FCVTZS_xs_fixed:
- case FCVTZS_wd_fixed:
- case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
- case FCVTZU_ws_fixed:
- case FCVTZU_xs_fixed:
- case FCVTZU_wd_fixed:
- case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
- case SCVTF_sw_fixed:
- case SCVTF_sx_fixed:
- case SCVTF_dw_fixed:
- case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
- case UCVTF_sw_fixed:
- case UCVTF_sx_fixed:
- case UCVTF_dw_fixed:
- case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitSystem(Instruction* instr) {
- // Some system instructions hijack their Op and Cp fields to represent a
- // range of immediates instead of indicating a different instruction. This
- // makes the decoding tricky.
- const char *mnemonic = "unimplemented";
- const char *form = "(System)";
-
- if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
- switch (instr->Mask(SystemSysRegMask)) {
- case MRS: {
- mnemonic = "mrs";
- switch (instr->ImmSystemRegister()) {
- case NZCV: form = "'Xt, nzcv"; break;
- case FPCR: form = "'Xt, fpcr"; break;
- default: form = "'Xt, (unknown)"; break;
- }
- break;
- }
- case MSR: {
- mnemonic = "msr";
- switch (instr->ImmSystemRegister()) {
- case NZCV: form = "nzcv, 'Xt"; break;
- case FPCR: form = "fpcr, 'Xt"; break;
- default: form = "(unknown), 'Xt"; break;
- }
- break;
- }
- }
- } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
- ASSERT(instr->Mask(SystemHintMask) == HINT);
- switch (instr->ImmHint()) {
- case NOP: {
- mnemonic = "nop";
- form = NULL;
- break;
- }
- }
- } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
- switch (instr->Mask(MemBarrierMask)) {
- case DMB: {
- mnemonic = "dmb";
- form = "'M";
- break;
- }
- case DSB: {
- mnemonic = "dsb";
- form = "'M";
- break;
- }
- case ISB: {
- mnemonic = "isb";
- form = NULL;
- break;
- }
- }
- }
-
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitException(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'IDebug";
-
- switch (instr->Mask(ExceptionMask)) {
- case HLT: mnemonic = "hlt"; break;
- case BRK: mnemonic = "brk"; break;
- case SVC: mnemonic = "svc"; break;
- case HVC: mnemonic = "hvc"; break;
- case SMC: mnemonic = "smc"; break;
- case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
- case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
- case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
- default: form = "(Exception)";
- }
- Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitUnimplemented(Instruction* instr) {
- Format(instr, "unimplemented", "(Unimplemented)");
-}
-
-
-void Disassembler::VisitUnallocated(Instruction* instr) {
- Format(instr, "unallocated", "(Unallocated)");
-}
-
-
-void Disassembler::ProcessOutput(Instruction* /*instr*/) {
- // The base Disassembler does nothing more than disassemble into a buffer.
-}
-
-
-void Disassembler::Format(Instruction* instr, const char* mnemonic,
- const char* format) {
- // TODO(mcapewel): I don't think I can use the instr address here; there
- // needs to be a base address too.
- ASSERT(mnemonic != NULL);
- ResetOutput();
- Substitute(instr, mnemonic);
- if (format != NULL) {
- buffer_[buffer_pos_++] = ' ';
- Substitute(instr, format);
- }
- buffer_[buffer_pos_] = 0;
- ProcessOutput(instr);
-}
-
-
-void Disassembler::Substitute(Instruction* instr, const char* string) {
- char chr = *string++;
- while (chr != '\0') {
- if (chr == '\'') {
- string += SubstituteField(instr, string);
- } else {
- buffer_[buffer_pos_++] = chr;
- }
- chr = *string++;
- }
-}
-
-
-int Disassembler::SubstituteField(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'R': // Register. X or W, selected by sf bit.
- case 'F': // FP Register. S or D, selected by type field.
- case 'W':
- case 'X':
- case 'S':
- case 'D': return SubstituteRegisterField(instr, format);
- case 'I': return SubstituteImmediateField(instr, format);
- case 'L': return SubstituteLiteralField(instr, format);
- case 'H': return SubstituteShiftField(instr, format);
- case 'P': return SubstitutePrefetchField(instr, format);
- case 'C': return SubstituteConditionField(instr, format);
- case 'E': return SubstituteExtendField(instr, format);
- case 'A': return SubstitutePCRelAddressField(instr, format);
- case 'B': return SubstituteBranchTargetField(instr, format);
- case 'O': return SubstituteLSRegOffsetField(instr, format);
- case 'M': return SubstituteBarrierField(instr, format);
- default: {
- UNREACHABLE();
- return 1;
- }
- }
-}
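-
- // For example (illustrative): with the form "'Rds, 'Rns, 'IAddSub", 'Rds
- // and 'Rns go through SubstituteRegisterField and 'IAddSub through
- // SubstituteImmediateField, so an add might print as
- // "add x0, x1, #0x100 (256)".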
-
-
-int Disassembler::SubstituteRegisterField(Instruction* instr,
- const char* format) {
- unsigned reg_num = 0;
- unsigned field_len = 2;
- switch (format[1]) {
- case 'd': reg_num = instr->Rd(); break;
- case 'n': reg_num = instr->Rn(); break;
- case 'm': reg_num = instr->Rm(); break;
- case 'a': reg_num = instr->Ra(); break;
- case 't': {
- if (format[2] == '2') {
- reg_num = instr->Rt2();
- field_len = 3;
- } else {
- reg_num = instr->Rt();
- }
- break;
- }
- default: UNREACHABLE();
- }
-
- // Increase field length for registers tagged as stack.
- if (format[2] == 's') {
- field_len = 3;
- }
-
- char reg_type;
- if (format[0] == 'R') {
- // Register type is R: use the sf bit to choose between X and W.
- reg_type = instr->SixtyFourBits() ? 'x' : 'w';
- } else if (format[0] == 'F') {
- // Floating-point register: use type field to choose S or D.
- reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
- } else {
- // Register type is specified. Make it lower case.
- reg_type = format[0] + 0x20;
- }
-
- if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
- // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
-
- // Filter special registers
- if ((reg_type == 'x') && (reg_num == 27)) {
- AppendToOutput("cp");
- } else if ((reg_type == 'x') && (reg_num == 28)) {
- AppendToOutput("jssp");
- } else if ((reg_type == 'x') && (reg_num == 29)) {
- AppendToOutput("fp");
- } else if ((reg_type == 'x') && (reg_num == 30)) {
- AppendToOutput("lr");
- } else {
- AppendToOutput("%c%d", reg_type, reg_num);
- }
- } else if (format[2] == 's') {
- // Disassemble w31/x31 as stack pointer wcsp/csp.
- AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
- } else {
- // Disassemble w31/x31 as zero register wzr/xzr.
- AppendToOutput("%czr", reg_type);
- }
-
- return field_len;
-}
-
-
-int Disassembler::SubstituteImmediateField(Instruction* instr,
- const char* format) {
- ASSERT(format[0] == 'I');
-
- switch (format[1]) {
- case 'M': { // IMoveImm or IMoveLSL.
- if (format[5] == 'I') {
- uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
- AppendToOutput("#0x%" PRIx64, imm);
- } else {
- ASSERT(format[5] == 'L');
- AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
- if (instr->ShiftMoveWide() > 0) {
- AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
- }
- }
- return 8;
- }
- case 'L': {
- switch (format[2]) {
- case 'L': { // ILLiteral - Immediate Load Literal.
- AppendToOutput("pc%+" PRId64,
- instr->ImmLLiteral() << kLiteralEntrySizeLog2);
- return 9;
- }
- case 'S': { // ILS - Immediate Load/Store.
- if (instr->ImmLS() != 0) {
- AppendToOutput(", #%" PRId64, instr->ImmLS());
- }
- return 3;
- }
- case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
- if (instr->ImmLSPair() != 0) {
- // format[3] is the scale value. Convert to a number.
- int scale = format[3] - 0x30;
- AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
- }
- return 4;
- }
- case 'U': { // ILU - Immediate Load/Store Unsigned.
- if (instr->ImmLSUnsigned() != 0) {
- AppendToOutput(", #%" PRIu64,
- instr->ImmLSUnsigned() << instr->SizeLS());
- }
- return 3;
- }
- }
- }
- case 'C': { // ICondB - Immediate Conditional Branch.
- int64_t offset = instr->ImmCondBranch() << 2;
- char sign = (offset >= 0) ? '+' : '-';
- AppendToOutput("#%c0x%" PRIx64, sign, offset);
- return 6;
- }
- case 'A': { // IAddSub.
- ASSERT(instr->ShiftAddSub() <= 1);
- int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
- AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
- return 7;
- }
- case 'F': { // IFPSingle, IFPDouble or IFPFBits.
- if (format[3] == 'F') { // IFPFBits.
- AppendToOutput("#%d", 64 - instr->FPScale());
- return 8;
- } else {
- AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
- format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
- return 9;
- }
- }
- case 'T': { // ITri - Immediate Triangular Encoded.
- AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
- return 4;
- }
- case 'N': { // INzcv.
- int nzcv = (instr->Nzcv() << Flags_offset);
- AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
- ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
- ((nzcv & CFlag) == 0) ? 'c' : 'C',
- ((nzcv & VFlag) == 0) ? 'v' : 'V');
- return 5;
- }
- case 'P': { // IP - Conditional compare.
- AppendToOutput("#%d", instr->ImmCondCmp());
- return 2;
- }
- case 'B': { // Bitfields.
- return SubstituteBitfieldImmediateField(instr, format);
- }
- case 'E': { // IExtract.
- AppendToOutput("#%d", instr->ImmS());
- return 8;
- }
- case 'S': { // IS - Test and branch bit.
- AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
- instr->ImmTestBranchBit40());
- return 2;
- }
- case 'D': { // IDebug - HLT and BRK instructions.
- AppendToOutput("#0x%x", instr->ImmException());
- return 6;
- }
- default: {
- UNIMPLEMENTED();
- return 0;
- }
- }
-}
-
-
-int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
- const char* format) {
- ASSERT((format[0] == 'I') && (format[1] == 'B'));
- unsigned r = instr->ImmR();
- unsigned s = instr->ImmS();
-
- switch (format[2]) {
- case 'r': { // IBr.
- AppendToOutput("#%d", r);
- return 3;
- }
- case 's': { // IBs+1 or IBs-r+1.
- if (format[3] == '+') {
- AppendToOutput("#%d", s + 1);
- return 5;
- } else {
- ASSERT(format[3] == '-');
- AppendToOutput("#%d", s - r + 1);
- return 7;
- }
- }
- case 'Z': { // IBZ-r.
- ASSERT((format[3] == '-') && (format[4] == 'r'));
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
- AppendToOutput("#%d", reg_size - r);
- return 5;
- }
- default: {
- UNREACHABLE();
- return 0;
- }
- }
-}
-
-
-int Disassembler::SubstituteLiteralField(Instruction* instr,
- const char* format) {
- ASSERT(strncmp(format, "LValue", 6) == 0);
- USE(format);
-
- switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit:
- case LDR_x_lit:
- case LDR_s_lit:
- case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
- default: UNREACHABLE();
- }
-
- return 6;
-}
-
-
-int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'H');
- ASSERT(instr->ShiftDP() <= 0x3);
-
- switch (format[1]) {
- case 'D': { // HDP.
- ASSERT(instr->ShiftDP() != ROR);
- } // Fall through.
- case 'L': { // HLo.
- if (instr->ImmDPShift() != 0) {
- const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
- AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
- instr->ImmDPShift());
- }
- return 3;
- }
- default:
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-int Disassembler::SubstituteConditionField(Instruction* instr,
- const char* format) {
- ASSERT(format[0] == 'C');
- const char* condition_code[] = { "eq", "ne", "hs", "lo",
- "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt",
- "gt", "le", "al", "nv" };
- int cond;
- switch (format[1]) {
- case 'B': cond = instr->ConditionBranch(); break;
- case 'I': {
- cond = InvertCondition(static_cast<Condition>(instr->Condition()));
- break;
- }
- default: cond = instr->Condition();
- }
- AppendToOutput("%s", condition_code[cond]);
- return 4;
-}
-
-
-int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
- const char* format) {
- USE(format);
- ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
-
- int offset = instr->ImmPCRel();
-
- // Only ADR (AddrPCRelByte) is supported.
- ASSERT(strcmp(format, "AddrPCRelByte") == 0);
-
- char sign = '+';
- if (offset < 0) {
- offset = -offset;
- sign = '-';
- }
- // TODO(jbramley): Can we print the target address here?
- AppendToOutput("#%c0x%x", sign, offset);
- return 13;
-}
-
-
-int Disassembler::SubstituteBranchTargetField(Instruction* instr,
- const char* format) {
- ASSERT(strncmp(format, "BImm", 4) == 0);
-
- int64_t offset = 0;
- switch (format[5]) {
- // BImmUncn - unconditional branch immediate.
- case 'n': offset = instr->ImmUncondBranch(); break;
- // BImmCond - conditional branch immediate.
- case 'o': offset = instr->ImmCondBranch(); break;
- // BImmCmpa - compare and branch immediate.
- case 'm': offset = instr->ImmCmpBranch(); break;
- // BImmTest - test and branch immediate.
- case 'e': offset = instr->ImmTestBranch(); break;
- default: UNIMPLEMENTED();
- }
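-  // Branch offsets are encoded in instruction units, so scale to bytes
-  // before printing; a branch four instructions ahead prints as "#+0x10".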
- offset <<= kInstructionSizeLog2;
- char sign = '+';
- if (offset < 0) {
- offset = -offset;
- sign = '-';
- }
- // TODO(mcapewel): look up pc + offset in label table.
- AppendToOutput("#%c0x%" PRIx64, sign, offset);
- return 8;
-}
-
-
-int Disassembler::SubstituteExtendField(Instruction* instr,
- const char* format) {
- ASSERT(strncmp(format, "Ext", 3) == 0);
- ASSERT(instr->ExtendMode() <= 7);
- USE(format);
-
- const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
- "sxtb", "sxth", "sxtw", "sxtx" };
-
-  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
-  // registers become lsl.
- if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
- (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
- (instr->ExtendMode() == UXTX))) {
- if (instr->ImmExtendShift() > 0) {
- AppendToOutput(", lsl #%d", instr->ImmExtendShift());
- }
- } else {
- AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
- if (instr->ImmExtendShift() > 0) {
- AppendToOutput(" #%d", instr->ImmExtendShift());
- }
- }
- return 3;
-}
-
-
-int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
- const char* format) {
- ASSERT(strncmp(format, "Offsetreg", 9) == 0);
- const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
- "undefined", "undefined", "sxtw", "sxtx" };
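-  // Only UXTW, LSL (the preferred alias of UXTX in this context), SXTW and
-  // SXTX are valid register-offset extend modes; the rest are reserved.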
- USE(format);
-
- unsigned shift = instr->ImmShiftLS();
- Extend ext = static_cast<Extend>(instr->ExtendMode());
- char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
-
- unsigned rm = instr->Rm();
- if (rm == kZeroRegCode) {
- AppendToOutput("%czr", reg_type);
- } else {
- AppendToOutput("%c%d", reg_type, rm);
- }
-
- // Extend mode UXTX is an alias for shift mode LSL here.
- if (!((ext == UXTX) && (shift == 0))) {
- AppendToOutput(", %s", extend_mode[ext]);
- if (shift != 0) {
- AppendToOutput(" #%d", instr->SizeLS());
- }
- }
- return 9;
-}
-
-
-int Disassembler::SubstitutePrefetchField(Instruction* instr,
- const char* format) {
- ASSERT(format[0] == 'P');
- USE(format);
-
- int prefetch_mode = instr->PrefetchMode();
-
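-  // In the 5-bit prefetch operand, bit 4 selects store prefetches, bit 0
-  // selects the streaming policy, and bits 2:1 give the cache level, so
-  // 0b00101 prints as "pldl3strm".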
- const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
-  int level = ((prefetch_mode >> 1) & 3) + 1;
- const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
-
- AppendToOutput("p%sl%d%s", ls, level, ks);
- return 6;
-}
-
-
-int Disassembler::SubstituteBarrierField(Instruction* instr,
- const char* format) {
- ASSERT(format[0] == 'M');
- USE(format);
-
- static const char* options[4][4] = {
- { "sy (0b0000)", "oshld", "oshst", "osh" },
- { "sy (0b0100)", "nshld", "nshst", "nsh" },
- { "sy (0b1000)", "ishld", "ishst", "ish" },
- { "sy (0b1100)", "ld", "st", "sy" }
- };
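-  // Rows are indexed by barrier domain and columns by access type; type 0
-  // encodings are printed as "sy" with their raw bits appended so they stay
-  // distinguishable. For example, "dmb ishld" is domain 2, type 1.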
- int domain = instr->ImmBarrierDomain();
- int type = instr->ImmBarrierType();
-
- AppendToOutput("%s", options[domain][type]);
- return 1;
-}
-
-
-void Disassembler::ResetOutput() {
- buffer_pos_ = 0;
- buffer_[buffer_pos_] = 0;
-}
-
-
-void Disassembler::AppendToOutput(const char* format, ...) {
- va_list args;
- va_start(args, format);
-  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
-                           format, args);
- va_end(args);
-}
-
-
-void PrintDisassembler::ProcessOutput(Instruction* instr) {
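-  // Print "<address> <raw instruction bits>\t\t<disassembly>".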
- fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
- reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
- GetOutput());
-}
-
-} } // namespace v8::internal
-
-
-namespace disasm {
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
- if (ureg >= v8::internal::kNumberOfRegisters) {
- return "noreg";
- }
- if (ureg == v8::internal::kZeroRegCode) {
- return "xzr";
- }
- v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
-  UNREACHABLE();  // A64 does not have the concept of a byte register.
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
-  UNREACHABLE();  // A64 does not have any XMM registers.
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // The default name converter is called for unknown code, so we will not try
- // to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-class BufferDisassembler : public v8::internal::Disassembler {
- public:
- explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
- : out_buffer_(out_buffer) { }
-
- ~BufferDisassembler() { }
-
- virtual void ProcessOutput(v8::internal::Instruction* instr) {
- v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
- }
-
- private:
- v8::internal::Vector<char> out_buffer_;
-};
-
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instr) {
- v8::internal::Decoder decoder;
- BufferDisassembler disasm(buffer);
- decoder.AppendVisitor(&disasm);
-
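-  // A64 instructions are fixed-width, so exactly one 4-byte instruction is
-  // decoded per call.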
- decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
- return v8::internal::kInstructionSize;
-}
-
-
-int Disassembler::ConstantPoolSizeAt(byte* instr) {
- return v8::internal::Assembler::ConstantPoolSizeAt(
- reinterpret_cast<v8::internal::Instruction*>(instr));
-}
-
-
-void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
- v8::internal::Decoder decoder;
- v8::internal::PrintDisassembler disasm(file);
- decoder.AppendVisitor(&disasm);
-
- for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
- decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
- }
-}
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/disasm-a64.h b/deps/v8/src/a64/disasm-a64.h
deleted file mode 100644
index 35b8fe1f63..0000000000
--- a/deps/v8/src/a64/disasm-a64.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_DISASM_A64_H
-#define V8_A64_DISASM_A64_H
-
-#include "v8.h"
-
-#include "globals.h"
-#include "utils.h"
-#include "instructions-a64.h"
-#include "decoder-a64.h"
-
-namespace v8 {
-namespace internal {
-
-
-class Disassembler: public DecoderVisitor {
- public:
- Disassembler();
- Disassembler(char* text_buffer, int buffer_size);
- virtual ~Disassembler();
- char* GetOutput();
-
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
- VISITOR_LIST(DECLARE)
- #undef DECLARE
-
- protected:
- virtual void ProcessOutput(Instruction* instr);
-
- void Format(Instruction* instr, const char* mnemonic, const char* format);
- void Substitute(Instruction* instr, const char* string);
- int SubstituteField(Instruction* instr, const char* format);
- int SubstituteRegisterField(Instruction* instr, const char* format);
- int SubstituteImmediateField(Instruction* instr, const char* format);
- int SubstituteLiteralField(Instruction* instr, const char* format);
- int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
- int SubstituteShiftField(Instruction* instr, const char* format);
- int SubstituteExtendField(Instruction* instr, const char* format);
- int SubstituteConditionField(Instruction* instr, const char* format);
- int SubstitutePCRelAddressField(Instruction* instr, const char* format);
- int SubstituteBranchTargetField(Instruction* instr, const char* format);
- int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
- int SubstitutePrefetchField(Instruction* instr, const char* format);
- int SubstituteBarrierField(Instruction* instr, const char* format);
-
- bool RdIsZROrSP(Instruction* instr) const {
- return (instr->Rd() == kZeroRegCode);
- }
-
- bool RnIsZROrSP(Instruction* instr) const {
- return (instr->Rn() == kZeroRegCode);
- }
-
- bool RmIsZROrSP(Instruction* instr) const {
- return (instr->Rm() == kZeroRegCode);
- }
-
- bool RaIsZROrSP(Instruction* instr) const {
- return (instr->Ra() == kZeroRegCode);
- }
-
- bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
-
- void ResetOutput();
- void AppendToOutput(const char* string, ...);
-
- char* buffer_;
- uint32_t buffer_pos_;
- uint32_t buffer_size_;
- bool own_buffer_;
-};
-
-
-class PrintDisassembler: public Disassembler {
- public:
- explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
- ~PrintDisassembler() { }
-
- virtual void ProcessOutput(Instruction* instr);
-
- private:
- FILE *stream_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_A64_DISASM_A64_H
diff --git a/deps/v8/src/a64/frames-a64.cc b/deps/v8/src/a64/frames-a64.cc
deleted file mode 100644
index 56d2e26b72..0000000000
--- a/deps/v8/src/a64/frames-a64.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "assembler.h"
-#include "assembler-a64.h"
-#include "assembler-a64-inl.h"
-#include "frames.h"
-
-namespace v8 {
-namespace internal {
-
-
-Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
-Register JavaScriptFrame::context_register() { return cp; }
-
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-
-
-Object*& ExitFrame::constant_pool_slot() const {
- UNREACHABLE();
- return Memory::Object_at(NULL);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/frames-a64.h b/deps/v8/src/a64/frames-a64.h
deleted file mode 100644
index 5ef7681645..0000000000
--- a/deps/v8/src/a64/frames-a64.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_FRAMES_A64_H_
-#define V8_A64_FRAMES_A64_H_
-
-#include "a64/constants-a64.h"
-#include "a64/assembler-a64.h"
-
-namespace v8 {
-namespace internal {
-
-const int kNumRegs = kNumberOfRegisters;
-// Registers x0-x17 are caller-saved.
-const int kNumJSCallerSaved = 18;
-const RegList kJSCallerSaved = 0x3ffff;
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of eight.
-// TODO(all): Refine this number.
-const int kNumSafepointRegisters = 32;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
-#define kNumSafepointSavedRegisters \
-  CPURegList::GetSafepointSavedRegisters().Count()
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kFrameSize = 2 * kPointerSize;
-
- static const int kCallerSPDisplacement = 2 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
- static const int kSPOffset = -1 * kPointerSize;
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kLastExitFrameField = kCodeOffset;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-
- // There are two words on the stack (saved fp and saved lr) between fp and
- // the arguments.
- static const int kLastParameterOffset = 2 * kPointerSize;
-
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class ConstructFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kLengthOffset = -4 * kPointerSize;
- static const int kConstructorOffset = -5 * kPointerSize;
- static const int kImplicitReceiverOffset = -6 * kPointerSize;
-
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_A64_FRAMES_A64_H_
diff --git a/deps/v8/src/a64/full-codegen-a64.cc b/deps/v8/src/a64/full-codegen-a64.cc
deleted file mode 100644
index ec5d339781..0000000000
--- a/deps/v8/src/a64/full-codegen-a64.cc
+++ /dev/null
@@ -1,5010 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "a64/code-stubs-a64.h"
-#include "a64/macro-assembler-a64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
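-// Helper for emitting inlined smi checks that PatchInlinedSmiCode (in
-// ic-a64.cc) rewrites later: until patched, EmitJumpIfSmi is never taken
-// and EmitJumpIfNotSmi is always taken.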
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- if (patch_site_.is_bound()) {
- ASSERT(info_emitted_);
- } else {
- ASSERT(reg_.IsNone());
- }
- }
-
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
- InstructionAccurateScope scope(masm_, 1);
- ASSERT(!info_emitted_);
- ASSERT(reg.Is64Bits());
- ASSERT(!reg.Is(csp));
- reg_ = reg;
- __ bind(&patch_site_);
- __ tbz(xzr, 0, target); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg, Label* target) {
- // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc.
- InstructionAccurateScope scope(masm_, 1);
- ASSERT(!info_emitted_);
- ASSERT(reg.Is64Bits());
- ASSERT(!reg.Is(csp));
- reg_ = reg;
- __ bind(&patch_site_);
- __ tbnz(xzr, 0, target); // Never taken before patched.
- }
-
- void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
- // We need to use ip0, so don't allow access to the MacroAssembler.
- InstructionAccurateScope scope(masm_);
- __ orr(ip0, reg1, reg2);
- EmitJumpIfNotSmi(ip0, target);
- }
-
- void EmitPatchInfo() {
- Assembler::BlockConstPoolScope scope(masm_);
- InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
- Register reg_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// - x1: the JS function object being called (i.e. ourselves).
-// - cp: our context.
-// - fp: our caller's frame pointer.
-// - jssp: stack pointer.
-// - lr: return address.
-//
-// The function builds a JS frame. See JavaScriptFrameConstants in
-// frames-a64.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
- InitializeFeedbackVector();
-
- profiling_counter_ = isolate()->factory()->NewCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ Function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ Debug("stop-at", __LINE__, BREAK);
- }
-#endif
-
- // Classic mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->is_classic_mode() && !info->is_native()) {
- Label ok;
- int receiver_offset = info->scope()->num_parameters() * kXRegSizeInBytes;
- __ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack.
- // The MANUAL indicates that the scope shouldn't actually generate code
- // to set up the frame because we do it manually below.
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- // This call emits the following sequence in a way that can be patched for
- // code ageing support:
- // Push(lr, fp, cp, x1);
- // Add(fp, jssp, 2 * kPointerSize);
- info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(BUILD_FUNCTION_FRAME);
- info->AddNoFrameRange(0, masm_->pc_offset());
-
- // Reserve space on the stack for locals.
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- ASSERT(!info->function()->is_generator() || locals_count == 0);
-
- if (locals_count > 0) {
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ PushMultipleTimes(locals_count, x10);
- }
- }
-
- bool function_in_register_x1 = true;
-
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Argument to NewContext is the function, which is still in x1.
- Comment cmnt(masm_, "[ Allocate context");
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
- __ Push(x1, x10);
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register_x1 = false;
- // Context is returned in x0. It replaces the context passed to us.
- // It's saved in the stack and kept live in cp.
- __ Mov(cp, x0);
- __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ Ldr(x10, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ Str(x10, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register_x1) {
- // Load this again, if it's used by the local context below.
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ Mov(x3, x1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(x1, Operand(Smi::FromInt(num_parameters)));
- __ Push(x3, x2, x1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, x0, x1, x2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ASSERT(jssp.Is(__ StackPointer()));
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ Bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the back edge table.
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Mov(x0, Operand(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ Mov(x2, Operand(profiling_counter_));
- __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
- __ Subs(x3, x3, Operand(Smi::FromInt(delta)));
- __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ Mov(x2, Operand(profiling_counter_));
- __ Mov(x3, Operand(Smi::FromInt(reset_value)));
- __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- ASSERT(jssp.Is(__ StackPointer()));
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting back edge code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- Label ok;
-
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- int weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- EmitProfilingCounterDecrement(weight);
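-  // The decrement above used Subs, so the flags are set; fall through to
-  // the interrupt check only when the counter has gone negative.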
- __ B(pl, &ok);
- __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ Bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
-
- if (return_label_.is_bound()) {
- __ B(&return_label_);
-
- } else {
- __ Bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in x0.
- __ Push(result_register());
- __ CallRuntime(Runtime::kTraceExit, 1);
- ASSERT(x0.Is(result_register()));
- }
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ B(pl, &ok);
- __ Push(x0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ Pop(x0);
- EmitProfilingCounterReset();
- __ Bind(&ok);
-
- // Make sure that the constant pool is not emitted inside of the return
- // sequence. This sequence can get patched when the debugger is used. See
- // debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
- {
- InstructionAccurateScope scope(masm_,
- Assembler::kJSRetSequenceInstructions);
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- // This code is generated using Assembler methods rather than Macro
- // Assembler methods because it will be patched later on, and so the size
- // of the generated code must be consistent.
- const Register& current_sp = __ StackPointer();
-      // Nothing ensures 16-byte alignment here.
- ASSERT(!current_sp.Is(csp));
- __ mov(current_sp, fp);
- int no_frame_start = masm_->pc_offset();
- __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSizeInBytes, PostIndex));
- // Drop the arguments and receiver and return.
- // TODO(all): This implementation is overkill as it supports 2**31+1
- // arguments, consider how to improve it without creating a security
- // hole.
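-      // The literal load below fetches the argument count, planted as a
-      // 64-bit value three instructions ahead (just past the ret), so the
-      // patchable sequence keeps a fixed size.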
- __ LoadLiteral(ip0, 3 * kInstructionSize);
- __ add(current_sp, current_sp, ip0);
- __ ret();
- __ dc64(kXRegSizeInBytes * (info_->scope()->num_parameters() + 1));
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- // Root values have no side effects.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ B(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ Mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ Mov(result_register(), Operand(lit));
- __ Push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ B(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else {
- if (true_label_ != fall_through_) __ B(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ B(false_label_);
- } else {
- if (true_label_ != fall_through_) __ B(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ Mov(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ Poke(reg, 0);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Mov(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ Bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ Bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ B(&done);
- __ Bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ Bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ Bind(materialize_true);
- __ LoadRoot(x10, Heap::kTrueValueRootIndex);
- __ B(&done);
- __ Bind(materialize_false);
- __ LoadRoot(x10, Heap::kFalseValueRootIndex);
- __ Bind(&done);
- __ Push(x10);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(x10, value_root_index);
- __ Push(x10);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) {
- __ B(true_label_);
- }
- } else {
- if (false_label_ != fall_through_) {
- __ B(false_label_);
- }
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
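-  // The stub leaves zero in the result register for false and a non-zero
-  // value for true, so test the result against zero.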
- __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
-}
-
-
-// If (cond), branch to if_true.
-// If (!cond), branch to if_false.
-// fall_through is used as an optimization in cases where only one branch
-// instruction is necessary.
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ B(cond, if_true);
- } else if (if_true == fall_through) {
- ASSERT(if_false != fall_through);
- __ B(InvertCondition(cond), if_false);
- } else {
- __ B(cond, if_true);
- __ B(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kXRegSizeInBytes;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextMemOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ Ldr(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!AreAliased(src, scratch0, scratch1));
- MemOperand location = VarOperand(var, scratch0);
- __ Str(src, location);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- // scratch0 contains the correct context.
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- // TODO(all): Investigate to see if there is something to work on here.
- Label skip;
- if (should_normalize) {
- __ B(&skip);
- }
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, NULL);
- __ Bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
- __ Check(ne, kDeclarationInWithContext);
- __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, kDeclarationInCatchContext);
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = (mode == CONST) || (mode == CONST_HARMONY) || (mode == LET);
-
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Str(x10, StackOperand(variable));
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Str(x10, ContextMemOperand(cp, variable->index()));
- // No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ Mov(x2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
- : NONE;
- __ Mov(x1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, x2, x1, x0);
- } else {
- // Pushing 0 (xzr) indicates no initial value.
- __ Push(cp, x2, x1, xzr);
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ Function Declaration");
- VisitForAccumulatorValue(declaration->fun());
- __ Str(result_register(), StackOperand(variable));
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ Function Declaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ Str(result_register(), ContextMemOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- x2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Function Declaration");
- __ Mov(x2, Operand(variable->name()));
- __ Mov(x1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, x2, x1);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
- __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ Str(x1, ContextMemOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- x1,
- x3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
-  // Traverse the module body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ Mov(x11, Operand(pairs));
- Register flags = xzr;
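-  // Smi::FromInt(0) is the tagged value zero, so this tests whether any
-  // flags are set; when none are, pushing xzr supplies the Smi zero.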
- if (Smi::FromInt(DeclareGlobalsFlags())) {
- flags = x10;
- __ Mov(flags, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- }
- __ Push(cp, x11, flags);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ Bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ Peek(x1, 0); // Switch value.
-
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- __ B(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ B(clause->body_target());
- __ Bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- Label skip;
- __ B(&skip);
- PrepareForBailout(clause, TOS_REG);
- __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
- __ Drop(1);
- __ B(clause->body_target());
- __ Bind(&skip);
-
- __ Cbnz(x0, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ B(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ Bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ B(nested_statement.break_label());
- } else {
- __ B(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ Bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ Bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
- Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
- // TODO(all): This visitor probably needs better comments and a revisit.
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
- Register null_value = x15;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Cmp(x0, null_value);
- __ B(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(x0, &convert);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
- __ Bind(&convert);
- __ Push(x0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Bind(&done_convert);
- __ Push(x0);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ B(&use_cache);
-
- // Get the set of properties to enumerate.
- __ Bind(&call_runtime);
- __ Push(x0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array, no_descriptors;
- __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
-
- // We got a map in register x0. Get the enumeration cache from it.
- __ Bind(&use_cache);
-
- __ EnumLengthUntagged(x1, x0);
- __ Cbz(x1, &no_descriptors);
-
- __ LoadInstanceDescriptors(x0, x2);
- __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
- __ Ldr(x2,
- FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ Push(x0); // Map.
- __ Mov(x0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ SmiTag(x1);
- __ Push(x2, x1, x0);
- __ B(&loop);
-
- __ Bind(&no_descriptors);
- __ Drop(1);
- __ B(&exit);
-
- // We got a fixed array in register x0. Iterate through that.
- __ Bind(&fixed_array);
-
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
- __ LoadObject(x1, FeedbackVector());
- __ Mov(x10, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
- __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
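-   // The slot was statically initialized to the fast-case marker above;
-   // executing this path at runtime overwrites it with the slow-case marker
-   // to record that a fixed array (slow case) was used.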
-
- __ Mov(x1, Operand(Smi::FromInt(1))); // Smi indicates slow check.
- __ Peek(x10, 0); // Get enumerated object.
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- // TODO(all): similar check was done already. Can we avoid it here?
- __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
- ASSERT(Smi::FromInt(0) == 0);
- __ CzeroX(x1, le); // Zero indicates proxy.
- __ Push(x1, x0); // Smi and array
- __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
- __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
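-
-   // On both paths the five stack slots used by the loop are, top to bottom:
-   //   jssp[0]  : index (smi)
-   //   jssp[8]  : length (smi)
-   //   jssp[16] : array of keys (fixed array)
-   //   jssp[24] : map, or smi 1/0 marking the permanent slow case
-   //   jssp[32] : enumerable object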
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ Bind(&loop);
- // Load the current count to x0, load the length to x1.
- __ PeekPair(x0, x1, 0);
- __ Cmp(x0, x1); // Compare to the array length.
- __ B(hs, loop_statement.break_label());
-
-   // Get the current entry of the array into register x3.
- __ Peek(x10, 2 * kXRegSizeInBytes);
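-   // UntagSmiAndScale computes (index >> kSmiShift) << kPointerSizeLog2,
-   // the byte offset of the current key in the fixed array.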
- __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
- __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
-
-   // Get the expected map from the stack, or a smi in the
-   // permanent slow case, into register x2.
- __ Peek(x2, 3 * kXRegSizeInBytes);
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ Peek(x1, 4 * kXRegSizeInBytes);
- __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Cmp(x11, x2);
- __ B(eq, &update_each);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbz(x2, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ Mov(x3, x0);
- __ Cbz(x0, loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register x3.
- __ Bind(&update_each);
- __ Mov(result_register(), x3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ Bind(loop_statement.continue_label());
- // TODO(all): We could use a callee saved register to avoid popping.
- __ Pop(x0);
- __ Add(x0, x0, Operand(Smi::FromInt(1)));
- __ Push(x0);
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ B(&loop);
-
- // Remove the pointers stored on the stack.
- __ Bind(loop_statement.break_label());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ Bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- Comment cmnt(masm_, "[ ForOfStatement");
- SetStatementPosition(stmt);
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // var iterator = iterable[@@iterator]()
- VisitForAccumulatorValue(stmt->assign_iterator());
-
- // As with for-in, skip the loop if the iterator is null or undefined.
- Register iterator = x0;
- __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
- loop_statement.break_label());
- __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
- loop_statement.break_label());
-
- // Convert the iterator to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(iterator, &convert);
- __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, &done_convert);
- __ Bind(&convert);
- __ Push(iterator);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ Bind(&done_convert);
- __ Push(iterator);
-
- // Loop entry.
- __ Bind(loop_statement.continue_label());
-
- // result = iterator.next()
- VisitForEffect(stmt->next_result());
-
- // if (result.done) break;
- Label result_not_done;
- VisitForControl(stmt->result_done(),
- loop_statement.break_label(),
- &result_not_done,
- &result_not_done);
- __ Bind(&result_not_done);
-
- // each = result.value
- VisitForEffect(stmt->assign_each());
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ B(loop_statement.continue_label());
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ Bind(loop_statement.break_label());
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new space for
- // nested functions that don't need literals cloning. If we're running with
- // the --always-opt or the --prepare-always-opt flag, we need to use the
- // runtime function so that the new function we are creating here gets a
- // chance to have its code optimized and doesn't just get a copy of the
- // existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ Mov(x2, Operand(info));
- __ CallStub(&stub);
- } else {
- __ Mov(x11, Operand(info));
- __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, x11, x10);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = x10;
- Register temp = x11;
-
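-   // Walk the context chain. Any context whose scope calls a non-strict eval
-   // may have had bindings added to its extension object at runtime, so a
-   // non-NULL extension forces the slow (dynamic lookup) path.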
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- }
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- __ Mov(next, current);
-
- __ Bind(&loop);
- // Terminate at native context.
- __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ B(&loop);
- __ Bind(&fast);
- }
-
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(var->name()));
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
- : CONTEXTUAL;
- CallLoadIC(mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = cp;
- Register next = x10;
- Register temp = x11;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
- }
- __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
-
- // This function is used only for loads, not stores, so it's safe to
-   // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
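-   // DYNAMIC_GLOBAL: the variable is global unless an eval introduces a
-   // shadowing binding. DYNAMIC_LOCAL: it resolves to a known local slot
-   // unless shadowed. Both fast cases fall back to |slow| if a context
-   // extension is found on the way.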
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ B(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
- if (local->mode() == CONST) {
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ B(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in x2 and the global
- // object (receiver) in x0.
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(var->name()));
- CallLoadIC(CONTEXTUAL);
- context()->Plug(x0);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(x0, var);
- Label done;
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ Bind(&done);
- } else {
-           // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- }
- context()->Plug(x0);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ Bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ Mov(x1, Operand(var->name()));
- __ Push(cp, x1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Bind(&done);
- context()->Plug(x0);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // x5 = materialized value (RegExp literal)
- // x4 = JS function, literals array
- // x3 = literal index
- // x2 = RegExp pattern
- // x1 = RegExp flags
- // x0 = RegExp literal clone
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ Ldr(x5, FieldMemOperand(x4, literal_offset));
- __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in x0.
- __ Mov(x3, Operand(Smi::FromInt(expr->literal_index())));
- __ Mov(x2, Operand(expr->pattern()));
- __ Mov(x1, Operand(expr->flags()));
- __ Push(x4, x3, x2, x1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Mov(x5, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x10, Operand(Smi::FromInt(size)));
- __ Push(x5, x10);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x5);
-
- __ Bind(&allocated);
- // After this, registers are used as follows:
- // x0: Newly allocated regexp.
- // x5: Materialized regexp.
- // x10, x11, x12: temps.
- __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(x10, Heap::kNullValueRootIndex);
- __ Push(x10);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- expr->BuildConstantProperties(isolate());
- Handle<FixedArray> constant_properties = expr->constant_properties();
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
- __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
- __ Mov(x1, Operand(constant_properties));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ Mov(x0, Operand(Smi::FromInt(flags)));
- int properties_count = constant_properties->length() / 2;
- const int max_cloned_properties =
- FastCloneShallowObjectStub::kMaximumClonedProperties;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- (expr->depth() > 1) || Serializer::enabled() ||
- (flags != ObjectLiteral::kFastElements) ||
- (properties_count > max_cloned_properties)) {
- __ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in x0.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ Push(x0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->value()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ Mov(x2, Operand(key->value()));
- __ Peek(x1, 0);
- CallStoreIC(key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Duplicate receiver on stack.
- __ Peek(x0, 0);
- __ Push(x0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ Mov(x0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ Push(x0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- // Duplicate receiver on stack.
- __ Peek(x0, 0);
- // TODO(jbramley): This push shouldn't be necessary if we don't call the
- // runtime below. In that case, skip it.
- __ Push(x0);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetPrototype, 2);
- } else {
- __ Drop(2);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ Peek(x10, 0); // Duplicate receiver.
- __ Push(x10);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ Mov(x10, Operand(Smi::FromInt(NONE)));
- __ Push(x10);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ Peek(x0, 0);
- __ Push(x0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(x0);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- expr->BuildConstantElements(isolate());
- int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
- : ArrayLiteral::kNoFlags;
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
- if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
- // If the only customer of allocation sites is transitioning, then
- // we can turn it off if we don't have anywhere else to transition to.
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
- // TODO(jbramley): Can these Operand constructors be implicit?
- __ Mov(x2, Operand(Smi::FromInt(expr->literal_index())));
- __ Mov(x1, Operand(constant_elements));
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
- } else if ((expr->depth() > 1) || Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Mov(x0, Operand(Smi::FromInt(flags)));
- __ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- if (!result_saved) {
- __ Push(x0);
- __ Push(Smi::FromInt(expr->literal_index()));
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ Peek(x6, kPointerSize); // Copy of array literal.
- __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
- __ Str(result_register(), FieldMemOperand(x1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(x1, offset, result_register(), x10,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ Mov(x3, Operand(Smi::FromInt(i)));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- __ Drop(1); // literal index
- context()->PlugTOS();
- } else {
- context()->Plug(x0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ Push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ Peek(x1, 0);
- __ Push(x0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ Push(x0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ Mov(x2, Operand(key->value()));
-   // Call load IC. It has arguments receiver and property name in x0 and x2.
- CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
-   // Call keyed load IC. It has arguments key and receiver in x0 and x1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, both_smis, stub_call;
-
- // Get the arguments.
- Register left = x1;
- Register right = x0;
- Register result = x0;
- __ Pop(left);
-
-   // Perform a combined smi check on both operands: the OR of two smis
-   // still has a clear tag bit, while any non-smi operand sets it.
- __ Orr(x10, left, right);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(x10, &both_smis);
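-   // The patch site records the position of this jump so the IC can later
-   // patch the inline smi path in or out as type feedback evolves.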
-
- __ Bind(&stub_call);
- BinaryOpICStub stub(op, mode);
- {
- Assembler::BlockConstPoolScope scope(masm_);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ B(&done);
-
- __ Bind(&both_smis);
- // Smi case. This code works in the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- // TODO(all): That doesn't exist any more. Where are the comments?
- //
- // The set of operations that needs to be supported here is controlled by
- // FullCodeGenerator::ShouldInlineSmiCase().
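-   //
-   // On A64, a smi keeps its 32-bit payload in the upper word of the
-   // register (kSmiShift == 32). The shift cases below therefore extract
-   // the untagged shift amount with Ubfx and clear the low tag bits again
-   // with Bic where needed.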
- switch (op) {
- case Token::SAR:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Asr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsl(result, left, right);
- break;
- case Token::SHR: {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- __ Tbnz(left, kXSignBit, &stub_call);
- __ Bind(&right_not_zero);
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
- break;
- }
- case Token::ADD:
- __ Adds(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::SUB:
- __ Subs(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::MUL: {
- Label not_minus_zero, done;
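-       // Both operands are tagged as (value << 32), so the 128-bit product
-       // is (a * b) << 64 and Smulh leaves exactly the untagged product in
-       // x10. A zero product still needs the minus-zero check below;
-       // otherwise Cls verifies the result fits in 32 bits before re-tagging.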
- __ Smulh(x10, left, right);
- __ Cbnz(x10, &not_minus_zero);
- __ Eor(x11, left, right);
- __ Tbnz(x11, kXSignBit, &stub_call);
- STATIC_ASSERT(kSmiTag == 0);
- __ Mov(result, x10);
- __ B(&done);
- __ Bind(&not_minus_zero);
- __ Cls(x11, x10);
- __ Cmp(x11, kXRegSize - kSmiShift);
- __ B(lt, &stub_call);
- __ SmiTag(result, x10);
- __ Bind(&done);
- break;
- }
- case Token::BIT_OR:
- __ Orr(result, left, right);
- break;
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_XOR:
- __ Eor(result, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ Pop(x1);
- BinaryOpICStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
- {
- Assembler::BlockConstPoolScope scope(masm_);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ Push(x0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
- // this copy.
- __ Mov(x1, x0);
- __ Pop(x0); // Restore value.
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC();
- break;
- }
- case KEYED_PROPERTY: {
- __ Push(x0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Mov(x1, x0);
- __ Pop(x2, x0);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ Str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ Mov(x10, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, LanguageMode mode) {
- __ Mov(x11, Operand(name));
- __ Mov(x10, Operand(Smi::FromInt(mode)));
- // jssp[0] : mode.
- // jssp[8] : name.
- // jssp[16] : context.
- // jssp[24] : value.
- __ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ Mov(x2, Operand(var->name()));
- __ Ldr(x1, GlobalObjectMemOperand());
- CallStoreIC();
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Push(x0);
- __ Mov(x0, Operand(var->name()));
- __ Push(cp, x0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- ASSERT(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ Bind(&skip);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ Bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
- __ Ldr(x10, location);
- __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
- __ Pop(x1);
-
- CallStoreIC(expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
- // Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- // TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(x1, x2); // Key and object holding the property.
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(x0);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Pop(x1);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(x0);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithIC(Call* expr) {
- ASM_LOCATION("EmitCallWithIC");
-
- Expression* callee = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- CallFunctionFlags flags;
- // Get the target function.
- if (callee->IsVariableProxy()) {
- { StackValueContext context(this);
- EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
- }
- // Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
- __ Push(isolate()->factory()->undefined_value());
- flags = NO_CALL_FUNCTION_FLAGS;
- } else {
- // Load the function from the receiver.
- ASSERT(callee->IsProperty());
- __ Peek(x0, 0);
- EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
- flags = CALL_AS_METHOD;
- }
-
- // Load the arguments.
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, flags);
- __ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
-
- RecordJSReturnSite(expr);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- Expression* callee = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- // Load the function from the receiver.
- ASSERT(callee->IsProperty());
- __ Peek(x1, 0);
- EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
-
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, CALL_AS_METHOD);
- __ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ LoadObject(x2, FeedbackVector());
- __ Mov(x3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
-
- // Record call targets in unoptimized code.
- CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
- __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
- // Prepare to push a copy of the first argument or undefined if it doesn't
- // exist.
- if (arg_count > 0) {
- __ Peek(x10, arg_count * kXRegSizeInBytes);
- } else {
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- }
-
- // Prepare to push the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
-
- // Push.
- __ Push(x10, x11);
-
- // Prepare to push the language mode.
- __ Mov(x10, Operand(Smi::FromInt(language_mode())));
-   // Prepare to push the start position of the scope the call resides in.
- __ Mov(x11, Operand(Smi::FromInt(scope()->start_position())));
-
- // Push.
- __ Push(x10, x11);
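-
-   // Together with the function copy pushed by the caller, the five runtime
-   // arguments are now on the stack: function, first argument (or
-   // undefined), enclosing receiver, language mode and scope start position.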
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- {
- PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in x0 (function) and
- // x1 (receiver). Touch up the stack with the right values.
- __ PokePair(x1, x0, arg_count * kPointerSize);
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Call the evaluated function.
- CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- VariableProxy* proxy = callee->AsVariableProxy();
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ Bind(&slow);
- // Call the runtime to find the function to call (returned in x0)
- // and the object holding it (returned in x1).
- __ Push(context_register());
- __ Mov(x10, Operand(proxy->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Push(x0, x1); // Receiver, function.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ B(&call);
- __ Bind(&done);
- // Push function.
- __ Push(x0);
- // The receiver is implicitly the global receiver. Indicate this
-       // by passing undefined to the call function stub.
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
- __ Bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot.
- EmitCallWithStub(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
-
- } else {
- ASSERT(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
- // Emit function call.
- EmitCallWithStub(expr);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into x1 and x0.
- __ Mov(x0, arg_count);
- __ Peek(x1, arg_count * kXRegSizeInBytes);
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
- __ LoadObject(x2, FeedbackVector());
- __ Mov(x3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
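-   // The mask tests the smi tag bit and the sign bit of the 32-bit payload
-   // (shifted into the upper word), so only non-negative smis split to
-   // if_true.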
- __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
- if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tbnz(x11, Map::kIsUndetectable, if_false);
- __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(lt, if_false);
- __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tst(x11, 1 << Map::kIsUndetectable);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Register object = x0;
- __ AssertNotSmi(object);
-
- Register map = x10;
- Register bitfield2 = x11;
- __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- Register props = x12;
- Register props_map = x12;
- Register hash_table_map = x13;
- __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
- __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
- __ Cmp(props_map, hash_table_map);
- __ B(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if found.
- // Since we omit an enumeration index check, if it is added via a transition
- // that shares its descriptor array, this is a false positive.
- Label loop, done;
-
- // Skip loop if no descriptors are valid.
- Register descriptors = x12;
- Register descriptors_length = x13;
- __ NumberOfOwnDescriptors(descriptors_length, map);
- __ Cbz(descriptors_length, &done);
-
- __ LoadInstanceDescriptors(map, descriptors);
-
-   // Compute the number of pointer-size slots covered by the valid
-   // descriptors.
- Register descriptors_end = x14;
- __ Mov(x15, DescriptorArray::kDescriptorSize);
- __ Mul(descriptors_length, descriptors_length, x15);
- // Calculate location of the first key name.
- __ Add(descriptors, descriptors,
- DescriptorArray::kFirstOffset - kHeapObjectTag);
- // Calculate the end of the descriptor array.
- __ Add(descriptors_end, descriptors,
- Operand(descriptors_length, LSL, kPointerSizeLog2));
-
-   // Loop through all the keys in the descriptor array. If one of these is
-   // the string "valueOf", the result is false.
- Register valueof_string = x1;
- int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
- __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
- __ Bind(&loop);
- __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
- __ Cmp(x15, valueof_string);
- __ B(eq, if_false);
- __ Cmp(descriptors, descriptors_end);
- __ B(ne, &loop);
-
- __ Bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
-
- __ Bind(&skip_lookup);
-
-   // If a valueOf property is not found on the object, check that its
-   // prototype is the unmodified String prototype. If not, the result is
-   // false.
- Register prototype = x1;
- Register global_idx = x2;
- Register native_context = x2;
- Register string_proto = x3;
- Register proto_map = x4;
- __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ JumpIfSmi(prototype, if_false);
- __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
- __ Ldr(global_idx, GlobalObjectMemOperand());
- __ Ldr(native_context,
- FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
- __ Ldr(string_proto,
- ContextMemOperand(native_context,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Cmp(proto_map, string_proto);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Only a HeapNumber can be -0.0, so return false if we have something else.
- __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-
- // Test the bit pattern.
- __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
- __ Cmp(x10, 1); // Set V on 0x8000000000000000.
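-   // x10 - 1 overflows (sets V) only when x10 is INT64_MIN, which is
-   // exactly the IEEE-754 bit pattern of -0.0.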
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(vs, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
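- // Construct frames store Smi(StackFrame::CONSTRUCT) in the marker slot, so
- // eq here means the surrounding function was invoked with 'new'.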
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Pop(x1);
- __ Cmp(x0, x1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in x1.
- VisitForAccumulatorValue(args->at(0));
- __ Mov(x1, x0);
- __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
- __ Cmp(x13, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ Bind(&exit);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitClassOf");
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(x0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, one at each end of the
- // type range for JS object types. This saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
- // x10: object's map.
- // x11: object's type.
- __ B(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ B(eq, &function);
-
- __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ B(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
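- // Spelled out, the asserts pin down this assumed layout of the
- // instance-type enum:
- //   FIRST_SPEC_OBJECT_TYPE                                   (callable)
- //   FIRST_NONCALLABLE_ .. LAST_NONCALLABLE_SPEC_OBJECT_TYPE  (non-callable)
- //   LAST_SPEC_OBJECT_TYPE == LAST_TYPE                       (callable)
- // so the two eq branches above catch exactly the two callable endpoints.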
-
- // Check if the constructor in the map is a JS function.
- __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
- __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
- &non_function_constructor);
-
- // x12 now contains the constructor function. Grab the
- // instance class name from there.
- __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x0,
- FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
- __ B(&done);
-
- // Functions have class 'Function'.
- __ Bind(&function);
- __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
- __ B(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ Bind(&non_function_constructor);
- __ LoadRoot(x0, Heap::kObject_stringRootIndex);
- __ B(&done);
-
- // Non-JS objects have class null.
- __ Bind(&null);
- __ LoadRoot(x0, Heap::kNullValueRootIndex);
-
- // All done.
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitValueOf");
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi, return the object.
- __ JumpIfSmi(x0, &done);
- // If the object is not a value type, return the object.
- __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
- __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = x0;
- Register result = x0;
- Register stamp_addr = x10;
- Register stamp_cache = x11;
-
- __ JumpIfSmi(object, &not_date_object);
- __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ B(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(x10, Operand(stamp));
- __ Ldr(stamp_addr, MemOperand(x10));
- __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(stamp_addr, stamp_cache);
- __ B(ne, &runtime);
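- // Fast path: the global date-cache stamp still matches the stamp cached on
- // this JSDate object, so the pre-computed field below can be read directly.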
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ B(&done);
- }
-
- __ Bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
- __ Pop(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- one_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strb(value, MemOperand(scratch, index));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- VisitForAccumulatorValue(args->at(0)); // string
- __ Pop(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- two_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strh(value, MemOperand(scratch, index, LSL, 1));
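- // The LSL #1 scales the untagged index by the two-byte character size;
- // e.g. index 3 stores at scratch + 6.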
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the MathPow stub.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ Pop(x1);
- // x0 = value.
- // x1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(x1, &done);
-
- // If the object is not a value type, return the value.
- __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
-
- // Store the value.
- __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ Mov(x10, x0);
- __ RecordWriteField(
- x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument into x0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- Register code = x0;
- Register result = x1;
-
- StringCharFromCodeGenerator generator(code, result);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = x1;
- Register index = x0;
- Register result = x3;
-
- __ Pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- __ Bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ B(&done);
-
- __ Bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = x1;
- Register index = x0;
- Register result = x0;
-
- __ Pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- x3,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- __ Bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&need_conversion);
- // Move smi zero into the result register, which will trigger conversion.
- __ Mov(result, Operand(Smi::FromInt(0)));
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(x1);
- StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_log, 1);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
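- // E.g. a call like %_CallFunction(recv, a, b, fn) arrives with
- // args->length() == 4: recv, a and b are pushed (arg_count == 2) and fn is
- // left in the accumulator (x0).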
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(x0, &runtime);
- __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
-
- // InvokeFunction requires the function in x1. Move it in there.
- __ Mov(x1, x0);
- ParameterCount count(arg_count);
- __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ B(&done);
-
- __ Bind(&runtime);
- __ Push(x0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(x1, x2);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- context()->Plug(x0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = x0;
- Register cache = x1;
- __ Ldr(cache, GlobalObjectMemOperand());
- __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ Ldr(cache, ContextMemOperand(cache,
- Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ Ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done;
- __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
- JSFunctionResultCache::kFingerOffset));
- __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
-
- // Load the key and data from the cache.
- __ Ldp(x2, x3, MemOperand(x3));
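- // Layout sketch (an assumption from JSFunctionResultCache): the backing
- // FixedArray keeps a finger index followed by (key, value) pairs. x3 points
- // at the pair under the finger, so Ldp loads the key into x2 and the cached
- // value into x3 in a single access.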
-
- __ Cmp(key, x2);
- __ CmovX(x0, x3, eq);
- __ B(eq, &done);
-
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x10, String::kContainsCachedArrayIndexMask);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(x0);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ IndexFromHash(x10, x0);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
-
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- Register array = x0;
- Register result = x0;
- Register elements = x1;
- Register element = x2;
- Register separator = x3;
- Register array_length = x4;
- Register result_pos = x5;
- Register map = x6;
- Register string_length = x10;
- Register elements_end = x11;
- Register string = x12;
- Register scratch1 = x13;
- Register scratch2 = x14;
- Register scratch3 = x7;
- Register separator_length = x15;
-
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
-
- // The separator operand is on the stack.
- __ Pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(map, scratch1, &bailout);
-
- // If the array has length zero, return the empty string.
- // Load and untag the length of the array.
- // It is an unsigned value, so we can skip sign extension.
- // We assume little endianness.
- __ Ldrsw(array_length,
- UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
- __ Cbnz(array_length, &non_trivial_array);
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&non_trivial_array);
- // Get the FixedArray containing array's elements.
- __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths.
- __ Mov(string_length, 0);
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (not smi).
- // element: Current array element.
- // elements_end: Array end.
- if (FLAG_debug_code) {
- __ Cmp(array_length, Operand(0));
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
- }
- __ Bind(&loop);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ Ldrsw(scratch1,
- UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ Adds(string_length, string_length, scratch1);
- __ B(vs, &bailout);
- __ Cmp(element, elements_end);
- __ B(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ Cmp(array_length, 1);
- __ B(ne, &not_size_one_array);
- __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ B(&done);
-
- __ Bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array (not smi).
- // string_length: Sum of string lengths (not smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ JumpIfSmi(separator, &bailout);
- __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string.
- // Load the separator length as untagged.
- // We assume little endianness, and that the length is positive.
- __ Ldrsw(separator_length,
- UntagSmiFieldMemOperand(separator,
- SeqOneByteString::kLengthOffset));
- __ Sub(string_length, string_length, separator_length);
- __ Umaddl(string_length, array_length.W(), separator_length.W(),
- string_length);
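- // Worked example: joining n strings of combined length L with a separator
- // of length s needs (n - 1) separators, i.e. L + (n - 1) * s characters;
- // the Sub/Umaddl pair computes exactly (L - s) + n * s.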
-
- // Get first element in the array.
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array (not smi).
- __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
- &bailout);
-
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- // TODO(all): useless unless AllocateAsciiString trashes the register.
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Check the length of the separator.
- __ Cmp(separator_length, 1);
- __ B(eq, &one_char_separator);
- __ B(gt, &long_separator);
-
- // Empty separator case
- __ Bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &empty_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // One-character separator case
- __ Bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ B(&one_char_separator_loop_entry);
-
- __ Bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
-
- // Copy the separator character to the result.
- __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ Bind(&one_char_separator_loop_entry);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ Bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- // TODO(all): hoist next two instructions.
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(separator, String::kLengthOffset));
- __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
-
- __ Bind(&long_separator);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &long_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- __ Bind(&bailout);
- // Returning undefined will force slower code to handle it.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- if (expr->is_jsruntime()) {
- // Push the builtins object as the receiver.
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(x0);
-
- // Load the function from the receiver.
- __ Mov(x2, Operand(name));
- CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
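- // The stack is now [..., function, receiver]: the target function sits
- // directly under the builtins receiver, which is the layout the
- // CallFunctionStub path below peeks at.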
-
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
- } else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(x0);
- }
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Mov(x10, Operand(Smi::FromInt(strict_mode_flag)));
- __ Push(x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(x0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ Ldr(x12, GlobalObjectMemOperand());
- __ Mov(x11, Operand(var->name()));
- __ Mov(x10, Operand(Smi::FromInt(kNonStrictMode)));
- __ Push(x12, x11, x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(x0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Mov(x2, Operand(var->name()));
- __ Push(context_register(), x2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(x0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- // TODO(jbramley): This could be much more efficient using (for
- // example) the CSEL instruction.
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
-
- __ Bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ B(&done);
-
- __ Bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ B(&done);
-
- __ Bind(&done);
- if (context()->IsStackValue()) {
- __ Push(result_register());
- }
- }
- break;
- }
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- {
- StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(x0);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(xzr);
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ Push(x0);
- EmitNamedPropertyLoad(prop);
- } else {
- // KEYED_PROPERTY
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Peek(x1, 0);
- __ Push(x0);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value,
- // because evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(x0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property we
- // store the result under the receiver that is currently on top of the
- // stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(x0);
- break;
- case NAMED_PROPERTY:
- __ Poke(x0, kPointerSize);
- break;
- case KEYED_PROPERTY:
- __ Poke(x0, kPointerSize * 2);
- break;
- }
- }
- }
-
- __ Adds(x0, x0, Operand(Smi::FromInt(count_value)));
- __ B(vc, &done);
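- // A note on the check above (assuming the A64 Smi layout keeps the 32-bit
- // payload in the upper word): Adds operates on the tagged values directly,
- // and signed 64-bit overflow of the tagged add coincides with payload
- // overflow, so V clear (vc) means the inlined increment succeeded.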
- // Call stub. Undo operation first.
- __ Sub(x0, x0, Operand(Smi::FromInt(count_value)));
- __ B(&stub_call);
- __ Bind(&slow);
- }
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(x0);
- break;
- case NAMED_PROPERTY:
- __ Poke(x0, kXRegSizeInBytes);
- break;
- case KEYED_PROPERTY:
- __ Poke(x0, 2 * kXRegSizeInBytes);
- break;
- }
- }
- }
-
- __ Bind(&stub_call);
- __ Mov(x1, x0);
- __ Mov(x0, Operand(Smi::FromInt(count_value)));
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- {
- Assembler::BlockConstPoolScope scope(masm_);
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ Bind(&done);
-
- // Store the value returned in x0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(x0);
- }
- // For all contexts except EffectContext, we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
- }
- break;
- case NAMED_PROPERTY: {
- __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
- __ Pop(x1);
- CallStoreIC(expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ Pop(x1); // Key.
- __ Pop(x2); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Mov(x2, Operand(proxy->name()));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallLoadIC(NOT_CONTEXTUAL);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(x0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ Bind(&slow);
- __ Mov(x0, Operand(proxy->name()));
- __ Push(cp, x0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ Bind(&done);
-
- context()->Plug(x0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
- Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
- __ JumpIfSmi(x0, if_true);
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
- __ JumpIfSmi(x0, if_false);
- // Check for undetectable objects => false.
- __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
- __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
- __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
- } else if (check->Equals(isolate()->heap()->symbol_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
- __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
- __ CompareRoot(x0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
- __ CompareRoot(x0, Heap::kNullValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- ASM_LOCATION(
- "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
- __ JumpIfSmi(x0, if_false);
- // Check for undetectable objects => true.
- __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
- __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
- fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
- __ JumpIfSmi(x0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
- __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
- fall_through);
-
- } else if (check->Equals(isolate()->heap()->object_string())) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
- __ JumpIfSmi(x0, if_false);
- if (!FLAG_harmony_typeof) {
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- }
- // Check for JS objects => true.
- Register map = x10;
- __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- if_false, lt);
- __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(gt, if_false);
- // Check for undetectable objects => false.
- __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
-
- __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
-
- } else {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
- if (if_false != fall_through) __ B(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // Try to generate an optimized comparison with a literal value.
- // TODO(jbramley): This only checks common values like NaN or undefined.
- // Should it also handle A64 immediate operands?
- if (TryLiteralCompare(expr)) {
- return;
- }
-
- // Assign labels according to context()->PrepareTest.
- Label materialize_true;
- Label materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
-
- // Pop the stack value.
- __ Pop(x1);
-
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(op)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- Split(cond, if_true, if_false, NULL);
- __ Bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (expr->op() == Token::EQ_STRICT) {
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(x0, nil_value);
- Split(eq, if_true, if_false, fall_through);
- } else {
- Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
- }
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- // TODO(jbramley): Tidy this up once the merge is done, using named registers
- // and suchlike. The implementation changes a little on bleeding_edge, so I
- // don't want to spend too much time on it now.
-
- switch (expr->yield_kind()) {
- case Yield::SUSPEND:
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(false);
- __ Push(result_register());
- // Fall through.
- case Yield::INITIAL: {
- Label suspend, continuation, post_runtime, resume;
-
- __ B(&suspend);
-
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&continuation);
- __ B(&resume);
-
- __ Bind(&suspend);
- VisitForAccumulatorValue(expr->generator_object());
- ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
- __ Mov(x1, Operand(Smi::FromInt(continuation.pos())));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
- __ Cmp(__ StackPointer(), x1);
- __ B(eq, &post_runtime);
- __ Push(x0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Bind(&post_runtime);
- __ Pop(result_register());
- EmitReturnSequence();
-
- __ Bind(&resume);
- context()->Plug(result_register());
- break;
- }
-
- case Yield::FINAL: {
- VisitForAccumulatorValue(expr->generator_object());
- __ Mov(x1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ Str(x1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
- // Pop value from top-of-stack slot, box result into result register.
- EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
- break;
- }
-
- case Yield::DELEGATING: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
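- // Rough sketch of the loop being compiled here (a paraphrase, not spec
- // text):
- //   var received = undefined, f = 'next';
- //   for (;;) {
- //     var result = iter[f](received);              // l_call
- //     if (result.done) break;                      // l_loop
- //     try { received = yield result; f = 'next'; } // l_try / l_suspend
- //     catch (e) { received = e; f = 'throw'; }     // l_catch
- //   }
- //   // ...after which result.value becomes the value of the whole yield*.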
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- // Initial send value is undefined.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ B(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ Bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
- __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(x2, x3, x0); // "throw", iter, except
- __ B(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ Bind(&l_try);
- __ Pop(x0); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
- __ Push(x0); // result
- __ B(&l_suspend);
-
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&l_continuation);
- __ B(&l_resume);
-
- __ Bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
- __ Peek(x0, generator_object_depth);
- __ Push(x0); // g
- ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
- __ Mov(x1, Operand(Smi::FromInt(l_continuation.pos())));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Pop(x0); // result
- EmitReturnSequence();
- __ Bind(&l_resume); // received in x0
- __ PopTryHandler();
-
- // receiver = iter; f = 'next'; arg = received;
- __ Bind(&l_next);
- __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(x2, x3, x0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ Bind(&l_call);
- __ Peek(x1, 1 * kPointerSize);
- __ Peek(x0, 2 * kPointerSize);
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, TypeFeedbackId::None());
- __ Mov(x1, x0);
- __ Poke(x1, 2 * kPointerSize);
- CallFunctionStub stub(1, CALL_AS_METHOD);
- __ CallStub(&stub);
-
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Bind(&l_loop);
- __ Push(x0); // save result
- __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
- CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
- // The ToBooleanStub argument (result.done) is in x0.
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ Cbz(x0, &l_try);
-
- // result.value
- __ Pop(x0); // result
- __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
- CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
- context()->DropAndPlug(2, x0); // drop iter and g
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
- Register value_reg = x0;
- Register generator_object = x1;
- Register the_hole = x2;
- Register operand_stack_size = w3;
- Register function = x4;
-
- // The value stays in x0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed. r1
- // will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- __ Pop(generator_object);
-
- // Check generator state.
- Label wrong_state, closed_state, done;
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), eq, &closed_state);
- __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), lt, &wrong_state);
-
- // Load suspended function and context.
- __ Ldr(cp, FieldMemOperand(generator_object,
- JSGeneratorObject::kContextOffset));
- __ Ldr(function, FieldMemOperand(generator_object,
- JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kReceiverOffset));
- __ Push(x10);
-
- // Push holes for the rest of the arguments to the generator function.
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-
- // The number of arguments is stored as an int32_t, and -1 is a marker
- // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
- // extension to correctly handle it. However, in this case, we operate on
- // 32-bit W registers, so extension isn't required.
- __ Ldr(w10, FieldMemOperand(x10,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
-
- // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
- // instead of a constant count, and use it to replace this loop.
- Label push_argument_holes, push_frame;
- __ Bind(&push_argument_holes);
- __ Subs(w10, w10, 1);
- __ B(mi, &push_frame);
- __ Push(the_hole);
- __ B(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame;
- __ Bind(&push_frame);
- __ Bl(&resume_frame);
- __ B(&done);
-
- __ Bind(&resume_frame);
- __ Push(lr, // Return address.
- fp, // Caller's frame pointer.
- cp, // Callee's context.
- function); // Callee's JS Function.
- __ Add(fp, __ StackPointer(), kPointerSize * 2);
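- // After these pushes the stack holds (from high to low) lr, fp, cp and the
- // function, and fp now points at the saved fp slot, i.e. a standard JS
- // frame.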
-
- // Load and untag the operand stack size.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kOperandStackOffset));
- __ Ldr(operand_stack_size,
- UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ Cbnz(operand_stack_size, &slow_resume);
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- __ Ldrsw(x11,
- UntagSmiFieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Add(x10, x10, x11);
- __ Mov(x12, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ Str(x12, FieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Br(x10);
-
- __ Bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register
- // instead of a constant count, and use it to replace this loop.
- Label push_operand_holes, call_resume;
- __ Bind(&push_operand_holes);
- __ Subs(operand_stack_size, operand_stack_size, 1);
- __ B(mi, &call_resume);
- __ Push(the_hole);
- __ B(&push_operand_holes);
-
- __ Bind(&call_resume);
- __ Mov(x10, Operand(Smi::FromInt(resume_mode)));
- __ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
- // Not reached: the runtime call returns elsewhere.
- __ Unreachable();
-
- // Reach here when generator is closed.
- __ Bind(&closed_state);
- if (resume_mode == JSGeneratorObject::NEXT) {
- // Return completed iterator result when generator is closed.
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10);
- // Pop value from top-of-stack slot; box result into result register.
- EmitCreateIteratorResult(true);
- } else {
- // Throw the provided value.
- __ Push(value_reg);
- __ CallRuntime(Runtime::kThrow, 1);
- }
- __ B(&done);
-
- // Throw error if we attempt to operate on a running generator.
- __ Bind(&wrong_state);
- __ Push(generator_object);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
- __ Bind(&done);
- context()->Plug(result_register());
-}
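The resume path above dispatches on the generator's continuation field: zero means closed, a negative value means the generator is already executing (a wrong state to resume), and a positive value is the code offset to jump back into. A small C++ model of that classification, with illustrative constants mirroring the STATIC_ASSERTs:

#include <cassert>
#include <cstdio>

// Continuation encoding: executing is negative, closed is zero, and a
// suspended generator stores a positive code offset to resume at.
enum : int {
  kGeneratorExecuting = -1,
  kGeneratorClosed = 0,
  // Any value > 0 is a resume offset into the generator's code.
};

const char* Classify(int continuation) {
  if (continuation == kGeneratorClosed) return "closed";      // eq branch.
  if (continuation < kGeneratorClosed) return "wrong state";  // lt branch.
  return "suspended";  // Fall through: load context/function and resume.
}

int main() {
  assert(Classify(kGeneratorExecuting)[0] == 'w');
  std::printf("%s %s %s\n", Classify(-1), Classify(0), Classify(42));
  return 0;
}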
-
-
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- Handle<Map> map(isolate()->native_context()->generator_result_map());
-
- // Allocate and populate an object with this form: { value: VAL, done: DONE }
-
- Register result = x0;
- __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- __ Bind(&allocated);
- Register map_reg = x1;
- Register result_value = x2;
- Register boolean_done = x3;
- Register empty_fixed_array = x4;
- __ Mov(map_reg, Operand(map));
- __ Pop(result_value);
- __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
- __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
- ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
- // TODO(jbramley): Use Stp if possible.
- __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
- __ Str(empty_fixed_array,
- FieldMemOperand(result, JSObject::kPropertiesOffset));
- __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
- __ Str(result_value,
- FieldMemOperand(result,
- JSGeneratorObject::kResultValuePropertyOffset));
- __ Str(boolean_done,
- FieldMemOperand(result,
- JSGeneratorObject::kResultDonePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
- x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-}
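EmitCreateIteratorResult fills in exactly five pointer-sized slots, which is what the ASSERT_EQ on the instance size pins down. A hypothetical C++ layout mirroring those five stores (field names are stand-ins; real V8 objects are tagged and accessed via offset constants):

#include <cstddef>
#include <cstdio>

struct IteratorResultObject {
  void* map;         // HeapObject::kMapOffset
  void* properties;  // JSObject::kPropertiesOffset (empty_fixed_array)
  void* elements;    // JSObject::kElementsOffset   (empty_fixed_array)
  void* value;       // kResultValuePropertyOffset  (needs a write barrier)
  void* done;        // kResultDonePropertyOffset   (true/false, in root set)
};

static_assert(sizeof(IteratorResultObject) == 5 * sizeof(void*),
              "mirrors ASSERT_EQ(map->instance_size(), 5 * kPointerSize)");

int main() {
  std::printf("value slot at offset %zu\n",
              offsetof(IteratorResultObject, value));
  return 0;
}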
-
-
-// TODO(all): I don't like this method.
-// It seems to me that in too many places x0 is used in place of this.
-// Also, this function is not suitable for all places where x0 should be
-// abstracted (e.g. when used as an argument). But some places assume that the
-// first argument register is x0, and use this function instead.
-// Considering that most of the register allocation is hard-coded in the
-// FullCodeGen, that it is unlikely we will need to change it extensively, and
-// that abstracting the allocation through functions would not yield any
-// performance benefit, I think the existence of this function is debatable.
-Register FullCodeGenerator::result_register() {
- return x0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
- __ Str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ Ldr(dst, ContextMemOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- ASSERT(kSmiTag == 0);
- __ Push(xzr);
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
- __ Push(x10);
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(x10);
- }
-}
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
- ASSERT(!result_register().is(x10));
-  // Preserve the result register while executing the finally block.
-  // Also 'cook' the return address in lr: push it to the stack as a
-  // smi-encoded delta from the code object (Code*).
- __ Sub(x10, lr, Operand(masm_->CodeObject()));
- __ SmiTag(x10);
- __ Push(result_register(), x10);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x10, Operand(pending_message_obj));
- __ Ldr(x10, MemOperand(x10));
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Mov(x11, Operand(has_pending_message));
- __ Ldr(x11, MemOperand(x11));
- __ SmiTag(x11);
-
- __ Push(x10, x11);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Mov(x10, Operand(pending_message_script));
- __ Ldr(x10, MemOperand(x10));
- __ Push(x10);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
- ASSERT(!result_register().is(x10));
-
- // Restore pending message from stack.
- __ Pop(x10, x11, x12);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Mov(x13, Operand(pending_message_script));
- __ Str(x10, MemOperand(x13));
-
- __ SmiUntag(x11);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Mov(x13, Operand(has_pending_message));
- __ Str(x11, MemOperand(x13));
-
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x13, Operand(pending_message_obj));
- __ Str(x12, MemOperand(x13));
-
- // Restore result register and cooked return address from the stack.
- __ Pop(x10, result_register());
-
- // Uncook the return address (see EnterFinallyBlock).
- __ SmiUntag(x10);
- __ Add(x11, x10, Operand(masm_->CodeObject()));
- __ Br(x11);
-}
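Cooking the return address converts a raw code pointer into a GC-safe, smi-encoded offset from the code object; uncooking reverses it when the finally block exits. A sketch of the arithmetic, assuming a 1-bit smi tag as a stand-in for V8's actual smi encoding:

#include <cassert>
#include <cstdint>

uintptr_t Cook(uintptr_t return_address, uintptr_t code_object) {
  uintptr_t delta = return_address - code_object;  // Sub(x10, lr, CodeObject)
  return delta << 1;                               // SmiTag(x10)
}

uintptr_t Uncook(uintptr_t cooked, uintptr_t code_object) {
  return (cooked >> 1) + code_object;  // SmiUntag + Add(CodeObject)
}

int main() {
  uintptr_t code = 0x10000, ret = 0x10420;
  // Round-trips even if the GC were to move 'code' between the two calls,
  // as long as the delta is recomputed against the relocated code object.
  assert(Uncook(Cook(ret, code), code) == ret);
  return 0;
}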
-
-
-#undef __
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
- Address pc,
- BackEdgeState target_state,
- Code* replacement_code) {
- // Turn the jump into a nop.
- Address branch_address = pc - 3 * kInstructionSize;
- PatchingAssembler patcher(branch_address, 1);
-
- switch (target_state) {
- case INTERRUPT:
- // <decrement profiling counter>
- // .. .. .. .. b.pl ok
- // .. .. .. .. ldr x16, pc+<interrupt stub address>
- // .. .. .. .. blr x16
- // ... more instructions.
- // ok-label
- // Jump offset is 6 instructions.
- ASSERT(Instruction::Cast(branch_address)
- ->IsNop(Assembler::INTERRUPT_CODE_NOP));
- patcher.b(6, pl);
- break;
- case ON_STACK_REPLACEMENT:
- case OSR_AFTER_STACK_CHECK:
- // <decrement profiling counter>
- // .. .. .. .. mov x0, x0 (NOP)
- // .. .. .. .. ldr x16, pc+<on-stack replacement address>
- // .. .. .. .. blr x16
- ASSERT(Instruction::Cast(branch_address)->IsCondBranchImm());
- ASSERT(Instruction::Cast(branch_address)->ImmPCOffset() ==
- 6 * kInstructionSize);
- patcher.nop(Assembler::INTERRUPT_CODE_NOP);
- break;
- }
-
- // Replace the call address.
- Instruction* load = Instruction::Cast(pc)->preceding(2);
- Address interrupt_address_pointer =
- reinterpret_cast<Address>(load) + load->ImmPCOffset();
-  ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
-          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
-                                     ->builtins()
-                                     ->OnStackReplacement()
-                                     ->entry())) ||
-         (Memory::uint64_at(interrupt_address_pointer) ==
-          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
-                                     ->builtins()
-                                     ->InterruptCheck()
-                                     ->entry())) ||
-         (Memory::uint64_at(interrupt_address_pointer) ==
-          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
-                                     ->builtins()
-                                     ->OsrAfterStackCheck()
-                                     ->entry())));
- Memory::uint64_at(interrupt_address_pointer) =
- reinterpret_cast<uint64_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
-}
-
-
-BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc) {
- // TODO(jbramley): There should be some extra assertions here (as in the ARM
- // back-end), but this function is gone in bleeding_edge so it might not
- // matter anyway.
- Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
-
- if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
- Instruction* load = Instruction::Cast(pc)->preceding(2);
- uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
- load->ImmPCOffset());
- if (entry == reinterpret_cast<uint64_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
- return ON_STACK_REPLACEMENT;
- } else if (entry == reinterpret_cast<uint64_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry())) {
- return OSR_AFTER_STACK_CHECK;
- } else {
- UNREACHABLE();
- }
- }
-
- return INTERRUPT;
-}
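Taken together, PatchAt and GetBackEdgeState treat a back edge as a tiny two-part state: whether the branch slot holds b.pl or a NOP, and which builtin address sits in the literal slot the ldr reads. A C++ model of that decoding; the booleans stand in for the instruction and literal-pool inspection the real code performs:

#include <cstdio>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

BackEdgeState Decode(bool branch_is_nop, bool entry_is_osr_after_check) {
  if (!branch_is_nop) return INTERRUPT;  // b.pl is still in place.
  return entry_is_osr_after_check ? OSR_AFTER_STACK_CHECK
                                  : ON_STACK_REPLACEMENT;
}

int main() {
  std::printf("%d %d %d\n", Decode(false, false), Decode(true, false),
              Decode(true, true));
  return 0;
}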
-
-
-#define __ ACCESS_MASM(masm())
-
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ Peek(cp, StackHandlerConstants::kContextOffset);
- __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ Bl(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/ic-a64.cc b/deps/v8/src/a64/ic-a64.cc
deleted file mode 100644
index 93d7857b05..0000000000
--- a/deps/v8/src/a64/ic-a64.cc
+++ /dev/null
@@ -1,1413 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "a64/assembler-a64.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// "type" holds an instance type on entry and is not clobbered.
-// The generated code branches to "global_object" if type is any kind of
-// global JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
- __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
- __ B(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-//
-// "receiver" holds the receiver on entry and is unchanged.
-// "elements" holds the property dictionary on fall through.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register scratch0,
- Register scratch1,
- Label* miss) {
- ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- // Let t be the object instance type, we want:
- // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
- // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
- // check the lower bound.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
- miss, lt);
-
- // scratch0 now contains the map of the receiver and scratch1 the object type.
- Register map = scratch0;
- Register type = scratch1;
-
- // Check if the receiver is a global JS object.
- GenerateGlobalInstanceTypeCheck(masm, type, miss);
-
- // Check that the object does not require access checks.
- __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
- __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
-
- // Check that the properties dictionary is valid.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
-}
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- ASSERT(!AreAliased(elements, name, scratch1, scratch2));
- ASSERT(!AreAliased(result, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal property.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- __ B(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ Ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value: The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- static const int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, kTypeAndReadOnlyMask);
- __ B(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- static const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
- __ Str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ Mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
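The Tst against kTypeAndReadOnlyMask rejects any dictionary entry that is read-only or not a plain data property, sending those stores to the miss handler. A sketch of that predicate with illustrative bit positions (not V8's real PropertyDetails layout):

#include <cassert>
#include <cstdint>

constexpr uint32_t kTypeFieldMask = 0x7;    // 0 == NORMAL (plain data).
constexpr uint32_t kReadOnlyBit = 1u << 3;  // AttributesField READ_ONLY.
constexpr uint32_t kTypeAndReadOnlyMask = kTypeFieldMask | kReadOnlyBit;

bool CanStoreFast(uint32_t details) {
  // Matches Tst + B(ne, miss): any set bit means "miss to the runtime".
  return (details & kTypeAndReadOnlyMask) == 0;
}

int main() {
  assert(CanStoreFast(0));              // Writable NORMAL property.
  assert(!CanStoreFast(kReadOnlyBit));  // Read-only: go to miss.
  assert(!CanStoreFast(2));             // Non-NORMAL type: go to miss.
  return 0;
}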
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for a regular JS object and returns the map of the
-// receiver in 'map_scratch' if the receiver is not a smi.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map_scratch,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- ASSERT(!AreAliased(map_scratch, scratch));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
- __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch, interceptor_bit, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object, we enter the
-  // runtime system to make sure that indexing into string objects works
- // as intended.
- STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- __ Cmp(scratch, JS_OBJECT_TYPE);
- __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
-//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver on exit.
-//
-// elements_map - holds the elements map on exit if the not_fast_array branch is
-// taken. Otherwise, this is used as a scratch register.
-//
-// result - holds the result on exit if the load succeeded.
-//               Allowed to be the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register elements_map,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* slow) {
- ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
-
- // Check for fast array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
- not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
-
- // The elements_map register is only used for the not_fast_array path, which
- // was handled above. From this point onward it is a scratch register.
- Register scratch1 = elements_map;
-
- // Check that the key (index) is within bounds.
- __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(key, scratch1);
- __ B(hs, slow);
-
- // Fast case: Do the load.
- __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
-  // In case the loaded value is the_hole, we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
-
- // Move the value to the result register.
- // 'result' can alias with 'receiver' or 'key' but these two must be
- // preserved if we jump to 'slow'.
- __ Mov(result, scratch2);
-}
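The Cmp/B(hs) pair above is an unsigned bounds check on two smi-tagged values, so an out-of-range or negative key takes the slow path with a single comparison. A sketch, again using a 1-bit smi tag as a stand-in for V8's encoding:

#include <cassert>
#include <cstdint>

bool InBounds(int64_t key, int64_t length) {
  uint64_t tagged_key = static_cast<uint64_t>(key) << 1;
  uint64_t tagged_length = static_cast<uint64_t>(length) << 1;
  // hs is an unsigned >=, so "tagged_key >= tagged_length" goes slow; a
  // negative tagged key compares as a huge unsigned value and fails too.
  return tagged_key < tagged_length;
}

int main() {
  assert(InBounds(0, 4) && InBounds(3, 4));
  assert(!InBounds(4, 4));
  assert(!InBounds(-1, 4));  // Negative key caught by the unsigned compare.
  return 0;
}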
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is done the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map_scratch,
- Register hash_scratch,
- Label* index_string,
- Label* not_unique) {
- ASSERT(!AreAliased(key, map_scratch, hash_scratch));
-
- // Is the key a name?
- Label unique;
- __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
- not_unique, hi);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ B(eq, &unique);
-
- // Is the string an array index with cached numeric value?
- __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch,
- Name::kContainsCachedArrayIndexMask,
- index_string);
-
- // Is the string internalized? We know it's a string, so a single bit test is
- // enough.
- __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
- __ Bind(&unique);
- // Fall through if the key is a unique name.
-}
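The hash-field test above lets a string key that caches a numeric index skip the name lookup entirely and branch to the smi-key path. A sketch with an illustrative bit layout (V8's real Name hash-field encoding differs):

#include <cassert>
#include <cstdint>

// Here bit 0 means "no cached array index" and the index itself lives in
// the upper bits; both choices are stand-ins.
constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 0;
constexpr int kArrayIndexShift = 8;

bool HasCachedArrayIndex(uint32_t hash_field) {
  // TestAndBranchIfAllClear(..., index_string): all mask bits clear means
  // the field caches a numeric index.
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}

uint32_t IndexFromHash(uint32_t hash_field) {
  return hash_field >> kArrayIndexShift;  // Analogue of __ IndexFromHash.
}

int main() {
  uint32_t field = 7u << kArrayIndexShift;  // Caches index 7.
  assert(HasCachedArrayIndex(field) && IndexFromHash(field) == 7);
  assert(!HasCachedArrayIndex(field | 1));
  return 0;
}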
-
-
-// Neither 'object' nor 'key' are modified by this function.
-//
-// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
-// left with the object's elements map. Otherwise, it is used as a scratch
-// register.
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register map,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
-
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
- slow_case, lt);
-
- // Check that the key is a positive smi.
- __ JumpIfNotSmi(key, slow_case);
- __ Tbnz(key, kXSignBit, slow_case);
-
- // Load the elements object and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup.
- __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
- __ Cmp(key, scratch1);
- __ B(hs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- static const int offset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ Add(scratch1, map, offset);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
-
- // Load value from context and return it.
- __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
- __ SmiUntag(scratch1);
- __ Add(scratch2, scratch2, Context::kHeaderSize - kHeapObjectTag);
- return MemOperand(scratch2, scratch1, LSL, kPointerSizeLog2);
-}
-
-
-// The 'parameter_map' register must be loaded with the parameter map of the
-// arguments object and is overwritten.
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- ASSERT(!AreAliased(key, parameter_map, scratch));
-
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(
- backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Cmp(key, scratch);
- __ B(hs, slow_case);
-
- __ Add(backing_store,
- backing_store,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch, key);
- return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
-}
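Conceptually, each slot of the parameter map either names a context slot (the argument aliases a local variable) or holds the hole, in which case the value lives in a plain backing store. A C++ model of that two-level lookup; the types are illustrative, since V8 keeps all of this in FixedArrays:

#include <cstdio>
#include <vector>

struct ArgumentsObject {
  static constexpr int kHole = -1;
  std::vector<int> parameter_map;  // Context slot index, or kHole.
  std::vector<int> context;        // Aliased argument storage.
  std::vector<int> backing_store;  // Unmapped argument storage.

  int Lookup(size_t key) const {
    if (key < parameter_map.size() && parameter_map[key] != kHole) {
      return context[parameter_map[key]];  // Mapped case.
    }
    return backing_store[key];             // Unmapped case.
  }
};

int main() {
  ArgumentsObject args{{0, ArgumentsObject::kHole}, {42}, {7, 8}};
  std::printf("%d %d\n", args.Lookup(0), args.Lookup(1));  // Prints "42 8".
  return 0;
}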
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, x0, x2, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
-
- // x1 now holds the property dictionary.
- GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
- ASM_LOCATION("LoadIC::GenerateMiss");
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
-
- // TODO(jbramley): Does the target actually expect an argument in x3, or is
- // this inherited from ARM's push semantics?
- __ Mov(x3, x0);
- __ Push(x3, x2);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x2 : name
- // -- lr : return address
- // -- x0 : receiver
- // -----------------------------------
-
- // TODO(jbramley): Does the target actually expect an argument in x3, or is
- // this inherited from ARM's push semantics?
- __ Mov(x3, x0);
- __ Push(x3, x2);
-
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Register result = x0;
- Register key = x0;
- Register receiver = x1;
- Label miss, unmapped;
-
- Register map_scratch = x2;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
- __ Ldr(result, mapped_location);
- __ Ret();
-
- __ Bind(&unmapped);
-  // The parameter map is left in map_scratch when the jump to 'unmapped'
-  // is taken.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
- __ Ldr(x2, unmapped_location);
- __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
-  // Move the result into x0. x0 must be preserved on miss.
- __ Mov(result, x2);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateNonStrictArguments");
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -----------------------------------
-
- Label slow, notin;
-
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
- Register map = x3;
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register mapped1 = x4;
- Register mapped2 = x5;
-
- MemOperand mapped =
- GenerateMappedArgumentsLookup(masm, receiver, key, map,
- mapped1, mapped2,
- &notin, &slow);
- Operand mapped_offset = mapped.OffsetAsOperand();
- __ Str(value, mapped);
- __ Add(x10, mapped.base(), mapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
-
- __ Bind(&notin);
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register unmapped1 = map; // This is assumed to alias 'map'.
- Register unmapped2 = x4;
- MemOperand unmapped =
- GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
- Operand unmapped_offset = unmapped.OffsetAsOperand();
- __ Str(value, unmapped);
- __ Add(x10, unmapped.base(), unmapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(unmapped.base(), x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
-
- __ Push(x1, x0);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Register key = x0;
- Register receiver = x1;
-
- __ Push(receiver, key);
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- ASSERT(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label check_number_dictionary;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
- __ Ret();
-
- __ Bind(&check_number_dictionary);
- __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
- // Check whether we have a number dictionary.
- __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
- __ LoadFromNumberDictionary(
- slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
- __ Ret();
-}
-
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- ASSERT(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label probe_dictionary, property_array_property;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
-
- // If the receiver is a fast-case object, check the keyed lookup cache.
- // Otherwise probe the dictionary.
- __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
- // We keep the map of the receiver in scratch1.
- Register receiver_map = scratch1;
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
- __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(scratch2, scratch2, mask);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ Mov(scratch3, Operand(cache_keys));
- __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
-    // Load the map and make scratch3 point to the next entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, &try_next_entry);
- __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
- __ Cmp(key, scratch4);
- __ B(eq, &hit_on_nth_entry[i]);
- __ Bind(&try_next_entry);
- }
-
- // Last entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, slow);
- __ Ldr(scratch4, MemOperand(scratch3));
- __ Cmp(key, scratch4);
- __ B(ne, slow);
-
- // Get field offset.
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ Bind(&hit_on_nth_entry[i]);
- __ Mov(scratch3, Operand(cache_field_offsets));
- if (i != 0) {
- __ Add(scratch2, scratch2, i);
- }
- __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
- __ Ldrb(scratch5,
- FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
- __ Subs(scratch4, scratch4, scratch5);
- __ B(ge, &property_array_property);
- if (i != 0) {
- __ B(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ Bind(&load_in_object_property);
- __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
- __ Add(scratch5, scratch5, scratch4); // Index from start of object.
- __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
- __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Load property array property.
- __ Bind(&property_array_property);
- __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it exists.
- __ Bind(&probe_dictionary);
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
- // Load the property.
- GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, scratch1, scratch2);
- __ Ret();
-}
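The cache index above is computed by mixing bits of the receiver's map pointer with the name's hash field and masking into the table. A sketch of the indexing, with stand-in shift and mask constants rather than KeyedLookupCache's real values:

#include <cstdint>
#include <cstdio>

constexpr int kMapHashShift = 5;
constexpr int kHashShift = 2;
constexpr uint32_t kCapacityMask = (1u << 7) - 1;  // 128 buckets.

uint32_t CacheIndex(uintptr_t map, uint32_t name_hash_field) {
  // Mirrors the Mov/Ldr/Eor/And sequence: shifted map bits xor shifted
  // name hash, masked to the cache capacity.
  uint32_t hash = static_cast<uint32_t>(map >> kMapHashShift) ^
                  (name_hash_field >> kHashShift);
  return hash & kCapacityMask;
}

int main() {
  std::printf("bucket %u\n", CacheIndex(0xdeadbeef00, 0x1234));
  return 0;
}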
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Label slow, check_name, index_smi, index_name;
-
- Register key = x0;
- Register receiver = x1;
-
- __ JumpIfNotSmi(key, &check_name);
- __ Bind(&index_smi);
-  // Now the key is known to be a smi. This point is also jumped to from
-  // below, where a numeric string key is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
-
- // Slow case, key and receiver still in x0 and x1.
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
- GenerateRuntimeGetProperty(masm);
-
- __ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
-
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
-
- __ Bind(&index_name);
- __ IndexFromHash(x3, key);
- // Now jump to the place where smi keys are handled.
- __ B(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key (index)
- // -- x1 : receiver
- // -----------------------------------
- Label miss;
-
- Register index = x0;
- Register receiver = x1;
- Register result = x0;
- Register scratch = x3;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Label slow;
- Register key = x0;
- Register receiver = x1;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
-  // Check that the key is an array index, that is, a Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Get the map of the receiver.
- Register map = x2;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
- ASSERT(kSlowCaseBitFieldMask ==
- ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
- __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
- __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateMiss");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSlow");
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(x2, x1, x0);
-
- // Push PropertyAttributes(NONE) and strict_mode for runtime call.
- STATIC_ASSERT(NONE == 0);
- __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
- __ Push(xzr, x10);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- ASSERT(!AreAliased(
- value, key, receiver, receiver_map, elements_map, elements, x10, x11));
-
- Label transition_smi_elements;
- Label transition_double_elements;
- Label fast_double_without_map_check;
- Label non_double_value;
- Label finish_store;
-
- __ Bind(fast_object);
- if (check_map == kCheckMap) {
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because there
- // may be a callback on the element.
- Label holecheck_passed;
- // TODO(all): This address calculation is repeated later (for the store
- // itself). We should keep the result to avoid doing the work twice.
- __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-  __ Bind(&holecheck_passed);
-
- // Smi stores don't require further checks.
- __ JumpIfSmi(value, &finish_store);
-
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
- __ Bind(&finish_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Operand(Smi::FromInt(1)));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
-
- Register address = x11;
- __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Str(value, MemOperand(address));
-
- Label dont_record_write;
- __ JumpIfSmi(value, &dont_record_write);
-
- // Update write barrier for the elements array address.
- __ Mov(x10, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Bind(&dont_record_write);
- __ Ret();
-
-
- __ Bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so, go
-  // to the runtime.
- // TODO(all): This address calculation was done earlier. We should keep the
- // result to avoid doing the work twice.
- __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
- __ Bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements,
- x10,
- d0,
- d1,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Operand(Smi::FromInt(1)));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
-
- __ Bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- x10,
- slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&fast_double_without_map_check);
-
- __ Bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-
- __ Bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- slow);
- ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-}
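The three transition blocks above walk a one-way lattice: FAST_SMI_ELEMENTS can become FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS can become FAST_ELEMENTS, never the reverse. A small C++ model of the kind chosen after a store, restricted to the three kinds this helper handles:

#include <cstdio>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

// Mirrors the transition blocks: smi stores never force a transition; a
// heap number pushes smi arrays to double; any other object pushes smi or
// double arrays to plain FAST_ELEMENTS.
ElementsKind KindAfterStore(ElementsKind kind, bool value_is_smi,
                            bool value_is_heap_number) {
  if (value_is_smi) return kind;
  if (kind == FAST_SMI_ELEMENTS) {
    return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  }
  if (kind == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
    return FAST_ELEMENTS;  // The transition_double_elements path.
  }
  return kind;
}

int main() {
  std::printf("%d %d %d\n",
              KindAfterStore(FAST_SMI_ELEMENTS, false, true),      // 1
              KindAfterStore(FAST_SMI_ELEMENTS, false, false),     // 2
              KindAfterStore(FAST_DOUBLE_ELEMENTS, false, false)); // 2
  return 0;
}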
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : key
- // -- x2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow;
- Label array;
- Label fast_object;
- Label extra;
- Label fast_object_grow;
- Label fast_double_grow;
- Label fast_double;
-
- Register value = x0;
- Register key = x1;
- Register receiver = x2;
- Register receiver_map = x3;
- Register elements = x4;
- Register elements_map = x5;
-
- __ JumpIfNotSmi(key, &slow);
- __ JumpIfSmi(receiver, &slow);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
-
- // Check if the object is a JS array or not.
- Register instance_type = x10;
- __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
- __ B(eq, &array);
- // Check that the object is some kind of JSObject.
- __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
- __ B(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(hi, &fast_object);
-
-
- __ Bind(&slow);
-  // Slow case: bail out to the runtime.
- // Live values:
- // x0: value
- // x1: key
- // x2: receiver
- GenerateRuntimeSetProperty(masm, strict_mode);
-
-
- __ Bind(&extra);
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
-
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(ls, &slow);
-
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(eq, &fast_object_grow);
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ B(eq, &fast_double_grow);
- __ B(&slow);
-
-
- __ Bind(&array);
- // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it is,
-  // the length is always a smi.
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(eq, &extra); // We can handle the case where we are appending 1 element.
- __ B(lo, &slow);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, x1, x2, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(x1, x2, x0);
-
- // Tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- Register value = x0;
- Register receiver = x1;
- Register name = x2;
- Register dictionary = x3;
-
- GenerateNameDictionaryReceiverCheck(
- masm, receiver, dictionary, x4, x5, &miss);
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(x1, x2, x0);
-
- __ Mov(x11, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
- __ Push(x11, x10);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, name and value for runtime call.
- __ Push(x1, x2, x0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return al;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address info_address =
- Assembler::return_address_from_call_start(address);
-
- InstructionSequence* patch_info = InstructionSequence::At(info_address);
- return patch_info->IsInlineData();
-}
-
-
-// Activate a SMI fast-path by patching the instructions generated by
-// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
-// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The patch information is encoded in the instruction stream using
- // instructions which have no side effects, so we can safely execute them.
- // The patch information is encoded directly after the call to the helper
- // function which is requesting this patch operation.
- Address info_address =
- Assembler::return_address_from_call_start(address);
- InlineSmiCheckInfo info(info_address);
-
- // Check and decode the patch information instruction.
- if (!info.HasSmiCheck()) {
- return;
- }
-
- if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
- address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
- }
-
- // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
- // and JumpPatchSite::EmitJumpIfSmi().
- // Changing
- // tb(n)z xzr, #0, <target>
- // to
- // tb(!n)z test_reg, #0, <target>
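-  // For illustration (x10 is a hypothetical example register): a site
-  // emitted by JumpPatchSite::EmitJumpIfNotSmi() initially contains
-  //   tbz xzr, #0, <target>
-  // Bit 0 of xzr is always clear, so this always branches and the not-smi
-  // path is taken unconditionally. Enabling the check patches it to
-  //   tbnz x10, #0, <target>
-  // which branches only when the tag bit of x10 is set, i.e. only when x10
-  // does not hold a smi (kSmiTag is 0).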
- Instruction* to_patch = info.SmiCheck();
- PatchingAssembler patcher(to_patch, 1);
- ASSERT(to_patch->IsTestBranch());
- ASSERT(to_patch->ImmTestBranchBit5() == 0);
- ASSERT(to_patch->ImmTestBranchBit40() == 0);
-
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
-
- int branch_imm = to_patch->ImmTestBranch();
- Register smi_reg;
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(to_patch->Rt() == xzr.code());
- smi_reg = info.SmiRegister();
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(to_patch->Rt() != xzr.code());
- smi_reg = xzr;
- }
-
- if (to_patch->Mask(TestBranchMask) == TBZ) {
- // This is JumpIfNotSmi(smi_reg, branch_imm).
- patcher.tbnz(smi_reg, 0, branch_imm);
- } else {
- ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
- // This is JumpIfSmi(smi_reg, branch_imm).
- patcher.tbz(smi_reg, 0, branch_imm);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/instructions-a64.cc b/deps/v8/src/a64/instructions-a64.cc
deleted file mode 100644
index 4496d56753..0000000000
--- a/deps/v8/src/a64/instructions-a64.cc
+++ /dev/null
@@ -1,334 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#define A64_DEFINE_FP_STATICS
-
-#include "a64/instructions-a64.h"
-#include "a64/assembler-a64-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool Instruction::IsLoad() const {
- if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
- return false;
- }
-
- if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
- return Mask(LoadStorePairLBit) != 0;
- } else {
- LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
- switch (op) {
- case LDRB_w:
- case LDRH_w:
- case LDR_w:
- case LDR_x:
- case LDRSB_w:
- case LDRSB_x:
- case LDRSH_w:
- case LDRSH_x:
- case LDRSW_x:
- case LDR_s:
- case LDR_d: return true;
- default: return false;
- }
- }
-}
-
-
-bool Instruction::IsStore() const {
- if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
- return false;
- }
-
- if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
- return Mask(LoadStorePairLBit) == 0;
- } else {
- LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
- switch (op) {
- case STRB_w:
- case STRH_w:
- case STR_w:
- case STR_x:
- case STR_s:
- case STR_d: return true;
- default: return false;
- }
- }
-}
-
-
-static uint64_t RotateRight(uint64_t value,
- unsigned int rotate,
- unsigned int width) {
- ASSERT(width <= 64);
- rotate &= 63;
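-  // For example, RotateRight(0b0001, 1, 4) == 0b1000.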
- return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
- (value >> rotate);
-}
-
-
-static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
- uint64_t value,
- unsigned width) {
- ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
- (width == 32));
- ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
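-  // For example, value 0b10 with width 2 repeats to 0xaaaaaaaa across a
-  // 32-bit register.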
- uint64_t result = value & ((1UL << width) - 1UL);
- for (unsigned i = width; i < reg_size; i *= 2) {
- result |= (result << i);
- }
- return result;
-}
-
-
-// Logical immediates can't encode zero, so a return value of zero is used to
-// indicate a failure case, specifically where the constraints on imm_s are
-// not met.
-uint64_t Instruction::ImmLogical() {
- unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t n = BitN();
- int64_t imm_s = ImmSetBits();
- int64_t imm_r = ImmRotate();
-
- // An integer is constructed from the n, imm_s and imm_r bits according to
- // the following table:
- //
- // N imms immr size S R
- // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
- // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
- // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
- // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
- // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
- // 0 11110s xxxxxr 2 UInt(s) UInt(r)
- // (s bits must not be all set)
- //
- // A pattern is constructed of size bits, where the least significant S+1
- // bits are set. The pattern is rotated right by R, and repeated across a
- // 32 or 64-bit value, depending on destination register width.
- //
-
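-  // Worked example: N=0 and imms=0b111100 match the '11110s' row, giving an
-  // element size of 2 with S=0, i.e. the two-bit pattern 0b01. With
-  // immr=0b000001 the pattern is rotated right by one to 0b10, which repeats
-  // across a W destination as 0xaaaaaaaa.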
- if (n == 1) {
- if (imm_s == 0x3F) {
- return 0;
- }
- uint64_t bits = (1UL << (imm_s + 1)) - 1;
- return RotateRight(bits, imm_r, 64);
- } else {
- if ((imm_s >> 1) == 0x1F) {
- return 0;
- }
- for (int width = 0x20; width >= 0x2; width >>= 1) {
- if ((imm_s & width) == 0) {
- int mask = width - 1;
- if ((imm_s & mask) == mask) {
- return 0;
- }
- uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
- return RepeatBitsAcrossReg(reg_size,
- RotateRight(bits, imm_r & mask, width),
- width);
- }
- }
- }
- UNREACHABLE();
- return 0;
-}
-
-
-float Instruction::ImmFP32() {
- // ImmFP: abcdefgh (8 bits)
- // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
- // where B is b ^ 1
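-  // For example, ImmFP() == 0x70 (abcdefgh = 01110000) expands to
-  // 0x3f800000, i.e. 1.0f.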
- uint32_t bits = ImmFP();
- uint32_t bit7 = (bits >> 7) & 0x1;
- uint32_t bit6 = (bits >> 6) & 0x1;
- uint32_t bit5_to_0 = bits & 0x3f;
- uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
-
- return rawbits_to_float(result);
-}
-
-
-double Instruction::ImmFP64() {
- // ImmFP: abcdefgh (8 bits)
- // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
- // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
- // where B is b ^ 1
- uint32_t bits = ImmFP();
- uint64_t bit7 = (bits >> 7) & 0x1;
- uint64_t bit6 = (bits >> 6) & 0x1;
- uint64_t bit5_to_0 = bits & 0x3f;
- uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
-
- return rawbits_to_double(result);
-}
-
-
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
- switch (op) {
- case STP_x:
- case LDP_x:
- case STP_d:
- case LDP_d: return LSDoubleWord;
- default: return LSWord;
- }
-}
-
-
-ptrdiff_t Instruction::ImmPCOffset() {
- ptrdiff_t offset;
- if (IsPCRelAddressing()) {
- // PC-relative addressing. Only ADR is supported.
- offset = ImmPCRel();
- } else if (BranchType() != UnknownBranchType) {
- // All PC-relative branches.
- // Relative branch offsets are instruction-size-aligned.
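-    // For example, an immediate field holding 4 encodes a PC offset of
-    // 16 bytes (four instructions ahead).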
- offset = ImmBranch() << kInstructionSizeLog2;
- } else {
- // Load literal (offset from PC).
- ASSERT(IsLdrLiteral());
-    // The offset is always shifted by 2 bits, even for loads to 64-bit
-    // registers.
- offset = ImmLLiteral() << kInstructionSizeLog2;
- }
- return offset;
-}
-
-
-Instruction* Instruction::ImmPCOffsetTarget() {
- return this + ImmPCOffset();
-}
-
-
-bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
- int32_t offset) {
- return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
-}
-
-
-bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
- int offset = target - this;
- return IsValidImmPCOffset(BranchType(), offset);
-}
-
-
-void Instruction::SetImmPCOffsetTarget(Instruction* target) {
- if (IsPCRelAddressing()) {
- SetPCRelImmTarget(target);
- } else if (BranchType() != UnknownBranchType) {
- SetBranchImmTarget(target);
- } else {
- SetImmLLiteral(target);
- }
-}
-
-
-void Instruction::SetPCRelImmTarget(Instruction* target) {
- // ADRP is not supported, so 'this' must point to an ADR instruction.
- ASSERT(Mask(PCRelAddressingMask) == ADR);
-
- Instr imm = Assembler::ImmPCRelAddress(target - this);
-
- SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
-}
-
-
-void Instruction::SetBranchImmTarget(Instruction* target) {
- ASSERT(((target - this) & 3) == 0);
- Instr branch_imm = 0;
- uint32_t imm_mask = 0;
- int offset = (target - this) >> kInstructionSizeLog2;
- switch (BranchType()) {
- case CondBranchType: {
- branch_imm = Assembler::ImmCondBranch(offset);
- imm_mask = ImmCondBranch_mask;
- break;
- }
- case UncondBranchType: {
- branch_imm = Assembler::ImmUncondBranch(offset);
- imm_mask = ImmUncondBranch_mask;
- break;
- }
- case CompareBranchType: {
- branch_imm = Assembler::ImmCmpBranch(offset);
- imm_mask = ImmCmpBranch_mask;
- break;
- }
- case TestBranchType: {
- branch_imm = Assembler::ImmTestBranch(offset);
- imm_mask = ImmTestBranch_mask;
- break;
- }
- default: UNREACHABLE();
- }
- SetInstructionBits(Mask(~imm_mask) | branch_imm);
-}
-
-
-void Instruction::SetImmLLiteral(Instruction* source) {
- ASSERT(((source - this) & 3) == 0);
- int offset = (source - this) >> kLiteralEntrySizeLog2;
- Instr imm = Assembler::ImmLLiteral(offset);
- Instr mask = ImmLLiteral_mask;
-
- SetInstructionBits(Mask(~mask) | imm);
-}
-
-
-// TODO(jbramley): We can't put this inline in the class because things like
-// xzr and Register are not defined in that header. Consider adding
-// instructions-a64-inl.h to work around this.
-bool InstructionSequence::IsInlineData() const {
- // Inline data is encoded as a single movz instruction which writes to xzr
- // (x31).
- return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
- // TODO(all): If we extend ::InlineData() to support bigger data, we need
- // to update this method too.
-}
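-// For example, the sequence
-//   movz xzr, #0x1234
-// satisfies IsInlineData(): it is a 64-bit movz writing to x31, carrying the
-// 16-bit payload 0x1234, and is harmless to execute since writes to xzr are
-// discarded.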
-
-
-// TODO(jbramley): We can't put this inline in the class because things like
-// xzr and Register are not defined in that header. Consider adding
-// instructions-a64-inl.h to work around this.
-uint64_t InstructionSequence::InlineData() const {
- ASSERT(IsInlineData());
- uint64_t payload = ImmMoveWide();
- // TODO(all): If we extend ::InlineData() to support bigger data, we need
- // to update this method too.
- return payload;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/instructions-a64.h b/deps/v8/src/a64/instructions-a64.h
deleted file mode 100644
index 472d4bf9fd..0000000000
--- a/deps/v8/src/a64/instructions-a64.h
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_INSTRUCTIONS_A64_H_
-#define V8_A64_INSTRUCTIONS_A64_H_
-
-#include "globals.h"
-#include "utils.h"
-#include "a64/constants-a64.h"
-#include "a64/utils-a64.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ISA constants. --------------------------------------------------------------
-
-typedef uint32_t Instr;
-
-// The following macros initialize a float/double variable with a bit pattern
-// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
-// symbol is defined as uint32_t/uint64_t initialized with the desired bit
-// pattern. Otherwise, the same symbol is declared as an external float/double.
-#if defined(A64_DEFINE_FP_STATICS)
-#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
-#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
-#else
-#define DEFINE_FLOAT(name, value) extern const float name
-#define DEFINE_DOUBLE(name, value) extern const double name
-#endif // defined(A64_DEFINE_FP_STATICS)
-
-DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
-DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
-DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
-DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
-
-// This value is a signalling NaN as both a double and as a float (taking the
-// least-significant word).
-DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
-DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
-
-// A similar value, but as a quiet NaN.
-DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
-DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
-
-#undef DEFINE_FLOAT
-#undef DEFINE_DOUBLE
-
-
-enum LSDataSize {
- LSByte = 0,
- LSHalfword = 1,
- LSWord = 2,
- LSDoubleWord = 3
-};
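-// The enumerators are log2 of the access size, i.e. 1, 2, 4 and 8 bytes
-// respectively.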
-
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
-
-enum ImmBranchType {
- UnknownBranchType = 0,
- CondBranchType = 1,
- UncondBranchType = 2,
- CompareBranchType = 3,
- TestBranchType = 4
-};
-
-enum AddrMode {
- Offset,
- PreIndex,
- PostIndex
-};
-
-enum FPRounding {
- // The first four values are encodable directly by FPCR<RMode>.
- FPTieEven = 0x0,
- FPPositiveInfinity = 0x1,
- FPNegativeInfinity = 0x2,
- FPZero = 0x3,
-
- // The final rounding mode is only available when explicitly specified by the
- // instruction (such as with fcvta). It cannot be set in FPCR.
- FPTieAway
-};
-
-enum Reg31Mode {
- Reg31IsStackPointer,
- Reg31IsZeroRegister
-};
-
-// Instructions. ---------------------------------------------------------------
-
-class Instruction {
- public:
- Instr InstructionBits() const {
- Instr bits;
- memcpy(&bits, this, sizeof(bits));
- return bits;
- }
-
- void SetInstructionBits(Instr new_instr) {
- memcpy(this, &new_instr, sizeof(new_instr));
- }
-
- int Bit(int pos) const {
- return (InstructionBits() >> pos) & 1;
- }
-
- uint32_t Bits(int msb, int lsb) const {
- return unsigned_bitextract_32(msb, lsb, InstructionBits());
- }
-
- int32_t SignedBits(int msb, int lsb) const {
- int32_t bits = *(reinterpret_cast<const int32_t*>(this));
- return signed_bitextract_32(msb, lsb, bits);
- }
-
- Instr Mask(uint32_t mask) const {
- return InstructionBits() & mask;
- }
-
- Instruction* following(int count = 1) {
- return this + count * kInstructionSize;
- }
-
- Instruction* preceding(int count = 1) {
- return this - count * kInstructionSize;
- }
-
- #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
- int64_t Name() const { return Func(HighBit, LowBit); }
- INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
- #undef DEFINE_GETTER
-
- // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
- // formed from ImmPCRelLo and ImmPCRelHi.
- int ImmPCRel() const {
- int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
- int const width = ImmPCRelLo_width + ImmPCRelHi_width;
- return signed_bitextract_32(width-1, 0, offset);
- }
-
- uint64_t ImmLogical();
- float ImmFP32();
- double ImmFP64();
-
- LSDataSize SizeLSPair() const {
- return CalcLSPairDataSize(
- static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
- }
-
- // Helpers.
- bool IsCondBranchImm() const {
- return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
- }
-
- bool IsUncondBranchImm() const {
- return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
- }
-
- bool IsCompareBranch() const {
- return Mask(CompareBranchFMask) == CompareBranchFixed;
- }
-
- bool IsTestBranch() const {
- return Mask(TestBranchFMask) == TestBranchFixed;
- }
-
- bool IsLdrLiteral() const {
- return Mask(LoadLiteralFMask) == LoadLiteralFixed;
- }
-
- bool IsLdrLiteralX() const {
- return Mask(LoadLiteralMask) == LDR_x_lit;
- }
-
- bool IsPCRelAddressing() const {
- return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
- }
-
- bool IsLogicalImmediate() const {
- return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
- }
-
- bool IsAddSubImmediate() const {
- return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
- }
-
- bool IsAddSubExtended() const {
- return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
- }
-
- // Match any loads or stores, including pairs.
- bool IsLoadOrStore() const {
- return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
- }
-
- // Match any loads, including pairs.
- bool IsLoad() const;
- // Match any stores, including pairs.
- bool IsStore() const;
-
- // Indicate whether Rd can be the stack pointer or the zero register. This
- // does not check that the instruction actually has an Rd field.
- Reg31Mode RdMode() const {
- // The following instructions use csp or wsp as Rd:
- // Add/sub (immediate) when not setting the flags.
- // Add/sub (extended) when not setting the flags.
- // Logical (immediate) when not setting the flags.
- // Otherwise, r31 is the zero register.
- if (IsAddSubImmediate() || IsAddSubExtended()) {
- if (Mask(AddSubSetFlagsBit)) {
- return Reg31IsZeroRegister;
- } else {
- return Reg31IsStackPointer;
- }
- }
- if (IsLogicalImmediate()) {
- // Of the logical (immediate) instructions, only ANDS (and its aliases)
- // can set the flags. The others can all write into csp.
- // Note that some logical operations are not available to
- // immediate-operand instructions, so we have to combine two masks here.
- if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
- return Reg31IsZeroRegister;
- } else {
- return Reg31IsStackPointer;
- }
- }
- return Reg31IsZeroRegister;
- }
-
- // Indicate whether Rn can be the stack pointer or the zero register. This
- // does not check that the instruction actually has an Rn field.
- Reg31Mode RnMode() const {
- // The following instructions use csp or wsp as Rn:
- // All loads and stores.
- // Add/sub (immediate).
- // Add/sub (extended).
- // Otherwise, r31 is the zero register.
- if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
- return Reg31IsStackPointer;
- }
- return Reg31IsZeroRegister;
- }
-
- ImmBranchType BranchType() const {
- if (IsCondBranchImm()) {
- return CondBranchType;
- } else if (IsUncondBranchImm()) {
- return UncondBranchType;
- } else if (IsCompareBranch()) {
- return CompareBranchType;
- } else if (IsTestBranch()) {
- return TestBranchType;
- } else {
- return UnknownBranchType;
- }
- }
-
- static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
- switch (branch_type) {
- case UncondBranchType:
- return ImmUncondBranch_width;
- case CondBranchType:
- return ImmCondBranch_width;
- case CompareBranchType:
- return ImmCmpBranch_width;
- case TestBranchType:
- return ImmTestBranch_width;
- default:
- UNREACHABLE();
- return 0;
- }
- }
-
- // The range of the branch instruction, expressed as 'instr +- range'.
- static int32_t ImmBranchRange(ImmBranchType branch_type) {
- return
- (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
- kInstructionSize;
- }
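-  // For example, a conditional branch has a 19-bit immediate, so its range
-  // is (1 << (19 + 2)) / 2 - 4 = +-(1MB - 4 bytes) around the instruction.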
-
- int ImmBranch() const {
- switch (BranchType()) {
- case CondBranchType: return ImmCondBranch();
- case UncondBranchType: return ImmUncondBranch();
- case CompareBranchType: return ImmCmpBranch();
- case TestBranchType: return ImmTestBranch();
- default: UNREACHABLE();
- }
- return 0;
- }
-
- bool IsBranchAndLinkToRegister() const {
- return Mask(UnconditionalBranchToRegisterMask) == BLR;
- }
-
- bool IsMovz() const {
- return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
- (Mask(MoveWideImmediateMask) == MOVZ_w);
- }
-
- bool IsMovk() const {
- return (Mask(MoveWideImmediateMask) == MOVK_x) ||
- (Mask(MoveWideImmediateMask) == MOVK_w);
- }
-
- bool IsMovn() const {
- return (Mask(MoveWideImmediateMask) == MOVN_x) ||
- (Mask(MoveWideImmediateMask) == MOVN_w);
- }
-
- bool IsNop(int n) {
- // A marking nop is an instruction
- // mov r<n>, r<n>
- // which is encoded as
- // orr r<n>, xzr, r<n>
- return (Mask(LogicalShiftedMask) == ORR_x) &&
- (Rd() == Rm()) &&
- (Rd() == n);
- }
-
- // Find the PC offset encoded in this instruction. 'this' may be a branch or
- // a PC-relative addressing instruction.
- // The offset returned is unscaled.
- ptrdiff_t ImmPCOffset();
-
- // Find the target of this instruction. 'this' may be a branch or a
- // PC-relative addressing instruction.
- Instruction* ImmPCOffsetTarget();
-
- static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
- bool IsTargetInImmPCOffsetRange(Instruction* target);
- // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
- // a PC-relative addressing instruction.
- void SetImmPCOffsetTarget(Instruction* target);
- // Patch a literal load instruction to load from 'source'.
- void SetImmLLiteral(Instruction* source);
-
- uint8_t* LiteralAddress() {
- int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
- return reinterpret_cast<uint8_t*>(this) + offset;
- }
-
- uint32_t Literal32() {
- uint32_t literal;
- memcpy(&literal, LiteralAddress(), sizeof(literal));
-
- return literal;
- }
-
- uint64_t Literal64() {
- uint64_t literal;
- memcpy(&literal, LiteralAddress(), sizeof(literal));
-
- return literal;
- }
-
- float LiteralFP32() {
- return rawbits_to_float(Literal32());
- }
-
- double LiteralFP64() {
- return rawbits_to_double(Literal64());
- }
-
- Instruction* NextInstruction() {
- return this + kInstructionSize;
- }
-
- Instruction* InstructionAtOffset(int64_t offset) {
- ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
- kInstructionSize));
- return this + offset;
- }
-
- template<typename T> static Instruction* Cast(T src) {
- return reinterpret_cast<Instruction*>(src);
- }
-
-
- void SetPCRelImmTarget(Instruction* target);
- void SetBranchImmTarget(Instruction* target);
-};
-
-
-// Where Instruction looks at instructions generated by the Assembler,
-// InstructionSequence looks at instruction sequences generated by the
-// MacroAssembler.
-class InstructionSequence : public Instruction {
- public:
- static InstructionSequence* At(Address address) {
- return reinterpret_cast<InstructionSequence*>(address);
- }
-
- // Sequences generated by MacroAssembler::InlineData().
- bool IsInlineData() const;
- uint64_t InlineData() const;
-};
-
-
-// Simulator/Debugger debug instructions ---------------------------------------
-// Each debug marker is represented by a HLT instruction. The immediate comment
-// field in the instruction is used to identify the type of debug marker. Each
-// marker encodes arguments in a different way, as described below.
-
-// Indicate to the Debugger that the instruction is a redirected call.
-const Instr kImmExceptionIsRedirectedCall = 0xca11;
-
-// Represent unreachable code. This is used as a guard in parts of the code that
-// should not be reachable, such as in data encoded inline in the instructions.
-const Instr kImmExceptionIsUnreachable = 0xdebf;
-
-// A pseudo 'printf' instruction. The arguments will be passed to the platform
-// printf method.
-const Instr kImmExceptionIsPrintf = 0xdeb1;
-// Parameters are stored in A64 registers as if the printf pseudo-instruction
-// was a call to the real printf method:
-//
-// x0: The format string, then either of:
-// x1-x7: Optional arguments.
-// d0-d7: Optional arguments.
-//
-// Floating-point and integer arguments are passed in separate sets of
-// registers in AAPCS64 (even for varargs functions), so it is not possible to
-// determine the type and location of each argument without some information
-// about the values that were passed in. This information could be retrieved
-// from the printf format string, but the format string is not trivial to
-// parse so we encode the relevant information with the HLT instruction.
-// - Type
-// Either kRegister or kFPRegister, but stored as a uint32_t because there's
-// no way to guarantee the size of the CPURegister::RegisterType enum.
-const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
-const unsigned kPrintfLength = 2 * kInstructionSize;
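-// A printf marker therefore occupies two instruction slots in total: the HLT
-// itself, followed by one 32-bit data word holding the type value at
-// kPrintfTypeOffset.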
-
-// A pseudo 'debug' instruction.
-const Instr kImmExceptionIsDebug = 0xdeb0;
-// Parameters are inlined in the code after a debug pseudo-instruction:
-// - Debug code.
-// - Debug parameters.
-// - Debug message string. This is a NULL-terminated ASCII string, padded to
-// kInstructionSize so that subsequent instructions are correctly aligned.
-// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
-// string data.
-const unsigned kDebugCodeOffset = 1 * kInstructionSize;
-const unsigned kDebugParamsOffset = 2 * kInstructionSize;
-const unsigned kDebugMessageOffset = 3 * kInstructionSize;
-
-// Debug parameters.
-// Used without a TRACE_ option, the Debugger will print the arguments only
-// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
-// before every instruction for the specified LOG_ parameters.
-//
-// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
-// others that were not specified.
-//
-// For example:
-//
-// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
-// will print the registers and fp registers only once.
-//
-// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
-// starts disassembling the code.
-//
-// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
-// adds the general purpose registers to the trace.
-//
-// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
-// stops tracing the registers.
-const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
-enum DebugParameters {
- NO_PARAM = 0,
- BREAK = 1 << 0,
- LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
- LOG_REGS = 1 << 2, // Log general purpose registers.
- LOG_FP_REGS = 1 << 3, // Log floating-point registers.
- LOG_SYS_REGS = 1 << 4, // Log the status flags.
- LOG_WRITE = 1 << 5, // Log any memory write.
-
- LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
- LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
-
- // Trace control.
- TRACE_ENABLE = 1 << 6,
- TRACE_DISABLE = 2 << 6,
- TRACE_OVERRIDE = 3 << 6
-};
-
-
-} } // namespace v8::internal
-
-
-#endif // V8_A64_INSTRUCTIONS_A64_H_
diff --git a/deps/v8/src/a64/instrument-a64.cc b/deps/v8/src/a64/instrument-a64.cc
deleted file mode 100644
index 93892d9360..0000000000
--- a/deps/v8/src/a64/instrument-a64.cc
+++ /dev/null
@@ -1,618 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "a64/instrument-a64.h"
-
-namespace v8 {
-namespace internal {
-
-Counter::Counter(const char* name, CounterType type)
- : count_(0), enabled_(false), type_(type) {
- ASSERT(name != NULL);
- strncpy(name_, name, kCounterNameMaxLength);
-}
-
-
-void Counter::Enable() {
- enabled_ = true;
-}
-
-
-void Counter::Disable() {
- enabled_ = false;
-}
-
-
-bool Counter::IsEnabled() {
- return enabled_;
-}
-
-
-void Counter::Increment() {
- if (enabled_) {
- count_++;
- }
-}
-
-
-uint64_t Counter::count() {
- uint64_t result = count_;
- if (type_ == Gauge) {
- // If the counter is a Gauge, reset the count after reading.
- count_ = 0;
- }
- return result;
-}
-
-
-const char* Counter::name() {
- return name_;
-}
-
-
-CounterType Counter::type() {
- return type_;
-}
-
-
-typedef struct {
- const char* name;
- CounterType type;
-} CounterDescriptor;
-
-
-static const CounterDescriptor kCounterList[] = {
- {"Instruction", Cumulative},
-
- {"Move Immediate", Gauge},
- {"Add/Sub DP", Gauge},
- {"Logical DP", Gauge},
- {"Other Int DP", Gauge},
- {"FP DP", Gauge},
-
- {"Conditional Select", Gauge},
- {"Conditional Compare", Gauge},
-
- {"Unconditional Branch", Gauge},
- {"Compare and Branch", Gauge},
- {"Test and Branch", Gauge},
- {"Conditional Branch", Gauge},
-
- {"Load Integer", Gauge},
- {"Load FP", Gauge},
- {"Load Pair", Gauge},
- {"Load Literal", Gauge},
-
- {"Store Integer", Gauge},
- {"Store FP", Gauge},
- {"Store Pair", Gauge},
-
- {"PC Addressing", Gauge},
- {"Other", Gauge},
- {"SP Adjust", Gauge},
-};
-
-
-Instrument::Instrument(const char* datafile, uint64_t sample_period)
- : output_stream_(stderr), sample_period_(sample_period) {
-
- // Set up the output stream. If datafile is non-NULL, use that file. If it
- // can't be opened, or datafile is NULL, use stderr.
- if (datafile != NULL) {
- output_stream_ = fopen(datafile, "w");
- if (output_stream_ == NULL) {
- fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
- output_stream_ = stderr;
- }
- }
-
- static const int num_counters =
- sizeof(kCounterList) / sizeof(CounterDescriptor);
-
- // Dump an instrumentation description comment at the top of the file.
- fprintf(output_stream_, "# counters=%d\n", num_counters);
- fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
-
- // Construct Counter objects from counter description array.
- for (int i = 0; i < num_counters; i++) {
- Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
- counters_.push_back(counter);
- }
-
- DumpCounterNames();
-}
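-// The resulting file is a comment-prefixed CSV. With the default counter list
-// and sample period it begins (values shown for illustration):
-//   # counters=22
-//   # sample_period=4194304
-//   Instruction,Move Immediate,Add/Sub DP,...
-// followed by one line of counts per elapsed sample period.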
-
-
-Instrument::~Instrument() {
- // Dump any remaining instruction data to the output file.
- DumpCounters();
-
- // Free all the counter objects.
- std::list<Counter*>::iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- delete *it;
- }
-
- if (output_stream_ != stderr) {
- fclose(output_stream_);
- }
-}
-
-
-void Instrument::Update() {
- // Increment the instruction counter, and dump all counters if a sample period
- // has elapsed.
- static Counter* counter = GetCounter("Instruction");
- ASSERT(counter->type() == Cumulative);
- counter->Increment();
-
- if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
- DumpCounters();
- }
-}
-
-
-void Instrument::DumpCounters() {
- // Iterate through the counter objects, dumping their values to the output
- // stream.
- std::list<Counter*>::const_iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
- }
- fprintf(output_stream_, "\n");
- fflush(output_stream_);
-}
-
-
-void Instrument::DumpCounterNames() {
- // Iterate through the counter objects, dumping the counter names to the
- // output stream.
- std::list<Counter*>::const_iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- fprintf(output_stream_, "%s,", (*it)->name());
- }
- fprintf(output_stream_, "\n");
- fflush(output_stream_);
-}
-
-
-void Instrument::HandleInstrumentationEvent(unsigned event) {
- switch (event) {
- case InstrumentStateEnable: Enable(); break;
- case InstrumentStateDisable: Disable(); break;
- default: DumpEventMarker(event);
- }
-}
-
-
-void Instrument::DumpEventMarker(unsigned marker) {
-  // Dump an event marker to the output stream as a specially formatted comment
- // line.
- static Counter* counter = GetCounter("Instruction");
-
- fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
- (marker >> 8) & 0xff, counter->count());
-}
-
-
-Counter* Instrument::GetCounter(const char* name) {
- // Get a Counter object by name from the counter list.
- std::list<Counter*>::const_iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- if (strcmp((*it)->name(), name) == 0) {
- return *it;
- }
- }
-
- // A Counter by that name does not exist: print an error message to stderr
- // and the output file, and exit.
- static const char* error_message =
- "# Error: Unknown counter \"%s\". Exiting.\n";
- fprintf(stderr, error_message, name);
- fprintf(output_stream_, error_message, name);
- exit(1);
-}
-
-
-void Instrument::Enable() {
- std::list<Counter*>::iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- (*it)->Enable();
- }
-}
-
-
-void Instrument::Disable() {
- std::list<Counter*>::iterator it;
- for (it = counters_.begin(); it != counters_.end(); it++) {
- (*it)->Disable();
- }
-}
-
-
-void Instrument::VisitPCRelAddressing(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("PC Addressing");
- counter->Increment();
-}
-
-
-void Instrument::VisitAddSubImmediate(Instruction* instr) {
- Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
-}
-
-
-void Instrument::VisitLogicalImmediate(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Logical DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitMoveWideImmediate(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Move Immediate");
-
- if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
- unsigned imm = instr->ImmMoveWide();
- HandleInstrumentationEvent(imm);
- } else {
- counter->Increment();
- }
-}
-
-
-void Instrument::VisitBitfield(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other Int DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitExtract(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other Int DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitUnconditionalBranch(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Unconditional Branch");
- counter->Increment();
-}
-
-
-void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Unconditional Branch");
- counter->Increment();
-}
-
-
-void Instrument::VisitCompareBranch(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Compare and Branch");
- counter->Increment();
-}
-
-
-void Instrument::VisitTestBranch(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Test and Branch");
- counter->Increment();
-}
-
-
-void Instrument::VisitConditionalBranch(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Branch");
- counter->Increment();
-}
-
-
-void Instrument::VisitSystem(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other");
- counter->Increment();
-}
-
-
-void Instrument::VisitException(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other");
- counter->Increment();
-}
-
-
-void Instrument::InstrumentLoadStorePair(Instruction* instr) {
- static Counter* load_pair_counter = GetCounter("Load Pair");
- static Counter* store_pair_counter = GetCounter("Store Pair");
- if (instr->Mask(LoadStorePairLBit) != 0) {
- load_pair_counter->Increment();
- } else {
- store_pair_counter->Increment();
- }
-}
-
-
-void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
-void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
-void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
-void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
-void Instrument::VisitLoadLiteral(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Load Literal");
- counter->Increment();
-}
-
-
-void Instrument::InstrumentLoadStore(Instruction* instr) {
- static Counter* load_int_counter = GetCounter("Load Integer");
- static Counter* store_int_counter = GetCounter("Store Integer");
- static Counter* load_fp_counter = GetCounter("Load FP");
- static Counter* store_fp_counter = GetCounter("Store FP");
-
- switch (instr->Mask(LoadStoreOpMask)) {
- case STRB_w: // Fall through.
- case STRH_w: // Fall through.
- case STR_w: // Fall through.
- case STR_x: store_int_counter->Increment(); break;
- case STR_s: // Fall through.
- case STR_d: store_fp_counter->Increment(); break;
- case LDRB_w: // Fall through.
- case LDRH_w: // Fall through.
- case LDR_w: // Fall through.
- case LDR_x: // Fall through.
- case LDRSB_x: // Fall through.
- case LDRSH_x: // Fall through.
- case LDRSW_x: // Fall through.
- case LDRSB_w: // Fall through.
- case LDRSH_w: load_int_counter->Increment(); break;
- case LDR_s: // Fall through.
- case LDR_d: load_fp_counter->Increment(); break;
- default: UNREACHABLE();
- }
-}
-
-
-void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
- Update();
- InstrumentLoadStore(instr);
-}
-
-
-void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
- Update();
- InstrumentLoadStore(instr);
-}
-
-
-void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
- Update();
- InstrumentLoadStore(instr);
-}
-
-
-void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
- Update();
- InstrumentLoadStore(instr);
-}
-
-
-void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
- Update();
- InstrumentLoadStore(instr);
-}
-
-
-void Instrument::VisitLogicalShifted(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Logical DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitAddSubShifted(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Add/Sub DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitAddSubExtended(Instruction* instr) {
- Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
-}
-
-
-void Instrument::VisitAddSubWithCarry(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Add/Sub DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Compare");
- counter->Increment();
-}
-
-
-void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Compare");
- counter->Increment();
-}
-
-
-void Instrument::VisitConditionalSelect(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Select");
- counter->Increment();
-}
-
-
-void Instrument::VisitDataProcessing1Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other Int DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitDataProcessing2Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other Int DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitDataProcessing3Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other Int DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPCompare(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPConditionalCompare(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Compare");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPConditionalSelect(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Conditional Select");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPImmediate(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPIntegerConvert(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("FP DP");
- counter->Increment();
-}
-
-
-void Instrument::VisitUnallocated(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other");
- counter->Increment();
-}
-
-
-void Instrument::VisitUnimplemented(Instruction* instr) {
- Update();
- static Counter* counter = GetCounter("Other");
- counter->Increment();
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/a64/instrument-a64.h b/deps/v8/src/a64/instrument-a64.h
deleted file mode 100644
index 08dc1b2ad1..0000000000
--- a/deps/v8/src/a64/instrument-a64.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_INSTRUMENT_A64_H_
-#define V8_A64_INSTRUMENT_A64_H_
-
-#include "globals.h"
-#include "utils.h"
-#include "a64/decoder-a64.h"
-#include "a64/constants-a64.h"
-#include "a64/instrument-a64.h"
-
-namespace v8 {
-namespace internal {
-
-const int kCounterNameMaxLength = 256;
-const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
-
-
-enum InstrumentState {
- InstrumentStateDisable = 0,
- InstrumentStateEnable = 1
-};
-
-
-enum CounterType {
- Gauge = 0, // Gauge counters reset themselves after reading.
- Cumulative = 1 // Cumulative counters keep their value after reading.
-};
-
-
-class Counter {
- public:
- Counter(const char* name, CounterType type = Gauge);
-
- void Increment();
- void Enable();
- void Disable();
- bool IsEnabled();
- uint64_t count();
- const char* name();
- CounterType type();
-
- private:
- char name_[kCounterNameMaxLength];
- uint64_t count_;
- bool enabled_;
- CounterType type_;
-};
-
-
-class Instrument: public DecoderVisitor {
- public:
- explicit Instrument(const char* datafile = NULL,
- uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
- ~Instrument();
-
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
- VISITOR_LIST(DECLARE)
- #undef DECLARE
-
- private:
- void Update();
- void Enable();
- void Disable();
- void DumpCounters();
- void DumpCounterNames();
- void DumpEventMarker(unsigned marker);
- void HandleInstrumentationEvent(unsigned event);
- Counter* GetCounter(const char* name);
-
- void InstrumentLoadStore(Instruction* instr);
- void InstrumentLoadStorePair(Instruction* instr);
-
- std::list<Counter*> counters_;
-
- FILE *output_stream_;
- uint64_t sample_period_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_A64_INSTRUMENT_A64_H_
diff --git a/deps/v8/src/a64/lithium-a64.cc b/deps/v8/src/a64/lithium-a64.cc
deleted file mode 100644
index fa351e3928..0000000000
--- a/deps/v8/src/a64/lithium-a64.cc
+++ /dev/null
@@ -1,2449 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
-#include "hydrogen-osr.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- hydrogen()->access().PrintTo(stream);
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(String::cast(*name())->ToCString().get());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("%p -> %p", *original_map(), *transitioned_map());
-}
-
-
-template<int T>
-void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LChunkBuilder::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
-}
-
-
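- // Wraps 'value' in the given unallocated operand. Values marked
- // EmitAtUses have no code of their own; they are visited here so their
- // code is emitted lazily at the point of use.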
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
- DoubleRegister fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant() ? UseConstant(value) : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
-}
-
-
-LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? UseConstant(value)
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateResultInstruction<1>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If the instruction does not have observable side effects, lazy
- // deoptimization after the call will try to deoptimize to the point
- // before the call. Thus we still need to attach an environment to the
- // call even if the call sequence cannot deoptimize eagerly.
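- // (Illustrative: a call built with CANNOT_DEOPTIMIZE_EAGERLY and no
- // observable side effects can still deoptimize lazily on return, and that
- // bailout unwinds to the pre-call state captured by this environment.)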
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-int LPlatformChunk::GetNextSpillIndex() {
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex();
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- ASSERT(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info_, graph_);
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- // TODO(all): GetNextSpillIndex just increments a field. It has no other
- // side effects, so we should get rid of this loop.
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex();
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- DoBasicBlock(blocks->at(i));
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
- ASSERT(is_building());
- current_block_ = block;
-
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
-
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
- (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
- last_environment = last_environment->Copy();
- }
- }
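- // (Illustrative: if the predecessor ends in a branch to B3 and B5 and we
- // are currently building B3, the environment is still needed for B5, so
- // B3 must work on a copy.)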
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
-
- // Translate hydrogen instructions to lithium ones for the current block.
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while ((current != NULL) && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- ASSERT(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- instr = current->CompileToLithium(this);
- }
-
- argument_count_ += current->argument_delta();
- ASSERT(argument_count_ >= 0);
-
- if (instr != NULL) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(current);
-
-#ifdef DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it were just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- ASSERT(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
-
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = UseFixedDouble(instr->right(), d1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
- (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
- (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
- (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
- (op == Token::BIT_XOR));
- HValue* left = instr->left();
- HValue* right = instr->right();
-
- // TODO(jbramley): Once we've implemented smi support for all arithmetic
- // operations, these assertions should check IsTagged().
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, x1);
- LOperand* right_operand = UseFixed(right, x0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- // TODO(all): Try to improve this, like ARM r17925.
- info()->MarkAsRequiresFrame();
- LOperand* args = NULL;
- LOperand* length = NULL;
- LOperand* index = NULL;
- LOperand* temp = NULL;
-
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- args = UseRegisterAtStart(instr->arguments());
- length = UseConstant(instr->length());
- index = UseConstant(instr->index());
- } else {
- args = UseRegister(instr->arguments());
- length = UseRegisterAtStart(instr->length());
- index = UseRegisterOrConstantAtStart(instr->index());
- temp = TempRegister();
- }
-
- return DefineAsRegister(
- new(zone()) LAccessArgumentsAt(args, length, index, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right =
- UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- LInstruction* result = instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LAddS(left, right)) :
- DefineAsRegister(new(zone()) LAddI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- ASSERT(instr->left()->representation().IsExternal());
- ASSERT(instr->right()->representation().IsInteger32());
- ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return DefineAsRegister(new(zone()) LAddE(left, right));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), x1);
- LOperand* receiver = UseFixed(instr->receiver(), x0);
- LOperand* length = UseFixed(instr->length(), x2);
- LOperand* elements = UseFixed(instr->elements(), x3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
- return DefineAsRegister(new(zone()) LArgumentsElements(temp));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right =
- UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- return instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LBitS(left, right)) :
- DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- // V8 expects a label to be generated for each basic block.
- // This is used in some places like LAllocator::IsBlockBoundary
- // in lithium-allocator.cc
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
-
- if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
- // These representations have simple checks that cannot deoptimize.
- return new(zone()) LBranch(UseRegister(value), NULL, NULL);
- } else {
- ASSERT(r.IsTagged());
- if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
- type.IsHeapNumber()) {
- // These types have simple checks that cannot deoptimize.
- return new(zone()) LBranch(UseRegister(value), NULL, NULL);
- }
-
- if (type.IsString()) {
- // This type cannot deoptimize, but needs a scratch register.
- return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
- }
-
- ToBooleanStub::Types expected = instr->expected_input_types();
- bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp1 = needs_temps ? TempRegister() : NULL;
- LOperand* temp2 = needs_temps ? TempRegister() : NULL;
-
- if (expected.IsGeneric() || expected.IsEmpty()) {
- // The generic case cannot deoptimize because it already supports every
- // possible input type.
- ASSERT(needs_temps);
- return new(zone()) LBranch(UseRegister(value), temp1, temp2);
- } else {
- return AssignEnvironment(
- new(zone()) LBranch(UseRegister(value), temp1, temp2));
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), x1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- const CallInterfaceDescriptor* descriptor = instr->descriptor();
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- ops.Add(target, zone());
- for (int i = 1; i < instr->OperandCount(); i++) {
- LOperand* op = UseFixed(instr->OperandAt(i),
- descriptor->GetParameterRegister(i - 1));
- ops.Add(op, zone());
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
- ops,
- zone());
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), x1);
- LCallFunction* call = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(call, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The call to CallConstructStub will expect the constructor to be in x1.
- LOperand* constructor = UseFixed(instr->constructor(), x1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The call to ArrayConstructCode will expect the constructor to be in x1.
- LOperand* constructor = UseFixed(instr->constructor(), x1);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
-
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
-
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- ASSERT(to.IsInteger32());
- LInstruction* res = NULL;
-
- if (instr->value()->type().IsSmi() ||
- instr->value()->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(instr->value());
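- // The second argument to LSmiUntag (false) means no smi check is
- // needed: the input is already known to be a smi here.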
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 =
- instr->CanTruncateToInt32() ? TempRegister() : FixedTemp(d24);
- res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
- res = AssignEnvironment(res);
- }
-
- return res;
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- ASSERT(to.IsSmi() || to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
-
- if (instr->CanTruncateToInt32()) {
- LTruncateDoubleToIntOrSmi* result =
- new(zone()) LTruncateDoubleToIntOrSmi(value);
- return DefineAsRegister(result);
- } else {
- LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
- return AssignEnvironment(DefineAsRegister(result));
- }
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegister(instr->value());
- LNumberTagU* result = new(zone()) LNumberTagU(value,
- TempRegister(),
- TempRegister());
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
- (kMaxInt == Smi::kMaxValue));
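- // Given the assertion above, tagging an int32 as a smi can never
- // overflow on this platform, so no environment is needed.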
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LSmiTag(value));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegisterAtStart(instr->value());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LUint32ToSmi* result = new(zone()) LUint32ToSmi(value);
- return AssignEnvironment(DefineAsRegister(result));
- } else {
- // This cannot deoptimize because an A64 smi can represent any int32.
- return DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
- }
- }
- }
-
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- // We only need a temp register if the target is in new space, but we can't
- // dereference the handle to test that here.
- // TODO(all): Check these constraints. The temp register is not always used.
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
- return AssignEnvironment(new(zone()) LCheckValue(value, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->CanOmitMapChecks()) {
- // LCheckMaps does nothing in this case.
- return new(zone()) LCheckMaps(NULL);
- } else {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
-
- if (instr->has_migration_target()) {
- info()->MarkAsDeferredCalling();
- LInstruction* result = new(zone()) LCheckMaps(value, temp);
- return AssignPointerMap(AssignEnvironment(result));
- } else {
- return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsSmiOrTagged());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LClampTToUint8(reg,
- TempRegister(),
- FixedTemp(d24))));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LClassOfTestAndBranch(value,
- TempRegister(),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
-
- // TODO(all): This instruction has been replaced by HCompareNumericAndBranch
- // on bleeding_edge. We should update this when we do the rebase.
- if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(r));
- ASSERT(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- // TODO(all): In fact the only case that we can handle more efficiently is
- // when one of the operands is the constant 0. Currently the MacroAssembler
- // is able to cope with any constant by loading it into an internal
- // scratch register. This means that if the constant is used more than once,
- // it will be loaded multiple times. Unfortunately crankshaft already
- // duplicates constant loads, but we should modify the code below once this
- // issue has been addressed in crankshaft.
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- if (instr->representation().IsTagged()) {
- return new(zone()) LCmpHoleAndBranchT(value);
- } else {
- LOperand* temp = TempRegister();
- return new(zone()) LCmpHoleAndBranchD(value, temp);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, cp);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsInteger32()) {
- // TODO(all): Update this case to support smi inputs.
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegister(instr->left());
- LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
- }
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
- ? NULL : TempRegister();
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
- // Only replay the binding of the arguments object if it wasn't removed
- // from the graph.
- if ((instr->arguments_var() != NULL) &&
- instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(
- HForceRepresentation* instr) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new(zone()) LInstanceOf(
- context,
- UseFixed(instr->left(), InstanceofStub::left()),
- UseFixed(instr->right(), InstanceofStub::right()));
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), cp),
- UseFixed(instr->left(), InstanceofStub::left()));
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
- LOperand* function = UseFixed(instr->function(), x1);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- ASSERT(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer =
- current_block_->last_environment()->DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- LOperand* function = UseRegister(instr->function());
- LOperand* temp = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(function, temp)));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object = UseFixed(instr->global_object(), x0);
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* elements = UseRegister(instr->elements());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- if (!instr->is_typed_elements()) {
- if (instr->representation().IsDouble()) {
- LOperand* temp = (!instr->key()->IsConstant() ||
- instr->RequiresHoleCheck())
- ? TempRegister()
- : NULL;
-
- LLoadKeyedFixedDouble* result =
- new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
- } else {
- ASSERT(instr->representation().IsSmiOrTagged() ||
- instr->representation().IsInteger32());
- LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LLoadKeyedFixed* result =
- new(zone()) LLoadKeyedFixed(elements, key, temp);
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
- }
- } else {
- ASSERT((instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
-
- LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LLoadKeyedExternal* result =
- new(zone()) LLoadKeyedExternal(elements, key, temp);
- // An unsigned int array load might overflow and cause a deopt. Make sure it
- // has an environment.
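- // (Illustrative: a Uint32Array element such as 0x80000000 lies outside
- // the signed int32 range and cannot be represented untagged.)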
- if (instr->RequiresHoleCheck() ||
- elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) {
- return AssignEnvironment(DefineAsRegister(result));
- } else {
- return DefineAsRegister(result);
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x1);
- LOperand* key = UseFixed(instr->key(), x0);
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(object));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x0);
- LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* hmod) {
- HValue* hleft = hmod->left();
- HValue* hright = hmod->right();
-
- // TODO(jbramley): Add smi support.
- if (hmod->representation().IsInteger32()) {
- ASSERT(hleft->representation().IsInteger32());
- ASSERT(hright->representation().IsInteger32());
- LOperand* left_op;
- LOperand* right_op;
-
- if (hmod->RightIsPowerOf2()) {
- left_op = UseRegisterAtStart(hleft);
- right_op = UseConstant(hright);
- } else {
- right_op = UseRegister(hright);
- left_op = UseRegister(hleft);
- }
-
- LModI* lmod = new(zone()) LModI(left_op, right_op);
-
- if (hmod->right()->CanBeZero() ||
- (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
- hmod->left()->CanBeNegative() && hmod->CanBeZero())) {
- AssignEnvironment(lmod);
- }
- return DefineAsRegister(lmod);
-
- } else if (hmod->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, hmod);
- } else {
- return DoArithmeticD(Token::MOD, hmod);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
- bool needs_environment = can_overflow || bailout_on_minus_zero;
-
- HValue* least_const = instr->BetterLeftOperand();
- HValue* most_const = instr->BetterRightOperand();
-
- LOperand* left = UseRegisterAtStart(least_const);
-
- // LMulConstIS can handle a subset of constants:
- // With support for overflow detection:
- // -1, 0, 1, 2
- // Without support for overflow detection:
- // 2^n, -(2^n)
- // 2^n + 1, -(2^n + 1)
- // 2^n - 1, -(2^n - 1)
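- // (Illustrative A64 lowerings, not from the original source:
- // x * 8 -> lsl result, left, #3
- // x * 9 -> add result, left, left, lsl #3 // 9 == 2^3 + 1
- // which is why 2^n and 2^n +/- 1 take the LMulConstIS path.)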
- if (most_const->IsConstant()) {
- int32_t constant = HConstant::cast(most_const)->Integer32Value();
- int32_t constant_abs = (constant >= 0) ? constant : -constant;
-
- if (((constant >= -1) && (constant <= 2)) ||
- (!can_overflow && (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs + 1) ||
- IsPowerOf2(constant_abs - 1)))) {
- LConstantOperand* right = UseConstant(most_const);
- LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
- if (needs_environment) AssignEnvironment(mul);
- return DefineAsRegister(mul);
- }
- }
-
- // LMulI/S can handle all cases, but it requires that a register is
- // allocated for the second operand.
- LInstruction* result;
- if (instr->representation().IsSmi()) {
- // TODO(jbramley/rmcilroy): Fix LMulS so we can UseRegisterAtStart here.
- LOperand* right = UseRegister(most_const);
- result = DefineAsRegister(new(zone()) LMulS(left, right));
- } else {
- LOperand* right = UseRegisterAtStart(most_const);
- result = DefineAsRegister(new(zone()) LMulI(left, right));
- }
- if (needs_environment) AssignEnvironment(result);
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk_->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- int index = static_cast<int>(instr->index());
- Register reg = descriptor->GetParameterRegister(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = exponent_type.IsInteger32()
- ? UseFixed(instr->right(), x12)
- : exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d1)
- : UseFixed(instr->right(), x11);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d0),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- LOperand* argument = UseRegister(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), cp)
- : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
- parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- // TODO(all): Use UseRegisterAtStart and UseRegisterOrConstantAtStart here.
- // We cannot do it now because the debug code in the implementation changes
- // temp.
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegisterOrConstant(instr->index());
- LOperand* temp = TempRegister();
- LSeqStringGetChar* result =
- new(zone()) LSeqStringGetChar(string, index, temp);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegister(instr->index())
- : UseRegisterOrConstant(instr->index());
- LOperand* value = UseRegister(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- LOperand* temp = TempRegister();
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(context, string, index, value, temp);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- return DoArithmeticT(op, instr);
- }
-
- ASSERT(instr->representation().IsInteger32() ||
- instr->representation().IsSmi());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-
- LOperand* left = instr->representation().IsSmi()
- ? UseRegister(instr->left())
- : UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- LOperand* temp = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- right = UseConstant(right_value);
- HConstant* constant = HConstant::cast(right_value);
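- // JavaScript shift counts are taken modulo 32, hence the mask with 0x1f.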
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- if (op == Token::ROR) {
- temp = TempRegister();
- }
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and the
- // result cannot be truncated to int32.
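- // (Illustrative: (-1) >>> 0 evaluates to 4294967295 in JavaScript, which
- // cannot be represented as a signed int32, so the result is only safe if
- // all uses truncate it back to int32.)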
- bool does_deopt = false;
- if ((op == Token::SHR) && (constant_value == 0)) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
- }
- }
-
- LInstruction* result;
- if (instr->representation().IsInteger32()) {
- result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- } else {
- ASSERT(instr->representation().IsSmi());
- result = DefineAsRegister(
- new(zone()) LShiftS(op, left, right, temp, does_deopt));
- }
-
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // If there is an instruction pending a deoptimization environment, create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
- LOperand* function = UseRegister(instr->function());
- LOperand* code_object = UseRegisterAtStart(instr->code_object());
- LOperand* temp = TempRegister();
- return new(zone()) LStoreCodeEntry(function, code_object, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* temp = TempRegister();
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- // TODO(all): Replace these constraints when RecordWriteStub has been
- // rewritten.
- context = UseRegisterAndClobber(instr->context());
- value = UseRegisterAndClobber(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- if (instr->RequiresHoleCheck()) {
- return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
- TempRegister(),
- TempRegister()));
- } else {
- return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- LOperand* temp = NULL;
- LOperand* elements = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (!instr->is_typed_elements() &&
- instr->value()->representation().IsTagged() &&
- instr->NeedsWriteBarrier()) {
- // RecordWrite() will clobber all registers.
- elements = UseRegisterAndClobber(instr->elements());
- val = UseRegisterAndClobber(instr->value());
- key = UseRegisterAndClobber(instr->key());
- } else {
- elements = UseRegister(instr->elements());
- val = UseRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
-
- if (instr->is_typed_elements()) {
- ASSERT((instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- ASSERT((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
- temp = instr->key()->IsConstant() ? NULL : TempRegister();
- return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
-
- } else if (instr->value()->representation().IsDouble()) {
- ASSERT(instr->elements()->representation().IsTagged());
-
- // The constraint used here is UseRegister, even though the StoreKeyed
- // instruction may canonicalize the value in the register if it is a NaN.
- temp = TempRegister();
- return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
-
- } else {
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsSmiOrTagged() ||
- instr->value()->representation().IsInteger32());
-
- temp = TempRegister();
- return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x2);
- LOperand* key = UseFixed(instr->key(), x1);
- LOperand* value = UseFixed(instr->value(), x0);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- return MarkAsCall(
- new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- // TODO(jbramley): Optimize register usage in this instruction. For now, it
- // allocates everything that it might need because it keeps changing in the
- // merge and keeping it valid is time-consuming.
-
- // TODO(jbramley): It might be beneficial to allow value to be a constant in
- // some cases. x64 makes use of this with FLAG_track_fields, for example.
-
- LOperand* object = UseRegister(instr->object());
- LOperand* value = UseRegisterAndClobber(instr->value());
- LOperand* temp0 = TempRegister();
- LOperand* temp1 = TempRegister();
-
- LStoreNamedField* result =
- new(zone()) LStoreNamedField(object, value, temp0, temp1);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject() &&
- !instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->object(), x1);
- LOperand* value = UseFixed(instr->value(), x0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
-
- LStringAdd* result = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseRegisterAndClobber(instr->string());
- LOperand* index = UseRegisterAndClobber(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
-  // TODO(all): Use UseRegisterAtStart here and remove the assert in codegen.
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-    LOperand* left;
- if (instr->left()->IsConstant() &&
- (HConstant::cast(instr->left())->Integer32Value() == 0)) {
- left = UseConstant(instr->left());
- } else {
- left = UseRegisterAtStart(instr->left());
- }
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- LInstruction* result = instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LSubS(left, right)) :
- DefineAsRegister(new(zone()) LSubI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- if (instr->HasNoUses()) {
- return NULL;
- } else {
- return DefineAsRegister(new(zone()) LThisFunction);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- TempRegister(), TempRegister());
- return result;
- } else {
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, TempRegister());
- return AssignPointerMap(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp1, temp2);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
-  // TODO(jbramley): In ARM, this uses UseFixed to force the input to r0.
- // However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
- // anyway, so the input doesn't have to be in x0. We might be able to improve
- // the ARM back-end a little by relaxing this restriction.
- LTypeof* result =
- new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- LInstruction* goto_instr = CheckElideControlInstruction(instr);
- if (goto_instr != NULL) return goto_instr;
-
- // We only need temp registers in some cases, but we can't dereference the
- // instr->type_literal() handle to test that here.
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- return new(zone()) LTypeofIsAndBranch(
- UseRegister(instr->value()), temp1, temp2);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs: {
- Representation r = instr->representation();
- if (r.IsTagged()) {
- // The tagged case might need to allocate a HeapNumber for the result,
- // so it is handled by a separate LInstruction.
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* temp3 = TempRegister();
- LMathAbsTagged* result =
- new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
- if (r.IsDouble()) {
- // The Double case can never fail so it doesn't need an environment.
- return DefineAsRegister(result);
- } else {
- ASSERT(r.IsInteger32() || r.IsSmi());
-        // The Integer32 and Smi cases need an environment because they can
-        // deoptimize on the minimum representable number.
- return AssignEnvironment(DefineAsRegister(result));
- }
- }
- }
- case kMathExp: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- // TODO(all): Implement TempFPRegister.
- LOperand* double_temp1 = FixedTemp(d24); // This was chosen arbitrarily.
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* temp3 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(input, double_temp1,
- temp1, temp2, temp3);
- return DefineAsRegister(result);
- }
- case kMathFloor: {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->value()->representation().IsDouble());
- // TODO(jbramley): A64 can easily handle a double argument with frintm,
- // but we're never asked for it here. At the moment, we fall back to the
- // runtime if the result doesn't fit, like the other architectures.
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- }
- case kMathLog: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- }
- case kMathPowHalf: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- return DefineAsRegister(new(zone()) LMathPowHalf(input));
- }
- case kMathRound: {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->value()->representation().IsDouble());
- // TODO(jbramley): As with kMathFloor, we can probably handle double
- // results fairly easily, but we are never asked for them.
- LOperand* input = UseRegister(instr->value());
-      LOperand* temp = FixedTemp(d24); // Chosen arbitrarily.
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
- }
- case kMathSqrt: {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMathSqrt(input));
- }
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk_->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // Assign object to a fixed register different from those already used in
- // LForInPrepareMap.
- LOperand* object = UseFixed(instr->enumerable(), x0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegister(instr->map());
- LOperand* temp = TempRegister();
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegister(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-a64.h b/deps/v8/src/a64/lithium-a64.h
deleted file mode 100644
index 33d11e6c5d..0000000000
--- a/deps/v8/src/a64/lithium-a64.h
+++ /dev/null
@@ -1,2967 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_LITHIUM_A64_H_
-#define V8_A64_LITHIUM_A64_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
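-// X-macro listing every concrete lithium instruction in this port. Clients
-// define V to generate per-instruction code; see the Opcode enum and the
-// type testers in LInstruction below.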
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddE) \
- V(AddI) \
- V(AddS) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BitS) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallFunction) \
- V(CallJSFunction) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CallWithDescriptor) \
- V(CheckInstanceType) \
- V(CheckMapValue) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpHoleAndBranchD) \
- V(CmpHoleAndBranchT) \
- V(CmpMapAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpT) \
- V(CompareMinusZeroAndBranch) \
- V(CompareNumericAndBranch) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DateField) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToIntOrSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Integer32ToSmi) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
- V(IsSmiAndBranch) \
- V(IsStringAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyedExternal) \
- V(LoadKeyedFixed) \
- V(LoadKeyedFixedDouble) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedGeneric) \
- V(LoadRoot) \
- V(MapEnumLength) \
- V(MathAbs) \
- V(MathAbsTagged) \
- V(MathExp) \
- V(MathFloor) \
- V(MathFloorOfDiv) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(ModI) \
- V(MulConstIS) \
- V(MulI) \
- V(MulS) \
- V(NumberTagD) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(ShiftS) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreKeyedExternal) \
- V(StoreKeyedFixed) \
- V(StoreKeyedFixedDouble) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(SubS) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(TruncateDoubleToIntOrSmi) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(Uint32ToSmi) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-
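-// Expands, inside a concrete instruction class, to final overrides of
-// opcode(), CompileToNative() and Mnemonic(), plus a checked cast from
-// LInstruction*.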
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
- return LInstruction::k##type; \
- } \
- virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
- virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
- return mnemonic; \
- } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
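-// Defines a hydrogen() accessor returning the hydrogen instruction this
-// lithium instruction was built from, cast to the given type.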
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(this->hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) { }
-
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
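-  // Bit 0 of bit_field_ records whether the instruction is marked as a call;
-  // the remaining bits are free for further flags.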
- class IsCallBits: public BitField<bool, 0, 1> {};
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int32_t bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
- return (R != 0) && (result() != NULL);
- }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
-
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
-};
-
-
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
- return false;
- }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
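-// Base class for instructions that end a basic block. The true and false
-// successors come from the underlying hydrogen control instruction, and
-// their assembly labels are resolved lazily through the chunk.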
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
-
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
-
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const V8_OVERRIDE { return true; }
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
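-  // A gap can hold a parallel move at each of four positions relative to the
-  // instruction it belongs to; the register allocator fills these in as
-  // needed.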
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap V8_FINAL : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- explicit LDummy() { }
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual bool IsControl() const V8_OVERRIDE { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LLabel V8_FINAL : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
- return false;
- }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
- return false;
- }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 1> {
- public:
- LAccessArgumentsAt(LOperand* arguments,
- LOperand* length,
- LOperand* index,
- LOperand* temp) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* temp() { return temps_[0]; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddE(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context,
- LOperand* size,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LArgumentsElements(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const V8_OVERRIDE {
- return LInstruction::kArithmeticD;
- }
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const V8_OVERRIDE {
- return LInstruction::kArithmeticT;
- }
- virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
- virtual const char* Mnemonic() const V8_OVERRIDE;
-
- private:
- Token::Value op_;
-};
-
-
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LBranch V8_FINAL : public LControlInstruction<1, 2> {
- public:
-  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckValue(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = unclamped;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
- public:
- explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranchT(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToIntOrSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool tag_result() { return hydrogen()->representation().IsSmi(); }
-};
-
-
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch V8_FINAL
- : public LControlInstruction<1, 1> {
- public:
- LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) V8_OVERRIDE {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
- ZoneList<LOperand*>& operands,
- Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor->environment_length() + 1, zone) {
- ASSERT(descriptor->environment_length() + 1 == operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor* descriptor() { return descriptor_; }
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- const CallInterfaceDescriptor* descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
- virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
-
- virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
- virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
-};
-
-
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
- public:
- LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() const { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-template<int T>
-class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- this->inputs_[0] = elements;
- this->inputs_[1] = key;
- }
-
- LOperand* elements() { return this->inputs_[0]; }
- LOperand* key() { return this->inputs_[1]; }
- ElementsKind elements_kind() const {
- return this->hydrogen()->elements_kind();
- }
- bool is_external() const {
- return this->hydrogen()->is_external();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
- uint32_t additional_index() const {
- return this->hydrogen()->index_offset();
- }
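- // Prints the access in the form "elements[key]", appending the constant
- // offset (e.g. "[key + 4]") when the access has been dehoisted.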
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
- this->elements()->PrintTo(stream);
- stream->Add("[");
- this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", this->additional_index());
- } else {
- stream->Add("]");
- }
- }
-
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-};
-
-
-class LLoadKeyedExternal : public LLoadKeyed<1> {
- public:
- LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
-};
-
-
-class LLoadKeyedFixed : public LLoadKeyed<1> {
- public:
- LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
-};
-
-
-class LLoadKeyedFixedDouble : public LLoadKeyed<1> {
- public:
- LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
-};
-
-
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-template<int T>
-class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- this->inputs_[0] = value;
- }
-
- LOperand* value() { return this->inputs_[0]; }
- BuiltinFunctionId op() const { return this->hydrogen()->op(); }
-
- void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
- public:
- explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
-};
-
-
-class LMathAbsTagged : public LTemplateInstruction<1, 2, 3> {
- public:
- LMathAbsTagged(LOperand* context, LOperand* value,
- LOperand* temp1, LOperand* temp2, LOperand* temp3) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp1,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3)
- : LUnaryMathOperation<4>(value) {
- temps_[0] = double_temp1;
- temps_[1] = temp1;
- temps_[2] = temp2;
- temps_[3] = temp3;
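- // The code generated for MathExp relies on pre-computed lookup tables, so
- // make sure they have been initialized before any is emitted.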
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* double_temp1() { return temps_[0]; }
- LOperand* temp1() { return temps_[1]; }
- LOperand* temp2() { return temps_[2]; }
- LOperand* temp3() { return temps_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
- public:
- explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
-};
-
-
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
- public:
- explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
- public:
- explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
- public:
- LMathRound(LOperand* value, LOperand* temp1)
- : LUnaryMathOperation<1>(value) {
- temps_[0] = temp1;
- }
-
- LOperand* temp1() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
-};
-
-
-class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
- public:
- explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LModI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulConstIS(LOperand* left, LConstantOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
-
- DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-s")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
- return false;
- }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* parameter_count() { return inputs_[2]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- ASSERT(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
- LSeqStringGetChar(LOperand* string,
- LOperand* index,
- LOperand* temp) {
- inputs_[0] = string;
- inputs_[1] = index;
- temps_[0] = temp;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value,
- LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-template<int T>
-class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
- public:
- LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
- this->inputs_[0] = elements;
- this->inputs_[1] = key;
- this->inputs_[2] = value;
- }
-
- bool is_external() const { return this->hydrogen()->is_external(); }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
- LOperand* elements() { return this->inputs_[0]; }
- LOperand* key() { return this->inputs_[1]; }
- LOperand* value() { return this->inputs_[2]; }
- ElementsKind elements_kind() const {
- return this->hydrogen()->elements_kind();
- }
-
- bool NeedsCanonicalization() {
- return this->hydrogen()->NeedsCanonicalization();
- }
- uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
-
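- // Prints the store in the form "elements[key] <- value"; a constant hole
- // store, which has no value operand, prints "<the hole(nan)>" as the value.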
- void PrintDataTo(StringStream* stream) V8_OVERRIDE {
- this->elements()->PrintTo(stream);
- stream->Add("[");
- this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", this->additional_index());
- } else {
- stream->Add("] <- ");
- }
-
- if (this->value() == NULL) {
- ASSERT(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- this->value()->PrintTo(stream);
- }
- }
-
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-};
-
-
-class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
- public:
- LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
-};
-
-
-class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
- public:
- LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
-};
-
-
-class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
- public:
- LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
- "store-keyed-fixed-double")
-};
-
-
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* obj,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- inputs_[3] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value,
- LOperand* temp0, LOperand* temp1) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp0;
- temps_[1] = temp1;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp0() { return temps_[0]; }
- LOperand* temp1() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
- public:
- LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
- bool can_deopt) : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object,
- LOperand* temp) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LSubS : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* temp1,
- LOperand* temp2 = NULL) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* context() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() const { return hydrogen()->from_kind(); }
- ElementsKind to_kind() const { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
- public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = object;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
-};
-
-
-class LTruncateDoubleToIntOrSmi V8_FINAL
- : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
- "truncate-double-to-int-or-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool tag_result() { return hydrogen()->representation().IsSmi(); }
-};
-
-
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
- public:
- LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() const { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-};
-
-
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
- inputs_[0] = value;
- inputs_[1] = map;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex();
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(graph->zone()),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- static bool HasMagicNumberForDivision(int32_t divisor);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return info_->isolate(); }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- int argument_count() const { return argument_count_; }
- CompilationInfo* info() const { return info_; }
- Heap* heap() const { return isolate()->heap(); }
-
- void Abort(BailoutReason reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // The operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
-
- // The operand created by UseRegisterAndClobber is guaranteed to be live until
- // the end of the instruction, and it may also be used as a scratch
- // register by the instruction implementation.
- //
- // This behaves identically to ARM's UseTempRegister. However, it is renamed
- // to discourage its use in A64, since in most cases it is better to allocate
- // a temporary register for the Lithium instruction.
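- //
- // As an illustrative sketch only (DoExample and LExample are hypothetical,
- // not declared in this file), a builder method whose generated code may
- // clobber its input would look like this:
- //
- //   LInstruction* LChunkBuilder::DoExample(HExample* instr) {
- //     LOperand* in = UseRegisterAndClobber(instr->value());
- //     return DefineAsRegister(new(zone()) LExample(in));
- //   }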
- MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
-
- // The operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. temporary
- // or output).
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // A constant operand.
- MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
-
- // An input operand in a register, a stack slot, or a constant operand.
- // Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
-
- // Temporary operand that must be in a fixed double register.
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
-
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly, and we do not attach an environment to the
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
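-
- // A typical use (illustrative only; LExampleCall is hypothetical) pins the
- // result to the A64 return register and marks the instruction as a call:
- //
- //   LExampleCall* call = new(zone()) LExampleCall(context);
- //   return MarkAsCall(DefineFixed(call, x0), instr);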
-
- LInstruction* AssignPointerMap(LInstruction* instr);
- LInstruction* AssignEnvironment(LInstruction* instr);
-
- void VisitInstruction(HInstruction* current);
- void DoBasicBlock(HBasicBlock* block);
-
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- int argument_count_;
- LAllocator* allocator_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_A64_LITHIUM_A64_H_
diff --git a/deps/v8/src/a64/lithium-codegen-a64.cc b/deps/v8/src/a64/lithium-codegen-a64.cc
deleted file mode 100644
index b4b875fb04..0000000000
--- a/deps/v8/src/a64/lithium-codegen-a64.cc
+++ /dev/null
@@ -1,5692 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "a64/lithium-codegen-a64.h"
-#include "a64/lithium-gap-resolver-a64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-#include "hydrogen-osr.h"
-
-namespace v8 {
-namespace internal {
-
-
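-// A CallWrapper which records a safepoint (with the given deoptimization
-// mode) immediately after the wrapped call returns.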
-class SafepointGenerator V8_FINAL : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const { }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-// Emit code to branch if the given condition holds.
-// The code generated here doesn't modify the flags; they must have been set
-// by some prior instructions.
-//
-// The EmitInverted function simply inverts the condition.
-class BranchOnCondition : public BranchGenerator {
- public:
- BranchOnCondition(LCodeGen* codegen, Condition cond)
- : BranchGenerator(codegen),
- cond_(cond) { }
-
- virtual void Emit(Label* label) const {
- __ B(cond_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- if (cond_ != al) {
- __ B(InvertCondition(cond_), label);
- }
- }
-
- private:
- Condition cond_;
-};
-
-
-// Emit code to compare lhs and rhs and branch if the condition holds.
-// This uses MacroAssembler's CompareAndBranch function so it will handle
-// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
-//
-// EmitInverted still compares the two operands but inverts the condition.
-class CompareAndBranch : public BranchGenerator {
- public:
- CompareAndBranch(LCodeGen* codegen,
- Condition cond,
- const Register& lhs,
- const Operand& rhs)
- : BranchGenerator(codegen),
- cond_(cond),
- lhs_(lhs),
- rhs_(rhs) { }
-
- virtual void Emit(Label* label) const {
- __ CompareAndBranch(lhs_, rhs_, cond_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
- }
-
- private:
- Condition cond_;
- const Register& lhs_;
- const Operand& rhs_;
-};
-
-
-// Test the input with the given mask and branch if the condition holds.
-// If the condition is 'eq' or 'ne' this will use MacroAssembler's
-// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
-// conversion to Tbz/Tbnz when possible.
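-//
-// For example, a test of a single tag bit with condition 'eq' can be emitted
-// as one Tbz instruction.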
-class TestAndBranch : public BranchGenerator {
- public:
- TestAndBranch(LCodeGen* codegen,
- Condition cond,
- const Register& value,
- uint64_t mask)
- : BranchGenerator(codegen),
- cond_(cond),
- value_(value),
- mask_(mask) { }
-
- virtual void Emit(Label* label) const {
- switch (cond_) {
- case eq:
- __ TestAndBranchIfAllClear(value_, mask_, label);
- break;
- case ne:
- __ TestAndBranchIfAnySet(value_, mask_, label);
- break;
- default:
- __ Tst(value_, mask_);
- __ B(cond_, label);
- }
- }
-
- virtual void EmitInverted(Label* label) const {
- // The inverse of "all clear" is "any set" and vice versa.
- switch (cond_) {
- case eq:
- __ TestAndBranchIfAnySet(value_, mask_, label);
- break;
- case ne:
- __ TestAndBranchIfAllClear(value_, mask_, label);
- break;
- default:
- __ Tst(value_, mask_);
- __ B(InvertCondition(cond_), label);
- }
- }
-
- private:
- Condition cond_;
- const Register& value_;
- uint64_t mask_;
-};
-
-
-// Test the input and branch if it is non-zero and not a NaN.
-class BranchIfNonZeroNumber : public BranchGenerator {
- public:
- BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
- const FPRegister& scratch)
- : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
-
- virtual void Emit(Label* label) const {
- __ Fabs(scratch_, value_);
- // Compare with 0.0. Because scratch_ is positive, the result can be one of
- // nZCv (equal), nzCv (greater) or nzCV (unordered).
- __ Fcmp(scratch_, 0.0);
- __ B(gt, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ Fabs(scratch_, value_);
- __ Fcmp(scratch_, 0.0);
- __ B(le, label);
- }
-
- private:
- const FPRegister& value_;
- const FPRegister& scratch_;
-};
-
-
-// Test the input and branch if it is a heap number.
-class BranchIfHeapNumber : public BranchGenerator {
- public:
- BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
- : BranchGenerator(codegen), value_(value) { }
-
- virtual void Emit(Label* label) const {
- __ JumpIfHeapNumber(value_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ JumpIfNotHeapNumber(value_, label);
- }
-
- private:
- const Register& value_;
-};
-
-
-// Test the input and branch if it is the specified root value.
-class BranchIfRoot : public BranchGenerator {
- public:
- BranchIfRoot(LCodeGen* codegen, const Register& value,
- Heap::RootListIndex index)
- : BranchGenerator(codegen), value_(value), index_(index) { }
-
- virtual void Emit(Label* label) const {
- __ JumpIfRoot(value_, index_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ JumpIfNotRoot(value_, index_, label);
- }
-
- private:
- const Register& value_;
- const Heap::RootListIndex index_;
-};
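-
-
-// Note: these BranchGenerator subclasses are consumed by LCodeGen's
-// branch-emission helpers (declared in lithium-codegen-a64.h), which call
-// Emit or EmitInverted depending on how the successor blocks are laid out.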
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
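- // For example, a translation of size 7 for a function with 3 parameters
- // describes an output frame of height 4.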
-
- WriteTranslation(environment->outer(), translation);
- bool has_closure_id = !info()->closure().is_null() &&
- !info()->closure().is_identical_to(environment->closure());
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- default:
- UNREACHABLE();
- }
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
-
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
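-// Returns the index of 'literal' in the deoptimization literal list, adding
-// it to the list first if it is not already present.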
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
-
- Assembler::BlockConstPoolScope scope(masm_);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- if ((code->kind() == Code::BINARY_OP_IC) ||
- (code->kind() == Code::COMPARE_IC)) {
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- InlineSmiCheckInfo::EmitNotInlined(masm());
- }
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->function()).Is(x1));
- ASSERT(ToRegister(instr->result()).Is(x0));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->constructor()).is(x1));
-
- __ Mov(x0, instr->arity());
- // No cell in x2 for construct type feedback in optimized code.
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Mov(x2, Operand(undefined_value));
-
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-
- ASSERT(ToRegister(instr->result()).is(x0));
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->constructor()).is(x1));
-
- __ Mov(x0, Operand(instr->arity()));
- __ Mov(x2, Operand(factory()->undefined_value()));
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-
- // We might need to create a holey array; look at the first argument.
- __ Peek(x10, 0);
- __ Cbz(x10, &packed_case);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ B(&done);
- __ Bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ Bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(kind, override_mode);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- }
-
- ASSERT(ToRegister(instr->result()).is(x0));
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- ASSERT(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Mov(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ Ldr(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(cp,
- Handle<HeapObject>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(
- masm(), kind, arguments, deopt_mode);
-
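- // Record each pointer spilled to a stack slot and, for safepoints with
- // registers, each pointer held in an allocatable register, so that the GC
- // can find and update them.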
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
-}
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- int count = 0;
- while (!iterator.Done()) {
- // TODO(all): Is this supposed to save just the callee-saved doubles? It
- // looks like it's saving all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
- __ Poke(value, count * kDoubleSize);
- iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- ASSERT(info()->saves_caller_doubles());
- ASSERT(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- int count = 0;
- while (!iterator.Done()) {
- // TODO(all): Is this supposed to restore just the callee-saved doubles? It
- // looks like it's restoring all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
- __ Peek(value, count * kDoubleSize);
- iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // TODO(all): Add support for stop_t FLAG in DEBUG mode.
-
- // Classic mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
- !info_->is_native()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes;
- __ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
- }
- }
-
- ASSERT(__ StackPointer().Is(jssp));
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
- frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ Claim(slots, kPointerSize);
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
-
- // Allocate a local context if needed.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is in x1.
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in x0. It replaces the context passed to us. It's
- // saved on the stack and kept live in cp.
- __ Mov(cp, x0);
- __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- Register value = x0;
- Register scratch = x3;
-
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ Ldr(value, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ Str(value, target);
- // Update the write barrier. This clobbers value and scratch.
- __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
- GetLinkRegisterState(), kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- return !is_aborted();
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- ASSERT(slots >= 0);
- __ Claim(slots);
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
-
- __ Bind(code->entry());
-
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- __ Push(lr, fp, cp);
- __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
- __ Push(fp);
- __ Add(fp, __ StackPointer(),
- StandardFrameConstants::kFixedFrameSizeFromFp);
- Comment(";;; Deferred code");
- }
-
- code->Generate();
-
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- ASSERT(frame_is_built_);
- __ Pop(xzr, cp, fp, lr);
- frame_is_built_ = false;
- }
-
- __ B(code->exit());
- }
- }
-
- // Force constant pool emission at the end of the deferred code to make
- // sure that no constant pools are emitted after it. Deferred code
- // generation is the last step that generates code; the two following
- // steps only output data used by Crankshaft.
- masm()->CheckConstPool(true, false);
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeoptJumpTable() {
- if (deopt_jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- }
- Label table_start;
- __ bind(&table_start);
- Label needs_frame;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ Bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
- int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (deopt_jump_table_[i].needs_frame) {
- ASSERT(!info()->saves_caller_doubles());
- __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
- if (needs_frame.is_bound()) {
- __ B(&needs_frame);
- } else {
- __ Bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- // TODO(jochen): Revisit the use of TmpX().
- ASSERT(info()->IsStub());
- __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ Push(lr, fp, cp, __ Tmp1());
- __ Add(fp, __ StackPointer(), 2 * kPointerSize);
- __ Call(__ Tmp0());
- }
- } else {
- if (info()->saves_caller_doubles()) {
- ASSERT(info()->IsStub());
- RestoreCallerDoubles();
- }
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- }
- masm()->CheckConstPool(false, false);
- }
-
- // Force constant pool emission at the end of the deopt jump table to make
- // sure that no constant pools are emitted after it.
- masm()->CheckConstPool(true, false);
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
- PopulateDeoptimizationData(code);
- info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
-
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
-
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length(); i < length; i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-Deoptimizer::BailoutType LCodeGen::DeoptimizeHeader(
- LEnvironment* environment,
- Deoptimizer::BailoutType* override_bailout_type) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- int id = environment->deoptimization_index();
- Deoptimizer::BailoutType bailout_type =
- info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- if (override_bailout_type) bailout_type = *override_bailout_type;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
-
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return bailout_type;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Label not_zero;
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
-
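- // Stress mode: decrement the counter and, when it reaches zero, reset it
- // to FLAG_deopt_every_n_times and force this deopt to be taken.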
- __ Push(x0, x1, x2);
- __ Mrs(x2, NZCV);
- __ Mov(x0, Operand(count));
- __ Ldr(w1, MemOperand(x0));
- __ Subs(x1, x1, 1);
- __ B(gt, &not_zero);
- __ Mov(w1, FLAG_deopt_every_n_times);
- __ Str(w1, MemOperand(x0));
- __ Pop(x2, x1, x0);  // Pop in reverse order to restore x0, x1 and x2.
- ASSERT(frame_is_built_);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- __ Unreachable();
-
- __ Bind(&not_zero);
- __ Str(w1, MemOperand(x0));
- __ Msr(NZCV, x2);
- __ Pop(x2, x1, x0);  // Pop in reverse order to restore x0, x1 and x2.
- }
-
- return bailout_type;
-}
-
-
-void LCodeGen::Deoptimize(LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type) {
- ASSERT(environment->HasBeenRegistered());
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
-
- if (info()->ShouldTrapOnDeopt()) {
- __ Debug("trap_on_deopt", __LINE__, BREAK);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to build frame, or restore caller doubles.
- if (frame_is_built_ && !info()->saves_caller_doubles()) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().bailout_type != bailout_type) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- Deoptimizer::JumpTableEntry table_entry(entry,
- bailout_type,
- !frame_is_built_);
- deopt_jump_table_.Add(table_entry, zone());
- }
- __ B(&deopt_jump_table_.last().label);
- }
-}
-
-
-void LCodeGen::Deoptimize(LEnvironment* environment) {
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- Deoptimize(environment, bailout_type);
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
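- // DeoptimizeHeader registers the environment and may emit stress-deopt
- // code; both must happen unconditionally, so it is emitted before the
- // branch over the bailout.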
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ B(InvertCondition(cond), &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ Cbnz(rt, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ Tbz(rt, rt.Is64Bits() ? kXSignBit : kWSignBit, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfSmi(Register rt,
- LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ JumpIfNotSmi(rt, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ JumpIfSmi(rt, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ JumpIfNotRoot(rt, index, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment) {
- Label dont_deopt;
- Deoptimizer::BailoutType bailout_type = DeoptimizeHeader(environment, NULL);
- __ JumpIfRoot(rt, index, &dont_deopt);
- Deoptimize(environment, bailout_type);
- __ Bind(&dont_deopt);
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
-
- if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
- ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- ASSERT((padding_size % kInstructionSize) == 0);
- InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
-
- while (padding_size > 0) {
- __ nop();
- padding_size -= kInstructionSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- // TODO(all): support zero register results, as ToRegister32.
- ASSERT((op != NULL) && op->IsRegister());
- return Register::FromAllocationIndex(op->index());
-}
-
-
-Register LCodeGen::ToRegister32(LOperand* op) const {
- ASSERT(op != NULL);
- if (op->IsConstantOperand()) {
- // The only constant we can represent here is zero: return the zero register.
- ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
- return wzr;
- } else {
- return ToRegister(op).W();
- }
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT((op != NULL) && op->IsDoubleRegister());
- return DoubleRegister::FromAllocationIndex(op->index());
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- ASSERT(op != NULL);
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- ASSERT(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- ASSERT(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand(0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-Operand LCodeGen::ToOperand32I(LOperand* op) {
- return ToOperand32(op, SIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32U(LOperand* op) {
- return ToOperand32(op, UNSIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
- ASSERT(op != NULL);
- if (op->IsRegister()) {
- return Operand(ToRegister32(op));
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
- return Operand(signedness == SIGNED_INT32
- ? constant->Integer32Value()
- : static_cast<uint32_t>(constant->Integer32Value()));
- } else {
- // Other constants not implemented.
- Abort(kToOperand32UnsupportedImmediate);
- }
- }
- // Other cases are not implemented.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
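- // Without a frame, parameter slots are addressed upwards from the stack
- // pointer: index -1 maps to offset 0, index -2 to kPointerSize, and so on.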
- ASSERT(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(op != NULL);
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
- } else {
- // The code does not have an eager frame, so access the parameter relative
- // to the stack pointer.
- return MemOperand(masm()->StackPointer(),
- ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = nv;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchGeneric(InstrType instr,
- const BranchGenerator& branch) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
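- // When one of the targets is the next emitted block, emit only the branch
- // for the other target and fall through; otherwise emit a conditional
- // branch followed by an unconditional jump.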
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- branch.Emit(chunk_->GetAssemblyLabel(left_block));
- } else {
- branch.Emit(chunk_->GetAssemblyLabel(left_block));
- __ B(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
- ASSERT((condition != al) && (condition != nv));
- BranchOnCondition branch(this, condition);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitCompareAndBranch(InstrType instr,
- Condition condition,
- const Register& lhs,
- const Operand& rhs) {
- ASSERT((condition != al) && (condition != nv));
- CompareAndBranch branch(this, condition, lhs, rhs);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitTestAndBranch(InstrType instr,
- Condition condition,
- const Register& value,
- uint64_t mask) {
- ASSERT((condition != al) && (condition != nv));
- TestAndBranch branch(this, condition, value, mask);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
- const FPRegister& value,
- const FPRegister& scratch) {
- BranchIfNonZeroNumber branch(this, value, scratch);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
- const Register& value) {
- BranchIfHeapNumber branch(this, value);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfRoot(InstrType instr,
- const Register& value,
- Heap::RootListIndex index) {
- BranchIfRoot branch(this, value, index);
- EmitBranchGeneric(instr, branch);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) {
- resolver_.Resolve(move);
- }
- }
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- // TODO(all): Try to improve this, like ARM r17925.
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
-
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- ASSERT(instr->temp() == NULL);
- int index = ToInteger32(LConstantOperand::cast(instr->index()));
- int length = ToInteger32(LConstantOperand::cast(instr->length()));
- int offset = ((length - index) + 1) * kPointerSize;
- __ Ldr(result, MemOperand(arguments, offset));
- } else {
- ASSERT(instr->temp() != NULL);
- Register temp = ToRegister32(instr->temp());
- Register length = ToRegister32(instr->length());
- Operand index = ToOperand32I(instr->index());
- // There are two words between the frame pointer and the last arguments.
- // Subtracting from length accounts for only one, so we add one more.
- __ Sub(temp, length, index);
- __ Add(temp, temp, 1);
- __ Ldr(result, MemOperand(arguments, temp, UXTW, kPointerSizeLog2));
- }
-}
-
-
-void LCodeGen::DoAddE(LAddE* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = (instr->right()->IsConstantOperand())
- ? ToInteger32(LConstantOperand::cast(instr->right()))
- : Operand(ToRegister32(instr->right()), SXTW);
-
- ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
- __ Add(result, left, right);
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToOperand32I(instr->right());
- if (can_overflow) {
- __ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Add(result, left, right);
- }
-}
-
-
-void LCodeGen::DoAddS(LAddS* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- if (can_overflow) {
- __ Adds(result, left, right);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Add(result, left, right);
- }
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
-
- if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
- } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
- }
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
- } else {
- Register size = ToRegister32(instr->size());
- __ Sxtw(size.X(), size);
- __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
- }
-
- __ Bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Mov(temp1, size - kPointerSize);
- } else {
- __ Sub(temp1.W(), ToRegister32(instr->size()), kPointerSize);
- }
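- // Untag result for the filler loop below; temp1 counts down from the last
- // word of the allocation to offset 0.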
- __ Sub(result, result, kHeapObjectTag);
-
- // TODO(jbramley): Optimize this loop using stp.
- Label loop;
- __ Bind(&loop);
- __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map()));
- __ Str(temp2, MemOperand(result, temp1));
- __ Subs(temp1, temp1, kPointerSize);
- __ B(ge, &loop);
-
- __ Add(result, result, kHeapObjectTag);
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // We're in a SafepointRegistersScope so we can use any scratch registers.
- Register size = x0;
- if (instr->size()->IsConstantOperand()) {
- __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
- } else {
- __ SmiTag(size, ToRegister32(instr->size()).X());
- }
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
- } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
- ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Mov(x10, Operand(Smi::FromInt(flags)));
- __ Push(size, x10);
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister32(instr->length());
-
- Register elements = ToRegister(instr->elements());
- Register scratch = x5;
- ASSERT(receiver.Is(x0)); // Used for parameter count.
- ASSERT(function.Is(x1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).Is(x0));
- ASSERT(instr->IsMarkedAsCall());
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
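- // Deoptimize if more than kArgumentsLimit arguments are passed; the loop
- // below pushes each argument individually.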
- __ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr->environment());
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ Push(receiver);
- Register argc = receiver;
- receiver = NoReg;
- __ Sxtw(argc, length);
- // The arguments begin one pointer size past elements.
- __ Add(elements, elements, 1 * kPointerSize);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ Cbz(length, &invoke);
- __ Bind(&loop);
- __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
- __ Push(scratch);
- __ Subs(length, length, 1);
- __ B(ne, &loop);
-
- __ Bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in argc (receiver) which is x0, as
- // expected by InvokeFunction.
- ParameterCount actual(argc);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- // Inside an inlined function, the arguments are the last things pushed on
- // the stack, so the arguments array can be accessed directly from jssp.
- // In the normal case it is accessed via fp, but there are two words on the
- // stack between fp and the arguments (the saved lr and fp), and the
- // LAccessArgumentsAt implementation takes that into account.
- // In the inlined case we therefore subtract two words from jssp to get a
- // pointer that works with LAccessArgumentsAt.
- ASSERT(masm()->StackPointer().Is(jssp));
- __ Sub(result, jssp, 2 * kPointerSize);
- } else {
- ASSERT(instr->temp() != NULL);
- Register previous_fp = ToRegister(instr->temp());
-
- __ Ldr(previous_fp,
- MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result,
- MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
- __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ Csel(result, fp, previous_fp, ne);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister32(instr->result());
- Label done;
-
- // DoArgumentsElements yields fp when there is no arguments adaptor frame;
- // in that case the number of arguments is fixed.
- __ Cmp(fp, elements);
- __ Mov(result, scope()->num_parameters());
- __ B(eq, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result,
- UntagSmiMemOperand(result.X(),
- ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Argument length is in result register.
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- switch (instr->op()) {
- case Token::ADD: __ Fadd(result, left, right); break;
- case Token::SUB: __ Fsub(result, left, right); break;
- case Token::MUL: __ Fmul(result, left, right); break;
- case Token::DIV: __ Fdiv(result, left, right); break;
- case Token::MOD: {
- // The ECMA-262 remainder operator is the remainder from a truncating
- // (round-towards-zero) division. Note that this differs from IEEE-754.
- //
- // TODO(jbramley): See if it's possible to do this inline, rather than by
- // calling a helper function. With frintz (to produce the intermediate
- // quotient) and fmsub (to calculate the remainder without loss of
- // precision), it should be possible. However, we would need support for
- // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
- // that yet.
- ASSERT(left.Is(d0));
- ASSERT(right.Is(d1));
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- ASSERT(result.Is(d0));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).is(x1));
- ASSERT(ToRegister(instr->right()).is(x0));
- ASSERT(ToRegister(instr->result()).is(x0));
-
- BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToOperand32U(instr->right());
-
- switch (instr->op()) {
- case Token::BIT_AND: __ And(result, left, right); break;
- case Token::BIT_OR: __ Orr(result, left, right); break;
- case Token::BIT_XOR: __ Eor(result, left, right); break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoBitS(LBitS* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
-
- switch (instr->op()) {
- case Token::BIT_AND: __ And(result, left, right); break;
- case Token::BIT_OR: __ Orr(result, left, right); break;
- case Token::BIT_XOR: __ Eor(result, left, right); break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
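- // If the bounds check was statically eliminated, only verify it with an
- // assertion in debug code; otherwise deoptimize when cc holds.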
- if (FLAG_debug_code && check->hydrogen()->skip_check()) {
- __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
- } else {
- DeoptimizeIf(cc, check->environment());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
- Register length = ToRegister32(instr->length());
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
-
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ Cmp(length, Operand(Smi::FromInt(constant_index)));
- } else {
- __ Cmp(length, Operand(constant_index));
- }
- } else {
- ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
- __ Cmp(length, ToRegister32(instr->index()));
- }
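- // Deopt when length < index if equality is allowed, or when
- // length <= index otherwise.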
- Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
- ApplyCheckIf(condition, instr);
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
-
- if (r.IsInteger32()) {
- ASSERT(!info()->IsStub());
- EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
- } else if (r.IsSmi()) {
- ASSERT(!info()->IsStub());
- STATIC_ASSERT(kSmiTag == 0);
- EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
- } else if (r.IsDouble()) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- EmitBranchIfNonZeroNumber(instr, value, double_scratch());
- } else {
- ASSERT(r.IsTagged());
- Register value = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
-
- if (type.IsBoolean()) {
- ASSERT(!info()->IsStub());
- __ CompareRoot(value, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
- } else if (type.IsSmi()) {
- ASSERT(!info()->IsStub());
- EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
- } else if (type.IsJSArray()) {
- ASSERT(!info()->IsStub());
- EmitGoto(instr->TrueDestination(chunk()));
- } else if (type.IsHeapNumber()) {
- ASSERT(!info()->IsStub());
- __ Ldr(double_scratch(), FieldMemOperand(value,
- HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
- } else if (type.IsString()) {
- ASSERT(!info()->IsStub());
- Register temp = ToRegister(instr->temp1());
- __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
- EmitCompareAndBranch(instr, ne, temp, 0);
- } else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ JumpIfRoot(
- value, Heap::kUndefinedValueRootIndex, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // Boolean -> its value.
- __ JumpIfRoot(
- value, Heap::kTrueValueRootIndex, true_label);
- __ JumpIfRoot(
- value, Heap::kFalseValueRootIndex, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ JumpIfRoot(
- value, Heap::kNullValueRootIndex, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all others -> true.
- ASSERT(Smi::FromInt(0) == 0);
- __ Cbz(value, false_label);
- __ JumpIfSmi(value, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr->environment());
- }
-
- Register map = NoReg;
- Register scratch = NoReg;
-
- if (expected.NeedsMap()) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
- map = ToRegister(instr->temp1());
- scratch = ToRegister(instr->temp2());
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- scratch, 1 << Map::kIsUndetectable, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
- __ B(ge, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
- __ B(ge, &not_string);
- __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
- __ Cbz(scratch, false_label);
- __ B(true_label);
- __ Bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
- // Symbol value -> true.
- __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
- __ B(eq, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- Label not_heap_number;
- __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
-
- __ Ldr(double_scratch(),
- FieldMemOperand(value, HeapNumber::kValueOffset));
- __ Fcmp(double_scratch(), 0.0);
- // If we got a NaN (overflow bit is set), jump to the false branch.
- __ B(vs, false_label);
- __ B(eq, false_label);
- __ B(true_label);
- __ Bind(&not_heap_number);
- }
-
- if (!expected.IsGeneric()) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- Deoptimize(instr->environment());
- }
- }
- }
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- Register function_reg) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- // The function interface relies on the following register assignments.
- ASSERT(function_reg.Is(x1) || function_reg.IsNone());
- Register arity_reg = x0;
-
- LPointerMap* pointers = instr->pointer_map();
-
- // If necessary, load the function object.
- if (function_reg.IsNone()) {
- function_reg = x1;
- __ LoadObject(function_reg, function);
- }
-
- if (FLAG_debug_code) {
- Label is_not_smi;
- // Try to confirm that function_reg (x1) is a tagged pointer.
- __ JumpIfNotSmi(function_reg, &is_not_smi);
- __ Abort(kExpectedFunctionObject);
- __ Bind(&is_not_smi);
- }
-
- if (can_invoke_directly) {
- // Change context.
- __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Set the arguments count if adaptation is not needed. Assumes that x0 is
- // available to write to at this point.
- if (dont_adapt_arguments) {
- __ Mov(arity_reg, arity);
- }
-
- // Invoke function.
- __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(x10);
-
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->result()).Is(x0));
-
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- // TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on A64 we only have one storage mode so it isn't necessary. Check
- // that this understanding is correct.
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
- } else {
- ASSERT(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Call(target);
- }
- generator.AfterCall();
-}
-
-
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToRegister(instr->function()).is(x1));
-
- if (instr->hydrogen()->pass_argument_count()) {
- __ Mov(x0, Operand(instr->arity()));
- }
-
- // Change context.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Load the code entry address.
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ Call(x10);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->result()).is(x0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Register temp = ToRegister(instr->temp());
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ Push(object);
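- // Clear cp: kTryMigrateInstance does not appear to use the context (our
- // reading; the original code leaves this undocumented).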
- __ Mov(cp, 0);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, temp);
- }
- DeoptimizeIfSmi(temp, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- virtual void Generate() {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->CanOmitMapChecks()) {
- ASSERT(instr->value() == NULL);
- ASSERT(instr->temp() == NULL);
- return;
- }
-
- Register object = ToRegister(instr->value());
- Register map_reg = ToRegister(instr->temp());
-
- __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, object);
- __ Bind(deferred->check_maps());
- }
-
- UniqueSet<Map> map_set = instr->hydrogen()->map_set();
- Label success;
- for (int i = 0; i < map_set.size(); i++) {
- Handle<Map> map = map_set.at(i).handle();
- __ CompareMap(map_reg, map, &success);
- __ B(eq, &success);
- }
-
- // We didn't match a map.
- if (instr->hydrogen()->has_migration_target()) {
- __ B(deferred->entry());
- } else {
- Deoptimize(instr->environment());
- }
-
- __ Bind(&success);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- // TODO(all): Depending on how we choose to implement the deopt, if we could
- // guarantee that we have a deopt handler reachable by a tbz instruction,
- // we could use tbz here and produce less code to support this instruction.
- DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- Register value = ToRegister(instr->value());
- ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
- // TODO(all): See DoCheckNonSmi for comments on use of tbz.
- DeoptimizeIfNotSmi(value, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first, last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ Cmp(scratch, first);
- if (first == last) {
- // If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr->environment());
- } else if (last == LAST_TYPE) {
- // We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr->environment());
- } else {
- // If we are below the lower bound, set the C flag and clear the Z flag
- // to force a deopt.
- __ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr->environment());
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT((tag == 0) || (tag == mask));
- // TODO(all): We might be able to use tbz/tbnz if we can guarantee that
- // the deopt handler is reachable by a tbz instruction.
- __ Tst(scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
- } else {
- if (tag == 0) {
- __ Tst(scratch, mask);
- } else {
- __ And(scratch, scratch, mask);
- __ Cmp(scratch, tag);
- }
- DeoptimizeIf(ne, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister input = ToDoubleRegister(instr->unclamped());
- Register result = ToRegister32(instr->result());
- __ ClampDoubleToUint8(result, input, double_scratch());
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register input = ToRegister32(instr->unclamped());
- Register result = ToRegister32(instr->result());
- __ ClampInt32ToUint8(result, input);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register input = ToRegister(instr->unclamped());
- Register result = ToRegister32(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Label done;
-
- // Both smi and heap number cases are handled.
- Label is_not_smi;
- __ JumpIfNotSmi(input, &is_not_smi);
- __ SmiUntag(result.X(), input);
- __ ClampInt32ToUint8(result);
- __ B(&done);
-
- __ Bind(&is_not_smi);
-
- // Check for heap number.
- Label is_heap_number;
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
-
- // Check for undefined. Undefined is converted to zero for the clamping
- // conversion.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
- __ Mov(result, 0);
- __ B(&done);
-
- // Heap number case.
- __ Bind(&is_heap_number);
- DoubleRegister dbl_scratch = double_scratch();
- DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
- __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Register input = ToRegister(instr->value());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- __ JumpIfSmi(input, false_label);
-
- Register map = scratch2;
- if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- // We expect CompareObjectType to load the object instance type in scratch1.
- __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, false_label);
- __ B(eq, true_label);
- __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
- __ B(eq, true_label);
- } else {
- __ IsObjectJSObjectType(input, map, scratch1, false_label);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
- __ JumpIfNotObjectType(
- scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
- } else {
- __ JumpIfNotObjectType(
- scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
- }
-
- // The constructor function is in scratch1. Get its instance class name.
- __ Ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch1,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kInstanceClassNameOffset));
-
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
-}
-
-
-void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- FPRegister object = ToDoubleRegister(instr->object());
- Register temp = ToRegister(instr->temp());
-
- // If we don't have a NaN, we don't have the hole, so branch now to avoid the
- // (relatively expensive) hole-NaN check.
- __ Fcmp(object, object);
- __ B(vc, instr->FalseLabel(chunk_));
-
- // We have a NaN, but is it the hole?
- __ Fmov(temp, object);
- EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
- ASSERT(instr->hydrogen()->representation().IsTagged());
- Register object = ToRegister(instr->object());
-
- EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register value = ToRegister(instr->value());
- Register map = ToRegister(instr->temp());
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- ASSERT(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
- instr->TrueLabel(chunk()));
- } else {
- Register value = ToRegister(instr->value());
- __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
- __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
- }
- EmitGoto(instr->FalseDestination(chunk()));
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- Condition cond = TokenToCondition(instr->op(), false);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- if (right->IsConstantOperand()) {
- __ Fcmp(ToDoubleRegister(left),
- ToDouble(LConstantOperand::cast(right)));
- } else if (left->IsConstantOperand()) {
- // Transpose the operands and reverse the condition.
- __ Fcmp(ToDoubleRegister(right),
- ToDouble(LConstantOperand::cast(left)));
- cond = ReverseConditionForCmp(cond);
- } else {
- __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
- }
-
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ B(vs, instr->FalseLabel(chunk_));
- EmitBranch(instr, cond);
- } else {
- if (instr->hydrogen_value()->representation().IsInteger32()) {
- if (right->IsConstantOperand()) {
- EmitCompareAndBranch(instr,
- cond,
- ToRegister32(left),
- ToOperand32I(right));
- } else {
- // Transpose the operands and reverse the condition.
- EmitCompareAndBranch(instr,
- ReverseConditionForCmp(cond),
- ToRegister32(right),
- ToOperand32I(left));
- }
- } else {
- ASSERT(instr->hydrogen_value()->representation().IsSmi());
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- EmitCompareAndBranch(instr,
- cond,
- ToRegister(left),
- Operand(Smi::FromInt(value)));
- } else if (left->IsConstantOperand()) {
- // Transpose the operands and reverse the condition.
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- EmitCompareAndBranch(instr,
- ReverseConditionForCmp(cond),
- ToRegister(right),
- Operand(Smi::FromInt(value)));
- } else {
- EmitCompareAndBranch(instr,
- cond,
- ToRegister(left),
- ToRegister(right));
- }
- }
- }
- }
-}
-
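-// Transposing Cmp operands needs the *reversed* condition (operand roles
-// swapped), not the inverted one; inverting instead (lt -> ge) would compute
-// the wrong branch. A sketch of the mapping a helper like
-// ReverseConditionForCmp must implement for the conditions used here.
-enum SketchCondition { kEq, kNe, kLt, kGt, kLe, kGe };
-
-static SketchCondition ReverseForTransposedCmp(SketchCondition cond) {
-  switch (cond) {
-    case kLt: return kGt;   // (a < b)  is  (b > a).
-    case kGt: return kLt;
-    case kLe: return kGe;   // (a <= b) is  (b >= a).
-    case kGe: return kLe;
-    default:  return cond;  // eq and ne are symmetric in their operands.
-  }
-}
-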
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- EmitCompareAndBranch(instr, eq, left, right);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
- Condition cond = TokenToCondition(op, false);
-
- ASSERT(ToRegister(instr->left()).Is(x1));
- ASSERT(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // Signal that we don't inline smi code before this stub.
- InlineSmiCheckInfo::EmitNotInlined(masm());
-
- // Return true or false depending on CompareIC result.
- // This instruction is marked as call. We can clobber any register.
- ASSERT(instr->IsMarkedAsCall());
- __ LoadTrueFalseRoots(x1, x2);
- __ Cmp(x0, 0);
- __ Csel(ToRegister(instr->result()), x1, x2, cond);
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fmov(result, instr->value());
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ Mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(is_int32(instr->value()));
-  // Cast the value here to ensure that it isn't sign-extended by the
-  // implicit Operand constructor.
- __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- ASSERT(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Register temp = ToRegister(instr->temp());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ Mov(temp, Operand(Handle<Object>(cell)));
- __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
- __ Cmp(reg, temp);
- } else {
- __ Cmp(reg, Operand(object));
- }
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register temp1 = x10;
- Register temp2 = x11;
- Smi* index = instr->index();
- Label runtime, done, deopt, obj_ok;
-
- ASSERT(object.is(result) && object.Is(x0));
- ASSERT(instr->IsMarkedAsCall());
-
- __ JumpIfSmi(object, &deopt);
- __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- __ B(eq, &obj_ok);
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- __ Bind(&obj_ok);
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(temp1, Operand(stamp));
- __ Ldr(temp1, MemOperand(temp1));
- __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(temp1, temp2);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- }
-
- __ Bind(&done);
-}
-
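-// A sketch (descriptive names, not V8 API) of the caching protocol the fast
-// path above implements: a cached date field is valid only while the stamp
-// stored on the date object matches the isolate's current date cache stamp;
-// otherwise, and for uncached fields, control falls through to the runtime.
-#include <cstdint>
-
-struct DateLike {           // Hypothetical stand-in for JSDate.
-  int64_t value;            // The time value (index 0) is never stale.
-  int64_t cache_stamp;      // Compared against the isolate-wide stamp.
-  int64_t cached_fields[4];
-};
-
-static int64_t GetDateField(DateLike* date, int index, int first_uncached,
-                            int64_t current_stamp,
-                            int64_t (*runtime_get)(DateLike*, int)) {
-  if (index == 0) return date->value;
-  if ((index < first_uncached) && (date->cache_stamp == current_stamp)) {
-    return date->cached_fields[index - 1];  // Fast cached load.
-  }
-  return runtime_get(date, index);  // The CallCFunction path above.
-}
-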
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
- type = Deoptimizer::LAZY;
- }
-
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
- DeoptimizeHeader(instr->environment(), &type);
- Deoptimize(instr->environment(), type);
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- HDiv* hdiv = instr->hydrogen();
- Register dividend = ToRegister32(instr->left());
- int32_t divisor = hdiv->right()->GetInteger32Constant();
- Register result = ToRegister32(instr->result());
- ASSERT(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
- hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(dividend, 0);
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
- hdiv->CheckFlag(HValue::kCanOverflow)) {
- __ Cmp(dividend, kMinInt);
- DeoptimizeIf(eq, instr->environment());
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- Abs(divisor) != 1) {
- __ Tst(dividend, Abs(divisor) - 1);
- DeoptimizeIf(ne, instr->environment());
- }
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ Neg(result, dividend);
- return;
- }
- int32_t shift = WhichPowerOf2(Abs(divisor));
- if (shift == 0) {
- __ Mov(result, dividend);
- } else if (shift == 1) {
- __ Add(result, dividend, Operand(dividend, LSR, 31));
- } else {
- __ Mov(result, Operand(dividend, ASR, 31));
- __ Add(result, dividend, Operand(result, LSR, 32 - shift));
- }
- if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
- if (divisor < 0) __ Neg(result, result);
- return;
- }
-
- Register dividend = ToRegister32(instr->left());
- Register divisor = ToRegister32(instr->right());
- Register result = ToRegister32(instr->result());
- HValue* hdiv = instr->hydrogen_value();
-
- // Issue the division first, and then check for any deopt cases whilst the
- // result is computed.
- __ Sdiv(result, dividend, divisor);
-
- if (hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- ASSERT_EQ(NULL, instr->temp());
- return;
- }
-
- Label deopt;
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ Cbz(divisor, &deopt);
- }
-
- // Check for (0 / -x) as that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(divisor, 0);
-
-    // If the divisor < 0 (mi), compare the dividend and deopt if it is
-    // zero; i.e. a zero dividend with a negative divisor deopts.
-    // If the divisor >= 0 (pl, the opposite of mi), set the flags to
-    // condition ne so we don't deopt; i.e. a non-negative divisor never
-    // deopts here.
- __ Ccmp(dividend, 0, NoFlag, mi);
- __ B(eq, &deopt);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- // Test dividend for kMinInt by subtracting one (cmp) and checking for
- // overflow.
- __ Cmp(dividend, 1);
-    // If overflow is set, i.e. dividend == kMinInt, compare the divisor with
-    // -1. If overflow is clear, set the flags for condition ne, as the
-    // dividend isn't kMinInt, and thus we shouldn't deopt.
- __ Ccmp(divisor, -1, NoFlag, vs);
- __ B(eq, &deopt);
- }
-
- // Compute remainder and deopt if it's not zero.
- Register remainder = ToRegister32(instr->temp());
- __ Msub(remainder, result, divisor, dividend);
- __ Cbnz(remainder, &deopt);
-
- Label div_ok;
- __ B(&div_ok);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- __ Bind(&div_ok);
-}
-
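-// A standalone sketch of the power-of-two path above. A plain arithmetic
-// shift rounds towards -infinity, so negative dividends are first biased by
-// (2^shift - 1); the shift then truncates towards zero, matching Sdiv.
-// Assumes 1 <= shift <= 31; shift == 0 and negative divisors are handled
-// separately (the Mov and the final Neg) in the code above.
-#include <cstdint>
-
-static int32_t DivByPowerOf2(int32_t dividend, int shift) {
-  // All-ones for a negative dividend, zero otherwise (the ASR #31 step).
-  uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);
-  // For negative dividends this is 2^shift - 1 (the LSR #(32-shift) step).
-  int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));
-  return (dividend + bias) >> shift;  // Final arithmetic shift.
-}
-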
-
-void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister32(instr->result());
- Label done, deopt;
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ JumpIfMinusZero(input, &deopt);
- }
-
- __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- __ Bind(&done);
-
- if (instr->tag_result()) {
- __ SmiTag(result.X());
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
-  // The FunctionLiteral instruction is marked as a call, so we can trash any
-  // register.
- ASSERT(instr->IsMarkedAsCall());
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
- instr->hydrogen()->is_generator());
- __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
- __ Mov(x1, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, x2, x1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
-
- __ EnumLengthUntagged(result, map);
- __ Cbnz(result, &load_cache);
-
- __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ B(&done);
-
- __ Bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr->environment());
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Register object = ToRegister(instr->object());
- Register null_value = x5;
-
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(object.Is(x0));
-
- Label deopt;
-
- __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
-
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Cmp(object, null_value);
- __ B(eq, &deopt);
-
- __ JumpIfSmi(object, &deopt);
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
- __ B(le, &deopt);
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
-
- __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- __ B(&use_cache);
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- // Get the set of properties to enumerate.
- __ Bind(&call_runtime);
- __ Push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
-
- __ Bind(&use_cache);
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- // Assert that we can use a W register load to get the hash.
- ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize);
- __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- // Do not emit jump if we are emitting a goto to the next block.
- if (!IsNextEmittedBlock(block)) {
- __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister32(instr->temp());
-
- // Assert that the cache status bits fit in a W register.
- ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
- __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
- __ Tst(temp, String::kContainsCachedArrayIndexMask);
- EmitBranch(instr, eq);
-}
-
-
-// The HHasInstanceTypeAndBranch instruction is built with an interval of
-// types to test, but is only used in very restricted ways. The only possible
-// kinds of intervals are:
-//  - [ FIRST_TYPE, instr->to() ]
-//  - [ instr->from(), LAST_TYPE ]
-//  - instr->from() == instr->to()
-//
-// These kinds of intervals can be checked with only one compare instruction,
-// provided the correct value and test condition are used.
-//
-// TestType() will return the value to use in the compare instruction and
-// BranchCondition() will return the condition to use depending on the kind
-// of interval actually specified in the instruction.
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT((from == to) || (to == LAST_TYPE));
- return from;
-}
-
-
-// See comment above TestType function for what this function does.
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
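-// How a single compare covers all three interval shapes, as a scalar sketch:
-// TestType() picks the bound to compare against and BranchCondition() the
-// relation (eq, hs and ls are unsigned conditions). Illustrative only.
-static bool InInstanceTypeInterval(unsigned type, unsigned from, unsigned to,
-                                   unsigned first_type, unsigned last_type) {
-  if (from == to)         return type == to;    // Cmp(type, to);   eq.
-  if (to == last_type)    return type >= from;  // Cmp(type, from); hs.
-  if (from == first_type) return type <= to;    // Cmp(type, to);   ls.
-  return false;  // Other intervals never occur (see UNREACHABLE above).
-}
-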
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- __ Add(result, base, ToOperand32I(instr->offset()));
- } else {
- __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
- }
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- // Assert that the arguments are in the registers expected by InstanceofStub.
- ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
- ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- // InstanceofStub returns a result in x0:
- // 0 => not an instance
- // smi 1 => instance.
- __ Cmp(x0, 0);
- __ LoadTrueFalseRoots(x0, x1);
- __ Csel(x0, x0, x1, eq);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred =
- new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label map_check, return_false, cache_miss, done;
- Register object = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- // x4 is expected in the associated deferred code and stub.
- Register map_check_site = x4;
- Register map = x5;
-
- // This instruction is marked as call. We can clobber any register.
- ASSERT(instr->IsMarkedAsCall());
-
- // We must take into account that object is in x11.
- ASSERT(object.Is(x11));
- Register scratch = x10;
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &return_false);
-
-  // This is the inlined call site instanceof cache. The two occurrences of
-  // the hole value will be patched to the last map/result pair generated by
-  // the instanceof stub.
- __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
-    // We deliberately use Factory::the_hole_value() below, rather than
-    // loading from the root array, to force relocation so that the value can
-    // be patched later with a custom value.
- InstructionAccurateScope scope(masm(), 5);
- __ bind(&map_check);
- // Will be patched with the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ cmp(map, Operand(scratch));
- __ b(&cache_miss, ne);
- // The address of this instruction is computed relative to the map check
- // above, so check the size of the code generated.
- ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
- // Will be patched with the cached result.
- __ LoadRelocated(result, Operand(factory()->the_hole_value()));
- }
- __ B(&done);
-
- // The inlined call site cache did not match.
- // Check null and string before calling the deferred code.
- __ Bind(&cache_miss);
- // Compute the address of the map check. It must not be clobbered until the
- // InstanceOfStub has used it.
- __ Adr(map_check_site, &map_check);
- // Null is not instance of anything.
- __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
-
- // String values are not instances of anything.
- // Return false if the object is a string. Otherwise, jump to the deferred
- // code.
- // Note that we can't jump directly to deferred code from
- // IsObjectJSStringType, because it uses tbz for the jump and the deferred
- // code can be out of range.
- __ IsObjectJSStringType(object, scratch, NULL, &return_false);
- __ B(deferred->entry());
-
- __ Bind(&return_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result is either true or false.
- __ Bind(deferred->exit());
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Register result = ToRegister(instr->result());
- ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- LoadContextFromDeferred(instr->context());
-
- // Prepare InstanceofStub arguments.
- ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
- __ LoadObject(InstanceofStub::right(), instr->function());
-
- InstanceofStub stub(flags);
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-
- // Put the result value into the result register slot.
- __ StoreToSafepointRegisterSlot(result, result);
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Register value = ToRegister32(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Scvtf(result, value);
-}
-
-
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- // A64 smis can represent all Integer32 values, so this cannot deoptimize.
- ASSERT(!instr->hydrogen()->value()->HasRange() ||
- instr->hydrogen()->value()->range()->IsInSmiRange());
-
- Register value = ToRegister32(instr->value());
- Register result = ToRegister(instr->result());
- __ SmiTag(result, value.X());
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- // The function is required to be in x1.
- ASSERT(ToRegister(instr->function()).is(x1));
- ASSERT(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
- } else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(),
- instr,
- x1);
- }
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- // Get the frame pointer for the calling frame.
- __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-
- EmitCompareAndBranch(
- instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Label* is_object = instr->TrueLabel(chunk_);
- Label* is_not_object = instr->FalseLabel(chunk_);
- Register value = ToRegister(instr->value());
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, is_not_object);
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-
- // Check for undetectable objects.
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
-
- // Check that instance type is in object type range.
- __ IsInstanceJSObjectType(map, scratch, NULL);
- // Flags have been updated by IsInstanceJSObjectType. We can now test the
- // flags for "le" condition to check if the object's type is a valid
- // JS object type.
- EmitBranch(instr, le);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register val = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register value = ToRegister(instr->value());
- STATIC_ASSERT(kSmiTag == 0);
- EmitTestAndBranch(instr, eq, value, kSmiTagMask);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
-
- EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
-
- __ Bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
- } else {
- Label not_the_hole;
- __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&not_the_hole);
- }
- }
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
- Label deopt;
-
- // Check that the function really is a function. Leaves map in the result
- // register.
- __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
- __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ Ldr(result, FieldMemOperand(function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, temp, temp, MAP_TYPE);
- __ B(ne, &done);
-
- // Get the prototype from the initial map.
- __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ B(&done);
-
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- __ Bind(&non_instance);
- __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
- __ B(&done);
-
- // Deoptimize case.
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- // All done.
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(
- result, Heap::kTheHoleValueRootIndex, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->global_object()).Is(x0));
- ASSERT(ToRegister(instr->result()).Is(x0));
- __ Mov(x2, Operand(instr->name()));
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
- Register key,
- Register base,
- Register scratch,
- bool key_is_smi,
- bool key_is_constant,
- int constant_key,
- ElementsKind elements_kind,
- int additional_index) {
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
-
- if (key_is_constant) {
- int base_offset = ((constant_key + additional_index) << element_size_shift);
- return MemOperand(base, base_offset + additional_offset);
- }
-
- if (additional_index == 0) {
- if (key_is_smi) {
- // Key is smi: untag, and scale by element size.
- __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
- return MemOperand(scratch, additional_offset);
- } else {
-      // Key is not smi: sign-extend it and scale by element size.
- if (additional_offset == 0) {
- return MemOperand(base, key, SXTW, element_size_shift);
- } else {
- __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
- return MemOperand(scratch, additional_offset);
- }
- }
- } else {
- // TODO(all): Try to combine these cases a bit more intelligently.
- if (additional_offset == 0) {
- if (key_is_smi) {
- __ SmiUntag(scratch, key);
- __ Add(scratch.W(), scratch.W(), additional_index);
- } else {
- __ Add(scratch.W(), key.W(), additional_index);
- }
- return MemOperand(base, scratch, LSL, element_size_shift);
- } else {
- if (key_is_smi) {
- __ Add(scratch, base,
- Operand::UntagSmiAndScale(key, element_size_shift));
- } else {
- __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
- }
- return MemOperand(
- scratch,
- (additional_index << element_size_shift) + additional_offset);
- }
- }
-}
-
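-// All the instruction selections above compute the same address; written as
-// one plain expression it is the following, with descriptive (non-V8) names.
-#include <cstdint>
-
-static uint8_t* KeyedElementAddress(uint8_t* base,
-                                    int64_t key,  // Untagged, sign-extended.
-                                    int64_t additional_index,
-                                    int element_size_shift,  // log2(size).
-                                    int64_t additional_offset) {
-  return base + ((key + additional_index) << element_size_shift) +
-         additional_offset;
-}
-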
-
-void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
- Register ext_ptr = ToRegister(instr->elements());
- Register scratch;
- ElementsKind elements_kind = instr->elements_kind();
-
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- int constant_key = 0;
- if (key_is_constant) {
- ASSERT(instr->temp() == NULL);
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- scratch = ToRegister(instr->temp());
- key = ToRegister(instr->key());
- }
-
- MemOperand mem_op =
- PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
- key_is_constant, constant_key,
- elements_kind,
- instr->additional_index());
-
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result.S(), mem_op);
- __ Fcvt(result, result.S());
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result, mem_op);
- } else {
- Register result = ToRegister(instr->result());
-
- switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
- case INT8_ELEMENTS:
- __ Ldrsb(result, mem_op);
- break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ Ldrb(result, mem_op);
- break;
- case EXTERNAL_INT16_ELEMENTS:
- case INT16_ELEMENTS:
- __ Ldrsh(result, mem_op);
- break;
- case EXTERNAL_UINT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ Ldrh(result, mem_op);
- break;
- case EXTERNAL_INT32_ELEMENTS:
- case INT32_ELEMENTS:
- __ Ldrsw(result, mem_op);
- break;
- case EXTERNAL_UINT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ Ldr(result.W(), mem_op);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- // Deopt if value > 0x80000000.
- __ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr->environment());
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind) {
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
-
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced during
- // bounds check elimination with the index argument to the bounds check, which
- // can be tagged, so that case must be handled here, too.
- if (key_is_tagged) {
- __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
- } else {
-    // Sign-extend the key because it could be a 32-bit negative value or
-    // contain garbage in the top 32 bits. The address computation happens in
-    // 64 bits.
- ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
- __ Add(base, elements, Operand(key, SXTW, element_size_shift));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
- Register elements = ToRegister(instr->elements());
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register load_base;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
- (instr->temp() == NULL));
-
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- load_base = elements;
- } else {
- load_base = ToRegister(instr->temp());
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
- }
- __ Ldr(result, FieldMemOperand(load_base, offset));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Register scratch = ToRegister(instr->temp());
-
- // TODO(all): Is it faster to reload this value to an integer register, or
- // move from fp to integer?
- __ Fmov(scratch, result);
- __ Cmp(scratch, kHoleNanInt64);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register load_base;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- ASSERT(instr->temp() == NULL);
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- load_base = elements;
- } else {
- load_base = ToRegister(instr->temp());
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- Representation representation = instr->hydrogen()->representation();
-
- if (representation.IsInteger32() &&
- instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
- __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
- Representation::Integer32());
- } else {
- __ Load(result, FieldMemOperand(load_base, offset),
- representation);
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr->environment());
- } else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
- instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).Is(x1));
- ASSERT(ToRegister(instr->key()).Is(x0));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- ASSERT(ToRegister(instr->result()).Is(x0));
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- __ Load(result, MemOperand(object, offset), access.representation());
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- FPRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- Register source;
- if (access.IsInobject()) {
- source = object;
- } else {
- // Load the properties array, using result as a scratch register.
- __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- source = result;
- }
-
- if (access.representation().IsSmi() &&
- instr->hydrogen()->representation().IsInteger32()) {
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
- __ Load(result, UntagSmiFieldMemOperand(source, offset),
- Representation::Integer32());
- } else {
- __ Load(result, FieldMemOperand(source, offset), access.representation());
- }
-}
-
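-// Why the untagged load above works: with 32-bit smis tagged by a 32-bit
-// shift, the payload sits in the upper word of the 64-bit field, so on a
-// little-endian target a W-sized load at offset + 4 (presumably what
-// UntagSmiFieldMemOperand arranges) yields the integer directly. A sketch:
-#include <cstdint>
-
-static int32_t LoadSmiAsInt32(const uint64_t* slot) {
-  // Tagged form: payload in bits [63:32], tag zero in bits [31:0].
-  return static_cast<int32_t>(*slot >> 32);  // What the W load reads.
-}
-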
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
- ASSERT(ToRegister(instr->object()).is(x0));
- __ Mov(x2, Operand(instr->name()));
-
- Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- ASSERT(ToRegister(instr->result()).is(x0));
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLengthSmi(result, map);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fabs(result, input);
- } else if (r.IsSmi() || r.IsInteger32()) {
- Register input = r.IsSmi() ? ToRegister(instr->value())
- : ToRegister32(instr->value());
- Register result = r.IsSmi() ? ToRegister(instr->result())
- : ToRegister32(instr->result());
- Label done;
- __ Abs(result, input, NULL, &done);
- Deoptimize(instr->environment());
- __ Bind(&done);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
- Label* exit,
- Label* allocation_entry) {
- // Handle the tricky cases of MathAbsTagged:
- // - HeapNumber inputs.
- // - Negative inputs produce a positive result, so a new HeapNumber is
- // allocated to hold it.
- // - Positive inputs are returned as-is, since there is no need to allocate
- // a new HeapNumber for the result.
-  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
-  //    in a smi. In this case, the inline code sets the result and jumps
-  //    directly to the allocation_entry label.
- ASSERT(instr->context() != NULL);
- ASSERT(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Register result_bits = ToRegister(instr->temp3());
- Register result = ToRegister(instr->result());
-
- Label runtime_allocation;
-
- // Deoptimize if the input is not a HeapNumber.
- __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
-
- // If the argument is positive, we can return it as-is, without any need to
- // allocate a new HeapNumber for the result. We have to do this in integer
- // registers (rather than with fabs) because we need to be able to distinguish
- // the two zeroes.
- __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ Mov(result, input);
- __ Tbz(result_bits, kXSignBit, exit);
-
- // Calculate abs(input) by clearing the sign bit.
- __ Bic(result_bits, result_bits, kXSignMask);
-
- // Allocate a new HeapNumber to hold the result.
- // result_bits The bit representation of the (double) result.
- __ Bind(allocation_entry);
- __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
- // The inline (non-deferred) code will store result_bits into result.
- __ B(exit);
-
- __ Bind(&runtime_allocation);
- if (FLAG_debug_code) {
- // Because result is in the pointer map, we need to make sure it has a valid
- // tagged value before we call the runtime. We speculatively set it to the
- // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
- // be valid.
- Label result_ok;
- Register input = ToRegister(instr->value());
- __ JumpIfSmi(result, &result_ok);
- __ Cmp(input, result);
- // TODO(all): Shouldn't we assert here?
- DeoptimizeIf(ne, instr->environment());
- __ Bind(&result_ok);
- }
-
- { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(x0, result);
- }
- // The inline (non-deferred) code will store result_bits into result.
-}
-
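-// The bit trick behind the Bic above, as a standalone sketch: a double's
-// magnitude is its bit pattern with the sign bit (kXSignBit, bit 63)
-// cleared. Doing it in integer registers keeps the original sign bit
-// available, which the inline code uses to tell +0.0 and -0.0 apart.
-#include <cstdint>
-#include <cstring>
-
-static double AbsViaSignBit(double x) {
-  uint64_t bits;
-  std::memcpy(&bits, &x, sizeof(bits));
-  bits &= ~(UINT64_C(1) << 63);  // The Bic(result_bits, ..., kXSignMask).
-  double result;
-  std::memcpy(&result, &bits, sizeof(result));
-  return result;
-}
-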
-
-void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
- // Class for deferred case.
- class DeferredMathAbsTagged: public LDeferredCode {
- public:
- DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTagged(instr_, exit(),
- allocation_entry());
- }
- virtual LInstruction* instr() { return instr_; }
- Label* allocation_entry() { return &allocation; }
- private:
- LMathAbsTagged* instr_;
- Label allocation;
- };
-
- // TODO(jbramley): The early-exit mechanism would skip the new frame handling
- // in GenerateDeferredCode. Tidy this up.
- ASSERT(!NeedsDeferredFrame());
-
- DeferredMathAbsTagged* deferred =
- new(zone()) DeferredMathAbsTagged(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
- instr->hydrogen()->value()->representation().IsSmi());
- Register input = ToRegister(instr->value());
- Register result_bits = ToRegister(instr->temp3());
- Register result = ToRegister(instr->result());
- Label done;
-
- // Handle smis inline.
- // We can treat smis as 64-bit integers, since the (low-order) tag bits will
- // never get set by the negation. This is therefore the same as the Integer32
- // case in DoMathAbs, except that it operates on 64-bit values.
- STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
-
- // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
- // doesn't always have enough range. Consider making a variant of it, or a
- // TestIsSmi helper.
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(input, kSmiTagMask);
- __ B(ne, deferred->entry());
-
- __ Abs(result, input, NULL, &done);
-
- // The result is the magnitude (abs) of the smallest value a smi can
- // represent, encoded as a double.
- __ Mov(result_bits, double_to_rawbits(0x80000000));
- __ B(deferred->allocation_entry());
-
- __ Bind(deferred->exit());
- __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
- DoubleRegister double_temp2 = double_scratch();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Register temp3 = ToRegister(instr->temp3());
-
- MathExpGenerator::EmitMathExp(masm(), input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3);
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- // TODO(jbramley): If we could provide a double result, we could use frintm
- // and produce a valid double result in a single instruction.
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label deopt;
- Label done;
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ JumpIfMinusZero(input, &deopt);
- }
-
- __ Fcvtms(result, input);
-
- // Check that the result fits into a 32-bit integer.
- // - The result did not overflow.
- __ Cmp(result, Operand(result, SXTW));
- // - The input was not NaN.
- __ Fccmp(input, input, NoFlag, eq);
- __ B(&done, eq);
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Register right = ToRegister32(instr->right());
- Register remainder = ToRegister32(instr->temp());
-
-  // The division can't cause an exception on this target, so we can
-  // speculatively execute it now, before the deopt checks below.
- __ Sdiv(result, left, right);
-
- // Check for x / 0.
- DeoptimizeIfZero(right, instr->environment());
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // The V flag will be set iff left == kMinInt.
- __ Cmp(left, 1);
- __ Ccmp(right, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(right, 0);
- __ Ccmp(left, 0, ZFlag, mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
-
- Label done;
- // If both operands have the same sign then we are done.
- __ Eor(remainder, left, right);
- __ Tbz(remainder, kWSignBit, &done);
-
- // Check if the result needs to be corrected.
- __ Msub(remainder, result, right, left);
- __ Cbz(remainder, &done);
- __ Sub(result, result, 1);
-
- __ Bind(&done);
-}
-
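-// A scalar sketch of the flooring correction above: Sdiv truncates towards
-// zero, so when the operand signs differ and the division was inexact, the
-// truncated quotient is one above the floored one. Assumes right != 0 and
-// not (kMinInt / -1), which the deopt checks above rule out.
-#include <cstdint>
-
-static int32_t FloorDiv(int32_t left, int32_t right) {
-  int32_t result = left / right;              // Truncating, like Sdiv.
-  int32_t remainder = left - result * right;  // The Msub step.
-  if (((left ^ right) < 0) && (remainder != 0)) {
-    result -= 1;  // Correct the quotient towards -infinity.
-  }
-  return result;
-}
-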
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(instr->IsMarkedAsCall());
- ASSERT(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
- ASSERT(ToDoubleRegister(instr->result()).Is(d0));
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- Label done;
-
- // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
- // Math.pow(-Infinity, 0.5) == +Infinity
- // Math.pow(-0.0, 0.5) == +0.0
-
- // Catch -infinity inputs first.
- // TODO(jbramley): A constant infinity register would be helpful here.
- __ Fmov(double_scratch(), kFP64NegativeInfinity);
- __ Fcmp(double_scratch(), input);
- __ Fabs(result, input);
- __ B(&done, eq);
-
- // Add +0.0 to convert -0.0 to +0.0.
- // TODO(jbramley): A constant zero register would be helpful here.
- __ Fmov(double_scratch(), 0.0);
- __ Fadd(double_scratch(), input, double_scratch());
- __ Fsqrt(result, double_scratch());
-
- __ Bind(&done);
-}
-
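-// The two divergences listed above, as a scalar sketch of what the code
-// computes (illustrative; the assembly never calls libm):
-#include <cmath>
-#include <limits>
-
-static double PowHalf(double x) {
-  // Catch -infinity first: pow(-Inf, 0.5) is +Inf, but sqrt(-Inf) is NaN.
-  if (x == -std::numeric_limits<double>::infinity()) {
-    return std::numeric_limits<double>::infinity();  // The Fabs result.
-  }
-  // Adding +0.0 turns -0.0 into +0.0, so the root of -0.0 comes out +0.0.
-  return std::sqrt(x + 0.0);
-}
-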
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d1));
- ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(x11));
- ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
- ASSERT(ToDoubleRegister(instr->left()).is(d0));
- ASSERT(ToDoubleRegister(instr->result()).is(d0));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(x11, &no_deopt);
- __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
- instr->environment());
- __ Bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
- // supports large integer exponents.
- Register exponent = ToRegister(instr->right());
- __ Sxtw(exponent, exponent);
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- // TODO(jbramley): We could provide a double result here using frint.
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
- Register result = ToRegister(instr->result());
- Label try_rounding;
- Label deopt;
- Label done;
-
- // Math.round() rounds to the nearest integer, with ties going towards
- // +infinity. This does not match any IEEE-754 rounding mode.
- // - Infinities and NaNs are propagated unchanged, but cause deopts because
- // they can't be represented as integers.
- // - The sign of the result is the same as the sign of the input. This means
- // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
- // result of -0.0.
-
- DoubleRegister dot_five = double_scratch();
- __ Fmov(dot_five, 0.5);
- __ Fabs(temp1, input);
- __ Fcmp(temp1, dot_five);
- // If input is in [-0.5, -0], the result is -0.
- // If input is in [+0, +0.5[, the result is +0.
- // If the input is +0.5, the result is 1.
- __ B(hi, &try_rounding); // hi so NaN will also branch.
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Fmov(result, input);
- __ Cmp(result, 0);
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0].
- }
- __ Fcmp(input, dot_five);
- __ Mov(result, 1); // +0.5.
-  // The remaining cases ([+0, +0.5[ if kBailoutOnMinusZero is set, otherwise
-  // [-0.5, +0.5[) return 0 (xzr).
- __ Csel(result, result, xzr, eq);
- __ B(&done);
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- __ Bind(&try_rounding);
- // Since we're providing a 32-bit result, we can implement ties-to-infinity by
- // adding 0.5 to the input, then taking the floor of the result. This does not
- // work for very large positive doubles because adding 0.5 would cause an
- // intermediate rounding stage, so a different approach will be necessary if a
- // double result is needed.
- __ Fadd(temp1, input, dot_five);
- __ Fcvtms(result, temp1);
-
- // Deopt if
- // * the input was NaN
- // * the result is not representable using a 32-bit integer.
- __ Fcmp(input, 0.0);
- __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
- __ B(ne, &deopt);
-
- __ Bind(&done);
-}
-
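-// A sketch of the try_rounding step: since only a 32-bit result is needed,
-// ties-towards-+infinity reduces to flooring (input + 0.5). Assumes
-// |input| > 0.5 (smaller magnitudes are handled inline above) and that the
-// NaN and out-of-range deopt checks follow, as they do in the code.
-#include <cmath>
-#include <cstdint>
-
-static int32_t RoundTiesUp32(double input) {
-  return static_cast<int32_t>(std::floor(input + 0.5));  // Fadd + Fcvtms.
-}
-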
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fsqrt(result, input);
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- HMathMinMax::Operation op = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToOperand32I(instr->right());
-
- __ Cmp(left, right);
- __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
- } else if (instr->hydrogen()->representation().IsSmi()) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
-
- __ Cmp(left, right);
- __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
-
- if (op == HMathMinMax::kMathMax) {
- __ Fmax(result, left, right);
- } else {
- ASSERT(op == HMathMinMax::kMathMin);
- __ Fmin(result, left, right);
- }
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- HValue* hleft = hmod->left();
- HValue* hright = hmod->right();
-
- Label done;
- Register result = ToRegister32(instr->result());
- Register dividend = ToRegister32(instr->left());
-
- bool need_minus_zero_check = (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
- hleft->CanBeNegative() && hmod->CanBeZero());
-
- if (hmod->RightIsPowerOf2()) {
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(hright->GetInteger32Constant());
-
- if (hleft->CanBeNegative()) {
- __ Cmp(dividend, 0);
- __ Cneg(result, dividend, mi);
- __ And(result, result, divisor - 1);
- __ Cneg(result, result, mi);
- if (need_minus_zero_check) {
- __ Cbnz(result, &done);
- // The result is 0. Deoptimize if the dividend was negative.
- DeoptimizeIf(mi, instr->environment());
- }
- } else {
- __ And(result, dividend, divisor - 1);
- }
-
- } else {
- Label deopt;
- Register divisor = ToRegister32(instr->right());
- // Compute:
- // modulo = dividend - quotient * divisor
- __ Sdiv(result, dividend, divisor);
- if (hright->CanBeZero()) {
- // Combine the deoptimization sites.
- Label ok;
- __ Cbnz(divisor, &ok);
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- __ Bind(&ok);
- }
- __ Msub(result, result, divisor, dividend);
- if (need_minus_zero_check) {
- __ Cbnz(result, &done);
- if (deopt.is_bound()) {
- __ Tbnz(dividend, kWSignBit, &deopt);
- } else {
- DeoptimizeIfNegative(dividend, instr->environment());
- }
- }
- }
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
- ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
- bool is_smi = instr->hydrogen()->representation().IsSmi();
- Register result =
- is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
- Register left =
-      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
- int32_t right = ToInteger32(instr->right());
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero) {
- if (right < 0) {
- // The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr->environment());
- } else if (right == 0) {
- // The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr->environment());
- }
- }
-
- switch (right) {
- // Cases which can detect overflow.
- case -1:
- if (can_overflow) {
- // Only 0x80000000 can overflow here.
- __ Negs(result, left);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Neg(result, left);
- }
- break;
- case 0:
- // This case can never overflow.
- __ Mov(result, 0);
- break;
- case 1:
- // This case can never overflow.
- __ Mov(result, left, kDiscardForSameWReg);
- break;
- case 2:
- if (can_overflow) {
- __ Adds(result, left, left);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Add(result, left, left);
- }
- break;
-
-    // All other cases cannot detect overflow, because doing so would
-    // probably be no faster than using the smull method in LMulI.
- // TODO(jbramley): Investigate this, and add overflow support if it would
- // be useful.
- default:
- ASSERT(!can_overflow);
-
- // Multiplication by constant powers of two (and some related values)
- // can be done efficiently with shifted operands.
- if (right >= 0) {
- if (IsPowerOf2(right)) {
- // result = left << log2(right)
- __ Lsl(result, left, WhichPowerOf2(right));
- } else if (IsPowerOf2(right - 1)) {
- // result = left + left << log2(right - 1)
- __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
- } else if (IsPowerOf2(right + 1)) {
- // result = -left + left << log2(right + 1)
- __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
- __ Neg(result, result);
- } else {
- UNREACHABLE();
- }
- } else {
- if (IsPowerOf2(-right)) {
- // result = -left << log2(-right)
- __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right)));
- } else if (IsPowerOf2(-right + 1)) {
- // result = left - left << log2(-right + 1)
- __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
- } else if (IsPowerOf2(-right - 1)) {
- // result = -left - left << log2(-right - 1)
- __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
- __ Neg(result, result);
- } else {
- UNREACHABLE();
- }
- }
- break;
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Register right = ToRegister32(instr->right());
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero) {
- // If one operand is zero and the other is negative, the result is -0.
- // - Set Z (eq) if either left or right, or both, are 0.
- __ Cmp(left, 0);
- __ Ccmp(right, 0, ZFlag, ne);
- // - If so (eq), set N (mi) if left + right is negative.
- // - Otherwise, clear N.
- __ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
- }
-
- if (can_overflow) {
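-    // Compute the full 64-bit product, then deopt unless sign-extending the
-    // low 32 bits reproduces it, i.e. unless the product fits in 32 bits.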
- __ Smull(result.X(), left, right);
- __ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ Mul(result, left, right);
- }
-}
-
-
-void LCodeGen::DoMulS(LMulS* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero) {
- // If one operand is zero and the other is negative, the result is -0.
- // - Set Z (eq) if either left or right, or both, are 0.
- __ Cmp(left, 0);
- __ Ccmp(right, 0, ZFlag, ne);
- // - If so (eq), set N (mi) if left + right is negative.
- // - Otherwise, clear N.
- __ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr->environment());
- }
-
- STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
- if (can_overflow) {
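-    // The smi payloads occupy the upper 32 bits, so Smulh yields the
-    // untagged 64-bit product. SmiTag does not update the flags, so the
-    // overflow check can be emitted before the deopt branch.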
- __ Smulh(result, left, right);
- __ Cmp(result, Operand(result.W(), SXTW));
- __ SmiTag(result);
- DeoptimizeIf(ne, instr->environment());
- } else {
- // TODO(jbramley): This could be rewritten to support UseRegisterAtStart.
- ASSERT(!AreAliased(result, right));
- __ SmiUntag(result, left);
- __ Mul(result, result, right);
- }
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = ToRegister(instr->result());
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
- } else {
- __ B(deferred->entry());
- }
-
- __ Bind(deferred->exit());
- __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- Label slow, convert_and_store;
- Register src = ToRegister32(value);
- Register dst = ToRegister(instr->result());
- Register scratch1 = ToRegister(temp1);
-
- if (FLAG_inline_new) {
- Register scratch2 = ToRegister(temp2);
- __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
- __ B(&convert_and_store);
- }
-
- // Slow case: call the runtime system to do the number allocation.
- __ Bind(&slow);
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ Mov(dst, 0);
- {
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, dst);
- }
-
- // Convert number to floating point and store in the newly allocated heap
- // number.
- __ Bind(&convert_and_store);
- DoubleRegister dbl_scratch = double_scratch();
- __ Ucvtf(dbl_scratch, src);
- __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2());
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- Register value = ToRegister32(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
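-  // The unsigned comparison sends every value above Smi::kMaxValue,
-  // including all values with the top bit set, to the deferred heap number
-  // allocation.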
- __ Cmp(value, Smi::kMaxValue);
- __ B(hi, deferred->entry());
- __ SmiTag(result, value.X());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- DoubleRegister result = ToDoubleRegister(instr->result());
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
-
- Label done, load_smi;
-
- // Work out what untag mode we're working with.
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- __ JumpIfSmi(input, &load_smi);
-
- Label convert_undefined, deopt;
-
- // Heap number map check.
- Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined
- : &deopt;
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number);
-
- // Load heap number.
- __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
- if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- __ JumpIfMinusZero(result, &deopt);
- }
- __ B(&done);
-
- if (can_convert_undefined_to_nan) {
- __ Bind(&convert_undefined);
- __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt);
-
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ B(&done);
- }
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- // Fall through to load_smi.
- }
-
- // Smi to double register conversion.
- __ Bind(&load_smi);
- __ SmiUntagToDouble(result, input);
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- __ Push(ToRegister(argument));
- }
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in x0. We're leaving the code
-    // managed by the register allocator and tearing down the frame, so it's
-    // safe to write to the context register.
- __ Push(x0);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
-
- int no_frame_start = -1;
- if (NeedsEagerFrame()) {
- Register stack_pointer = masm()->StackPointer();
- __ Mov(stack_pointer, fp);
- no_frame_start = masm_->pc_offset();
- __ Pop(fp, lr);
- }
-
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- __ Drop(parameter_count + 1);
- } else {
- Register parameter_count = ToRegister(instr->parameter_count());
- __ DropBySMI(parameter_count);
- }
- __ Ret();
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string,
- Register temp,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- ASSERT(!temp.is(string));
- ASSERT(!temp.is(ToRegister(index)));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Add(temp, string, Operand(ToRegister32(index), SXTW));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
- }
- return FieldMemOperand(temp, SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- if (FLAG_debug_code) {
- __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
-
- __ And(temp, temp,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType);
- }
-
- MemOperand operand =
- BuildSeqStringOperand(string, temp, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Ldrb(result, operand);
- } else {
- __ Ldrh(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (FLAG_debug_code) {
- ASSERT(ToRegister(instr->context()).is(cp));
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
- encoding_mask);
- }
- MemOperand operand =
- BuildSeqStringOperand(string, temp, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Strb(value, operand);
- } else {
- __ Strh(value, operand);
- }
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label done, untag;
-
- if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr->environment());
- }
-
- __ Bind(&untag);
- __ SmiUntag(result, input);
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* right_op = instr->right();
- Register left = ToRegister32(instr->left());
- Register result = ToRegister32(instr->result());
-
- if (right_op->IsRegister()) {
- Register right = ToRegister32(instr->right());
- switch (instr->op()) {
- case Token::ROR: __ Ror(result, left, right); break;
- case Token::SAR: __ Asr(result, left, right); break;
- case Token::SHL: __ Lsl(result, left, right); break;
- case Token::SHR:
- if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
- }
- __ Lsr(result, left, right);
- break;
- default: UNREACHABLE();
- }
- } else {
- ASSERT(right_op->IsConstantOperand());
- int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
- if (shift_count == 0) {
- if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
- }
- __ Mov(result, left, kDiscardForSameWReg);
- } else {
- switch (instr->op()) {
- case Token::ROR: __ Ror(result, left, shift_count); break;
- case Token::SAR: __ Asr(result, left, shift_count); break;
- case Token::SHL: __ Lsl(result, left, shift_count); break;
- case Token::SHR: __ Lsr(result, left, shift_count); break;
- default: UNREACHABLE();
- }
- }
- }
-}
-
-
-void LCodeGen::DoShiftS(LShiftS* instr) {
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- // Only ROR by register needs a temp.
- ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
- (instr->temp() == NULL));
-
- if (right_op->IsRegister()) {
- Register right = ToRegister(instr->right());
- switch (instr->op()) {
- case Token::ROR: {
- Register temp = ToRegister(instr->temp());
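-        // The smi payload sits in bits [63:32], so Ubfx extracts the low
-        // five bits of the untagged shift amount from the tagged operand.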
- __ Ubfx(temp, right, kSmiShift, 5);
- __ SmiUntag(result, left);
- __ Ror(result.W(), result.W(), temp.W());
- __ SmiTag(result);
- break;
- }
- case Token::SAR:
- __ Ubfx(result, right, kSmiShift, 5);
- __ Asr(result, left, result);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Ubfx(result, right, kSmiShift, 5);
- __ Lsl(result, left, result);
- break;
- case Token::SHR:
- if (instr->can_deopt()) {
- Label right_not_zero;
- __ Cbnz(right, &right_not_zero);
- DeoptimizeIfNegative(left, instr->environment());
- __ Bind(&right_not_zero);
- }
- __ Ubfx(result, right, kSmiShift, 5);
- __ Lsr(result, left, result);
- __ Bic(result, result, kSmiShiftMask);
- break;
- default: UNREACHABLE();
- }
- } else {
- ASSERT(right_op->IsConstantOperand());
- int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
- if (shift_count == 0) {
- if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr->environment());
- }
- __ Mov(result, left);
- } else {
- switch (instr->op()) {
- case Token::ROR:
- __ SmiUntag(result, left);
- __ Ror(result.W(), result.W(), shift_count);
- __ SmiTag(result);
- break;
- case Token::SAR:
- __ Asr(result, left, shift_count);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Lsl(result, left, shift_count);
- break;
- case Token::SHR:
- __ Lsr(result, left, shift_count);
- __ Bic(result, result, kSmiShiftMask);
- break;
- default: UNREACHABLE();
- }
- }
- }
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ Debug("LDebugBreak", 0, BREAK);
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- Register scratch1 = x5;
- Register scratch2 = x6;
- ASSERT(instr->IsMarkedAsCall());
-
- ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
- // TODO(all): if Mov could handle object in new space then it could be used
- // here.
- __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
- __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
- __ Push(cp, scratch1, scratch2); // The context is the first argument.
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &done);
-
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- ASSERT(instr->context()->IsRegister());
- ASSERT(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
-
- __ Bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(lo, deferred_stack_check->entry());
-
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ Bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- Register temp = ToRegister(instr->temp());
- __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
- __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ Ldr(scratch, target);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
- instr->environment());
- } else {
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
- }
- }
-
- __ Str(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- __ Bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = ToRegister(instr->temp1());
-
- // Load the cell.
- __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Register payload = ToRegister(instr->temp2());
- __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(
- payload, Heap::kTheHoleValueRootIndex, instr->environment());
- }
-
- // Store the value.
- __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
- Register ext_ptr = ToRegister(instr->elements());
- Register key = no_reg;
-  Register scratch = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
-
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- ASSERT(instr->temp() == NULL);
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- scratch = ToRegister(instr->temp());
- }
-
- MemOperand dst =
- PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
- key_is_constant, constant_key,
- elements_kind,
- instr->additional_index());
-
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- DoubleRegister dbl_scratch = double_scratch();
- __ Fcvt(dbl_scratch.S(), value);
- __ Str(dbl_scratch.S(), dst);
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ Str(value, dst);
- } else {
- Register value = ToRegister(instr->value());
-
- switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- __ Strb(value, dst);
- break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ Strh(value, dst);
- break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ Str(value.W(), dst);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
- Register elements = ToRegister(instr->elements());
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register store_base = ToRegister(instr->temp());
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
- }
-
- if (instr->NeedsCanonicalization()) {
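-    // Make sure the hole NaN pattern is never stored: Fmaxnm passes numeric
-    // values through unchanged and maps a NaN input to a quiet NaN derived
-    // from the canonical pattern loaded above.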
- DoubleRegister dbl_scratch = double_scratch();
- __ Fmov(dbl_scratch,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ Fmaxnm(dbl_scratch, dbl_scratch, value);
- __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
- } else {
- __ Str(value, FieldMemOperand(store_base, offset));
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register store_base = ToRegister(instr->temp());
- Register key = no_reg;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- Representation representation = instr->hydrogen()->value()->representation();
- if (representation.IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
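-    // The entry already holds a smi, so its tag half is zero; writing the
-    // int32 payload into the upper half leaves a valid smi in place.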
- __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
- Representation::Integer32());
- } else {
- __ Store(value, FieldMemOperand(store_base, offset), representation);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Add(key, store_base, offset - kHeapObjectTag);
- __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->object()).Is(x2));
- ASSERT(ToRegister(instr->key()).Is(x1));
- ASSERT(ToRegister(instr->value()).Is(x0));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-// TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
-// to tidy up this function.
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register temp0 = ToRegister(instr->temp0());
- Register temp1 = ToRegister(instr->temp1());
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- __ Store(value, MemOperand(object, offset), representation);
- return;
- }
-
- Handle<Map> transition = instr->transition();
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- Register value = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(value, instr->environment());
-
-      // We know now that value is not a smi, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
- }
- } else if (representation.IsDouble()) {
- ASSERT(transition.is_null());
- ASSERT(access.IsInobject());
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- FPRegister value = ToDoubleRegister(instr->value());
- __ Str(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (!transition.is_null()) {
- // Store the new map value.
- Register new_map_value = temp0;
- __ Mov(new_map_value, Operand(transition));
- __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- new_map_value,
- temp1,
- GetLinkRegisterState(),
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- Register value = ToRegister(instr->value());
- Register destination;
- if (access.IsInobject()) {
- destination = object;
- } else {
- __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
- destination = temp0;
- }
-
- if (representation.IsSmi() &&
- instr->hydrogen()->value()->representation().IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
- __ Ldr(temp1, FieldMemOperand(destination, offset));
- __ AssertSmi(temp1);
-#endif
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
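-    // The field already holds a smi (checked above in debug builds), so its
-    // tag half is zero; storing the int32 payload into the upper half
-    // produces a correctly tagged smi without an explicit SmiTag.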
- __ Store(value, UntagSmiFieldMemOperand(destination, offset),
- Representation::Integer32());
- } else {
- __ Store(value, FieldMemOperand(destination, offset), representation);
- }
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- __ RecordWriteField(destination,
- offset,
- value, // Clobbered.
- temp1, // Clobbered.
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->value()).is(x0));
- ASSERT(ToRegister(instr->object()).is(x1));
-
- // Name must be in x2.
- __ Mov(x2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- ASSERT(ToRegister(instr->left()).Is(x1));
- ASSERT(ToRegister(instr->right()).Is(x0));
- StringAddStub stub(instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ Push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ Push(index);
-
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
- instr->context());
- __ AssertSmi(x0);
- __ SmiUntag(x0);
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
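-  // Fast path: look the char code up in the single-character string cache.
-  // A miss, marked by an undefined entry, falls back to the deferred
-  // runtime call, as do char codes above the one-byte range.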
- __ Cmp(char_code, Operand(String::kMaxOneByteCharCode));
- __ B(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ Add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(eq, deferred->entry());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(char_code);
- __ Push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- InlineSmiCheckInfo::EmitNotInlined(masm());
-
- Condition condition = TokenToCondition(op, false);
-
- EmitCompareAndBranch(instr, condition, x0, 0);
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToOperand32I(instr->right());
- if (can_overflow) {
- __ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Sub(result, left, right);
- }
-}
-
-
-void LCodeGen::DoSubS(LSubS* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- if (can_overflow) {
- __ Subs(result, left, right);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ Sub(result, left, right);
- }
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- Register input = ToRegister(value);
- Register scratch1 = ToRegister(temp1);
- DoubleRegister dbl_scratch1 = double_scratch();
-
- Label done;
-
- // Load heap object map.
- __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
-
- if (instr->truncating()) {
- Register output = ToRegister(instr->result());
- Register scratch2 = ToRegister(temp2);
- Label check_bools;
-
-    // If it's not a heap number, jump to the boolean and undefined checks.
- __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
-
- // A heap number: load value and convert to int32 using truncating function.
- __ TruncateHeapNumberToI(output, input);
- __ B(&done);
-
- __ Bind(&check_bools);
-
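-    // Convert true to 1 and false to 0; anything else falls through to the
-    // undefined check below. The Ccmp re-tests against false only when the
-    // comparison with true failed.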
- Register true_root = output;
- Register false_root = scratch2;
- __ LoadTrueFalseRoots(true_root, false_root);
-    __ Cmp(input, true_root);
-    __ Cset(output, eq);
-    __ Ccmp(input, false_root, ZFlag, ne);
- __ B(eq, &done);
-
-    // Output already contains zero; undefined also converts to zero for
-    // truncating conversions, so deopt for anything other than undefined.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
- instr->environment());
- } else {
- Register output = ToRegister32(instr->result());
-
- DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
- Label converted;
-
-    // Deoptimize if it's not a heap number.
- DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
- instr->environment());
-
- // A heap number: load value and convert to int32 using non-truncating
- // function. If the result is out of range, branch to deoptimize.
- __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted);
- Deoptimize(instr->environment());
-
- __ Bind(&converted);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(output, 0);
- __ B(ne, &done);
- __ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr->environment());
- }
- }
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2());
- }
-
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
- // doesn't always have enough range. Consider making a variant of it, or a
- // TestIsSmi helper.
- STATIC_ASSERT(kSmiTag == 0);
- __ Tst(input, kSmiTagMask);
- __ B(ne, deferred->entry());
-
- __ SmiUntag(output, input);
- __ Bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).Is(x0));
- ASSERT(ToRegister(instr->result()).Is(x0));
- __ Push(x0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // x7 = literals array.
- // x1 = regexp literal.
- // x0 = regexp literal clone.
- // x10-x12 are used as temporaries.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadObject(x7, instr->hydrogen()->literals());
- __ Ldr(x1, FieldMemOperand(x7, literal_offset));
- __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
-
-  // Create the regexp literal using the runtime function.
-  // The result will be in x0.
- __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ Mov(x11, Operand(instr->hydrogen()->pattern()));
- __ Mov(x10, Operand(instr->hydrogen()->flags()));
- __ Push(x7, x12, x11, x10);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ Mov(x1, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x0, Operand(Smi::FromInt(size)));
- __ Push(x1, x0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ Pop(x1);
-
- __ Bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map = ToRegister(instr->temp2());
- __ Mov(new_map, Operand(to_map));
- __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
- GetLinkRegisterState(), kDontSaveFPRegs);
- } else {
- ASSERT(ToRegister(instr->context()).is(cp));
- PushSafepointRegistersScope scope(
- this, Safepoint::kWithRegistersAndDoubles);
- __ Mov(x0, object);
- __ Mov(x1, Operand(to_map));
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- }
- __ Bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- Label no_memento_found;
- __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
- Deoptimize(instr->environment());
- __ Bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ TruncateDoubleToI(result, input);
- if (instr->tag_result()) {
- __ SmiTag(result, result);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->value());
- __ Push(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Handle<String> type_name = instr->type_literal();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Register value = ToRegister(instr->value());
-
- if (type_name->Equals(heap()->number_string())) {
- ASSERT(instr->temp1() != NULL);
- Register map = ToRegister(instr->temp1());
-
- __ JumpIfSmi(value, true_label);
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
-
- } else if (type_name->Equals(heap()->string_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- __ JumpIfObjectType(
- value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
-
- } else if (type_name->Equals(heap()->symbol_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
- EmitBranch(instr, eq);
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
- __ CompareRoot(value, Heap::kFalseValueRootIndex);
- EmitBranch(instr, eq);
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ CompareRoot(value, Heap::kNullValueRootIndex);
- EmitBranch(instr, eq);
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- ASSERT(instr->temp1() != NULL);
- Register scratch = ToRegister(instr->temp1());
-
- __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
- __ JumpIfSmi(value, false_label);
- // Check for undetectable objects and jump to the true branch in this case.
- __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- ASSERT(instr->temp1() != NULL);
- Register type = ToRegister(instr->temp1());
-
- __ JumpIfSmi(value, false_label);
- __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
- // HeapObject's type has been loaded into type register by JumpIfObjectType.
- EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
-
- } else if (type_name->Equals(heap()->object_string())) {
- ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- if (!FLAG_harmony_typeof) {
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- }
- __ JumpIfObjectType(value, map, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
- __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(gt, false_label);
- // Check for undetectable objects => false.
- __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
-
- } else {
- __ B(false_label);
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
-}
-
-
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- Register value = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- DeoptimizeIfNegative(value.W(), instr->environment());
- }
- __ SmiTag(result, value);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- Register temp = ToRegister(instr->temp());
- __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Cmp(map, temp);
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // If the receiver is null or undefined, we have to pass the global object as
- // a receiver to normal functions. Values have to be passed unchanged to
- // builtins and strict-mode functions.
- Label global_object, done, deopt;
-
- if (!instr->hydrogen()->known_function()) {
- __ Ldr(result, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
-
- // CompilerHints is an int32 field. See objects.h.
- __ Ldr(result.W(),
- FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver to object for strict mode functions.
- __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
-
- // Do not transform the receiver to object for builtins.
- __ Tbnz(result, SharedFunctionInfo::kNative, &done);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
-
- // Deoptimize if the receiver is not a JS object.
- __ JumpIfSmi(receiver, &deopt);
- __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
- __ Mov(result, receiver);
- __ B(ge, &done);
- // Otherwise, fall through to deopt.
-
- __ Bind(&deopt);
- Deoptimize(instr->environment());
-
- __ Bind(&global_object);
- __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
- __ AssertSmi(index);
-
- Label out_of_object, done;
- __ Cmp(index, Operand(Smi::FromInt(0)));
- __ B(lt, &out_of_object);
-
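-  // A non-negative index addresses an in-object field, index slots past the
-  // object header.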
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
-
- __ B(&done);
-
- __ Bind(&out_of_object);
- __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // Index is equal to negated out of object property index plus 1.
- __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result,
- FixedArray::kHeaderSize - kPointerSize));
- __ Bind(&done);
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-codegen-a64.h b/deps/v8/src/a64/lithium-codegen-a64.h
deleted file mode 100644
index 006165157f..0000000000
--- a/deps/v8/src/a64/lithium-codegen-a64.h
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
-#define V8_A64_LITHIUM_CODEGEN_A64_H_
-
-#include "a64/lithium-a64.h"
-
-#include "a64/lithium-gap-resolver-a64.h"
-#include "deoptimizer.h"
-#include "lithium-codegen.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "v8utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-class BranchGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- Scope* scope() const { return scope_; }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- LinkRegisterStatus GetLinkRegisterState() const {
- return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
- }
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
- Register ToRegister32(LOperand* op) const;
- Operand ToOperand(LOperand* op);
- Operand ToOperand32I(LOperand* op);
- Operand ToOperand32U(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // TODO(jbramley): Examine these helpers and check that they make sense.
- // IsInteger32Constant returns true for smi constants, for example.
- bool IsInteger32Constant(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
-
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- // Return a double scratch register which can be used locally
- // when generating code for a lithium instruction.
- DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
- Label* exit,
- Label* allocation_entry);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2);
- void DoDeferredTaggedToI(LTaggedToI* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
-
- Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void DoGap(LGap* instr);
-
-  // Generic version of EmitBranch. It contains some code to avoid emitting a
-  // branch to the next emitted basic block where we could just fall through.
-  // You shouldn't use this directly; consider one of the helpers instead,
-  // such as LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
- template<class InstrType>
- void EmitBranchGeneric(InstrType instr,
- const BranchGenerator& branch);
-
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition condition);
-
- template<class InstrType>
- void EmitCompareAndBranch(InstrType instr,
- Condition condition,
- const Register& lhs,
- const Operand& rhs);
-
- template<class InstrType>
- void EmitTestAndBranch(InstrType instr,
- Condition condition,
- const Register& value,
- uint64_t mask);
-
- template<class InstrType>
- void EmitBranchIfNonZeroNumber(InstrType instr,
- const FPRegister& value,
- const FPRegister& scratch);
-
- template<class InstrType>
- void EmitBranchIfHeapNumber(InstrType instr,
- const Register& value);
-
- template<class InstrType>
- void EmitBranchIfRoot(InstrType instr,
- const Register& value,
- Heap::RootListIndex index);
-
- // Emits optimized code to deep-copy the contents of statically known object
- // graphs (e.g. object literal boilerplate). Expects a pointer to the
- // allocated destination object in the result register, and a pointer to the
- // source object in the source register.
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- Register scratch,
- int* offset,
- AllocationSiteMode mode);
-
-  // Emits optimized code for %_IsString(x). Preserves the input register.
-  // Returns the condition on which a final split to the true and false
-  // labels should be made, to optimize fallthrough.
- Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
- SmiCheck check_needed);
-
- int DefineDeoptimizationLiteral(Handle<Object> literal);
- void PopulateDeoptimizationData(Handle<Code> code);
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- MemOperand BuildSeqStringOperand(Register string,
- Register temp,
- LOperand* index,
- String::Encoding encoding);
- Deoptimizer::BailoutType DeoptimizeHeader(
- LEnvironment* environment,
- Deoptimizer::BailoutType* override_bailout_type);
- void Deoptimize(LEnvironment* environment);
- void Deoptimize(LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void DeoptimizeIfZero(Register rt, LEnvironment* environment);
- void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
- void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
- void DeoptimizeIfRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void DeoptimizeIfNotRoot(Register rt,
- Heap::RootListIndex index,
- LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
-
- MemOperand PrepareKeyedExternalArrayOperand(Register key,
- Register base,
- Register scratch,
- bool key_is_smi,
- bool key_is_constant,
- int constant_key,
- ElementsKind elements_kind,
- int additional_index);
- void CalcKeyedArrayBaseRegister(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-
- void Abort(BailoutReason reason);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation steps. Returns true if code generation should continue.
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- // Generate a direct call to a known function.
- // If the function is already loaded into x1 by the caller, function_reg may
- // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
- // automatically load it.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count,
- int arity,
- LInstruction* instr,
- Register function_reg = NoReg);
-
- // Support for recording safepoint and position information.
- void RecordAndWritePosition(int position) V8_OVERRIDE;
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
-
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- int inlined_function_count_;
- Scope* const scope_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table itself is
- // emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
-  // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- int old_position_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
-
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegisters();
- codegen_->masm_->PushSafepointFPRegisters();
- break;
- default:
- UNREACHABLE();
- }
- }
-
- ~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointFPRegisters();
- codegen_->masm_->PopSafepointRegisters();
- break;
- default:
- UNREACHABLE();
- }
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-
-// This is the abstract class used by EmitBranchGeneric.
-// It is used to emit code for conditional branching. The Emit() function
-// emits code to branch when the condition holds, and EmitInverted() emits
-// the branch when the inverted condition holds.
-//
-// For concrete examples of conditions, see the implementations in
-// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
-class BranchGenerator BASE_EMBEDDED {
- public:
- explicit BranchGenerator(LCodeGen* codegen)
- : codegen_(codegen) { }
-
- virtual ~BranchGenerator() { }
-
- virtual void Emit(Label* label) const = 0;
- virtual void EmitInverted(Label* label) const = 0;
-
- protected:
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- LCodeGen* codegen_;
-};
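-
-// A minimal illustrative sketch (not part of this header) of a concrete
-// generator in the style of lithium-codegen-a64.cc: it branches on a plain
-// condition code, and EmitInverted() flips the condition for the
-// fall-through case, doing nothing for 'al' since an always-taken branch
-// has no inverse.
-//
-//   class BranchOnCondition : public BranchGenerator {
-//    public:
-//     BranchOnCondition(LCodeGen* codegen, Condition cond)
-//         : BranchGenerator(codegen), cond_(cond) { }
-//     virtual void Emit(Label* label) const { masm()->B(cond_, label); }
-//     virtual void EmitInverted(Label* label) const {
-//       if (cond_ != al) masm()->B(InvertCondition(cond_), label);
-//     }
-//    private:
-//     Condition cond_;
-//   };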
-
-} } // namespace v8::internal
-
-#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
diff --git a/deps/v8/src/a64/lithium-gap-resolver-a64.cc b/deps/v8/src/a64/lithium-gap-resolver-a64.cc
deleted file mode 100644
index 3087a3e930..0000000000
--- a/deps/v8/src/a64/lithium-gap-resolver-a64.cc
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "a64/lithium-gap-resolver-a64.h"
-#include "a64/lithium-codegen-a64.h"
-
-namespace v8 {
-namespace internal {
-
-// We use the root register to spill a value while breaking a cycle in
-// parallel moves. We don't need access to roots while resolving the move
-// list, and using the root register has two advantages:
-//  - It is not in Crankshaft's allocatable register list, so it can't
-//    interfere with any of the moves we are resolving.
-//  - We don't need to push it on the stack, as we can reload it with its
-//    value once we have resolved a cycle.
-#define kSavedValue root
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL), need_to_restore_root_(false) { }
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
-
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
-
-    // Skip constants so they are performed last. They don't block other
-    // moves, and skipping such moves with register destinations keeps those
-    // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found when we reach this move again.
- PerformMove(i);
- if (in_cycle_) RestoreValue();
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
-
- if (!move.IsEliminated()) {
- ASSERT(move.source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- if (need_to_restore_root_) {
- ASSERT(kSavedValue.Is(root));
- __ InitializeRootRegister();
- need_to_restore_root_ = false;
- }
-
- moves_.Rewind(0);
-}
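-
-// Worked example (illustrative): a two-move cycle such as
-// { x0 -> x1, x1 -> x0 } cannot be resolved by emitting the moves in any
-// order. PerformMove() detects the cycle when it reaches moves_[root_index_]
-// again, and BreakCycle() spills one value into kSavedValue (root):
-//   Mov(root, x0);  // BreakCycle(): save the blocking value.
-//   Mov(x0, x1);    // The remaining move is now unblocked.
-//   Mov(x1, root);  // RestoreValue(): complete the cycle.
-// The root register is then reloaded by InitializeRootRegister() in
-// Resolve() above.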
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
- LMoveOperands& current_move = moves_[index];
-
- ASSERT(!current_move.IsPending());
- ASSERT(!current_move.IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
- LOperand* destination = current_move.destination();
- current_move.set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
-      // If there is a blocking, pending move, it must be moves_[root_index_],
-      // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- current_move.set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-void LGapResolver::BreakCycle(int index) {
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
-
-  // We use a register which is not allocatable by Crankshaft to break the
-  // cycle, so that it cannot interfere with the moves we are resolving.
- ASSERT(!kSavedValue.IsAllocatable());
- need_to_restore_root_ = true;
-
-  // We save the source of this move in a register and remember its
-  // destination. Then we mark the move as resolved, so the cycle is
-  // broken and we can perform the other moves.
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
-
- if (source->IsRegister()) {
- __ Mov(kSavedValue, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
-    // TODO(all): We should use a double register to store the value, to
-    // avoid the penalty of the mov across register banks. We are going to
-    // reserve d31 to hold the 0.0 value. We could clobber this register
-    // while breaking the cycle and restore it afterwards, as we do with the
-    // root register. LGapResolver::RestoreValue() will need to be updated
-    // as well when we do that.
- __ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
-
-  // Mark this move as resolved.
-  // The move will actually be performed by moving the saved value to this
-  // move's destination in LGapResolver::RestoreValue().
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
-
- if (saved_destination_->IsRegister()) {
- __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
- } else if (saved_destination_->IsStackSlot()) {
- __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ Mov(cgen_->ToRegister(destination), source_register);
- } else {
- ASSERT(destination->IsStackSlot());
- __ Str(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ Ldr(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- EmitStackSlotMove(index);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Mov(dst, cgen_->ToInteger32(constant_source));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- __ Fmov(result, cgen_->ToDouble(constant_source));
- } else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- need_to_restore_root_ = true;
- if (cgen_->IsSmi(constant_source)) {
- __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
- } else {
- __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
- }
- __ Str(kSavedValue, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ Fmov(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ Str(src, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand src = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ Ldr(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- EmitStackSlotMove(index);
- }
-
- } else {
- UNREACHABLE();
- }
-
-  // The move has been emitted; we can eliminate it.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::EmitStackSlotMove(int index) {
-  // We need a temp register to perform a stack-slot-to-stack-slot move, and
-  // the register must not be involved in breaking cycles.
-
- // Use the Crankshaft double scratch register as the temporary.
- DoubleRegister temp = crankshaft_fp_scratch;
-
- LOperand* src = moves_[index].source();
- LOperand* dst = moves_[index].destination();
-
- ASSERT(src->IsStackSlot());
- ASSERT(dst->IsStackSlot());
- __ Ldr(temp, cgen_->ToMemOperand(src));
- __ Str(temp, cgen_->ToMemOperand(dst));
-}
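-
-// Worked example (illustrative; src_offset and dst_offset are hypothetical
-// names): there is no memory-to-memory move instruction, so the 64-bit slot
-// is bounced through the FP scratch register:
-//   Ldr(crankshaft_fp_scratch, MemOperand(fp, src_offset));
-//   Str(crankshaft_fp_scratch, MemOperand(fp, dst_offset));
-// Using an FP register here leaves kSavedValue (root) free, so stack slot
-// moves remain safe even while a cycle is being broken.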
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/a64/lithium-gap-resolver-a64.h b/deps/v8/src/a64/lithium-gap-resolver-a64.h
deleted file mode 100644
index 427065933e..0000000000
--- a/deps/v8/src/a64/lithium-gap-resolver-a64.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
-#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Emit a move from one stack slot to another.
- void EmitStackSlotMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-
-  // We use the root register as a scratch register in a few places. When
-  // that happens, this flag is set to indicate that it needs to be restored.
- bool need_to_restore_root_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
diff --git a/deps/v8/src/a64/macro-assembler-a64-inl.h b/deps/v8/src/a64/macro-assembler-a64-inl.h
deleted file mode 100644
index 0c62a8b62e..0000000000
--- a/deps/v8/src/a64/macro-assembler-a64-inl.h
+++ /dev/null
@@ -1,1647 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
-#define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
-
-#include <ctype.h>
-
-#include "v8globals.h"
-#include "globals.h"
-
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
-#include "a64/macro-assembler-a64.h"
-#include "a64/instrument-a64.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
- return UntagSmiMemOperand(object, offset - kHeapObjectTag);
-}
-
-
-MemOperand UntagSmiMemOperand(Register object, int offset) {
-  // Assumes that smis are shifted by 32 bits and the layout is little-endian.
- STATIC_ASSERT(kSmiShift == 32);
- return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
-}
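-
-// Worked example (illustrative): with kSmiShift == 32, a smi stores its
-// 32-bit payload in the upper word of a 64-bit field. On a little-endian
-// target that word begins kSmiShift / kBitsPerByte == 4 bytes into the
-// field, so a 32-bit load from the adjusted operand reads the untagged
-// value directly, with no separate shift instruction.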
-
-
-Handle<Object> MacroAssembler::CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
-}
-
-
-void MacroAssembler::And(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, AND);
-}
-
-
-void MacroAssembler::Ands(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, ANDS);
-}
-
-
-void MacroAssembler::Tst(const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
-}
-
-
-void MacroAssembler::Bic(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, BIC);
-}
-
-
-void MacroAssembler::Bics(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, BICS);
-}
-
-
-void MacroAssembler::Orr(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, ORR);
-}
-
-
-void MacroAssembler::Orn(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, ORN);
-}
-
-
-void MacroAssembler::Eor(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, EOR);
-}
-
-
-void MacroAssembler::Eon(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- LogicalMacro(rd, rn, operand, EON);
-}
-
-
-void MacroAssembler::Ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
- } else {
- ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
- }
-}
-
-
-void MacroAssembler::Ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
- } else {
- ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
- }
-}
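-
-// Worked example (illustrative): a negative immediate is folded into the
-// complementary instruction, because comparing rn against -imm with CCMP
-// sets the same flags as CCMN of rn and +imm. For example,
-// Ccmp(x0, -42, NoFlag, eq) emits a ccmn with immediate 42, keeping the
-// operand inside the instruction's unsigned immediate range.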
-
-
-void MacroAssembler::Add(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
- } else {
- AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
- }
-}
-
-void MacroAssembler::Adds(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
- } else {
- AddSubMacro(rd, rn, operand, SetFlags, ADD);
- }
-}
-
-
-void MacroAssembler::Sub(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
- } else {
- AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
- }
-}
-
-
-void MacroAssembler::Subs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
- } else {
- AddSubMacro(rd, rn, operand, SetFlags, SUB);
- }
-}
-
-
-void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- Adds(AppropriateZeroRegFor(rn), rn, operand);
-}
-
-
-void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- Subs(AppropriateZeroRegFor(rn), rn, operand);
-}
-
-
-void MacroAssembler::Neg(const Register& rd,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- if (operand.IsImmediate()) {
- Mov(rd, -operand.immediate());
- } else {
- Sub(rd, AppropriateZeroRegFor(rd), operand);
- }
-}
-
-
-void MacroAssembler::Negs(const Register& rd,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- Subs(rd, AppropriateZeroRegFor(rd), operand);
-}
-
-
-void MacroAssembler::Adc(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
-}
-
-
-void MacroAssembler::Adcs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
-}
-
-
-void MacroAssembler::Sbc(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
-}
-
-
-void MacroAssembler::Sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
-}
-
-
-void MacroAssembler::Ngc(const Register& rd,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- Register zr = AppropriateZeroRegFor(rd);
- Sbc(rd, zr, operand);
-}
-
-
-void MacroAssembler::Ngcs(const Register& rd,
- const Operand& operand) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- Register zr = AppropriateZeroRegFor(rd);
- Sbcs(rd, zr, operand);
-}
-
-
-void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- Mov(rd, ~imm);
-}
-
-
-#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
-void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
- ASSERT(allow_macro_instructions_); \
- LoadStoreMacro(REG, addr, OP); \
-}
-LS_MACRO_LIST(DEFINE_FUNCTION)
-#undef DEFINE_FUNCTION
-
-
-void MacroAssembler::Adr(const Register& rd, Label* label) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- adr(rd, label);
-}
-
-
-void MacroAssembler::Asr(const Register& rd,
- const Register& rn,
- unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- asr(rd, rn, shift);
-}
-
-
-void MacroAssembler::Asr(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- asrv(rd, rn, rm);
-}
-
-
-void MacroAssembler::B(Label* label) {
- b(label);
- CheckVeneers(false);
-}
-
-
-void MacroAssembler::B(Condition cond, Label* label) {
- ASSERT(allow_macro_instructions_);
- B(label, cond);
-}
-
-
-void MacroAssembler::Bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- bfi(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- bfxil(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Bind(Label* label) {
- ASSERT(allow_macro_instructions_);
- bind(label);
-}
-
-
-void MacroAssembler::Bl(Label* label) {
- ASSERT(allow_macro_instructions_);
- bl(label);
-}
-
-
-void MacroAssembler::Blr(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
- blr(xn);
-}
-
-
-void MacroAssembler::Br(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
- br(xn);
-}
-
-
-void MacroAssembler::Brk(int code) {
- ASSERT(allow_macro_instructions_);
- brk(code);
-}
-
-
-void MacroAssembler::Cinc(const Register& rd,
- const Register& rn,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- cinc(rd, rn, cond);
-}
-
-
-void MacroAssembler::Cinv(const Register& rd,
- const Register& rn,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- cinv(rd, rn, cond);
-}
-
-
-void MacroAssembler::Cls(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- cls(rd, rn);
-}
-
-
-void MacroAssembler::Clz(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- clz(rd, rn);
-}
-
-
-void MacroAssembler::Cneg(const Register& rd,
- const Register& rn,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- cneg(rd, rn, cond);
-}
-
-
-// Conditionally zero the destination register. Only X registers are supported
-// due to the truncation side-effect when used on W registers.
-void MacroAssembler::CzeroX(const Register& rd,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsSP() && rd.Is64Bits());
- ASSERT((cond != al) && (cond != nv));
- csel(rd, xzr, rd, cond);
-}
-
-
-// Conditionally move a value into the destination register. Only X registers
-// are supported due to the truncation side-effect when used on W registers.
-void MacroAssembler::CmovX(const Register& rd,
- const Register& rn,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsSP());
- ASSERT(rd.Is64Bits() && rn.Is64Bits());
- ASSERT((cond != al) && (cond != nv));
- if (!rd.is(rn)) {
- csel(rd, rn, rd, cond);
- }
-}
-
-
-void MacroAssembler::Cset(const Register& rd, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- cset(rd, cond);
-}
-
-
-void MacroAssembler::Csetm(const Register& rd, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- csetm(rd, cond);
-}
-
-
-void MacroAssembler::Csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- csinc(rd, rn, rm, cond);
-}
-
-
-void MacroAssembler::Csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- csinv(rd, rn, rm, cond);
-}
-
-
-void MacroAssembler::Csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- csneg(rd, rn, rm, cond);
-}
-
-
-void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
- ASSERT(allow_macro_instructions_);
- dmb(domain, type);
-}
-
-
-void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
- ASSERT(allow_macro_instructions_);
- dsb(domain, type);
-}
-
-
-void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
- ASSERT(allow_macro_instructions_);
- debug(message, code, params);
-}
-
-
-void MacroAssembler::Extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- extr(rd, rn, rm, lsb);
-}
-
-
-void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- fabs(fd, fn);
-}
-
-
-void MacroAssembler::Fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fadd(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
- fccmp(fn, fm, nzcv, cond);
-}
-
-
-void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fcmp(fn, fm);
-}
-
-
-void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
- ASSERT(allow_macro_instructions_);
- if (value != 0.0) {
- FPRegister tmp = AppropriateTempFor(fn);
- Fmov(tmp, value);
- fcmp(fn, tmp);
- } else {
- fcmp(fn, value);
- }
-}
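-
-// Note (illustrative): the fcmp instruction only encodes a comparison
-// against an immediate of zero, so Fcmp(s0, 1.5) must materialise 1.5 in a
-// scratch FP register first, while Fcmp(s0, 0.0) maps directly onto the
-// immediate form with no extra instruction.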
-
-
-void MacroAssembler::Fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
- fcsel(fd, fn, fm, cond);
-}
-
-
-void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- fcvt(fd, fn);
-}
-
-
-void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtas(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtau(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtms(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtmu(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtns(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtnu(rd, fn);
-}
-
-
-void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtzs(rd, fn);
-}
-void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fcvtzu(rd, fn);
-}
-
-
-void MacroAssembler::Fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fdiv(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
- fmadd(fd, fn, fm, fa);
-}
-
-
-void MacroAssembler::Fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fmax(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fmaxnm(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fmin(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fminnm(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
- ASSERT(allow_macro_instructions_);
- // Only emit an instruction if fd and fn are different, and they are both D
- // registers. fmov(s0, s0) is not a no-op because it clears the top word of
- // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
- // top of q0, but FPRegister does not currently support Q registers.
- if (!fd.Is(fn) || !fd.Is64Bits()) {
- fmov(fd, fn);
- }
-}
-
-
-void MacroAssembler::Fmov(FPRegister fd, Register rn) {
- ASSERT(allow_macro_instructions_);
- fmov(fd, rn);
-}
-
-
-void MacroAssembler::Fmov(FPRegister fd, double imm) {
- ASSERT(allow_macro_instructions_);
- if ((fd.Is64Bits() && IsImmFP64(imm)) ||
- (fd.Is32Bits() && IsImmFP32(imm)) ||
- ((imm == 0.0) && (copysign(1.0, imm) == 1.0))) {
- // These cases can be handled by the Assembler.
- fmov(fd, imm);
- } else {
- // TODO(all): The Assembler would try to relocate the immediate with
- // Assembler::ldr(const FPRegister& ft, double imm) but it is not
- // implemented yet.
- if (fd.SizeInBits() == kDRegSize) {
- Mov(Tmp0(), double_to_rawbits(imm));
- Fmov(fd, Tmp0());
- } else {
- ASSERT(fd.SizeInBits() == kSRegSize);
- Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
- Fmov(fd, WTmp0());
- }
- }
-}
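-
-// Worked example (illustrative): Fmov(d0, 1.0) fits the 8-bit FP immediate
-// encoding checked by IsImmFP64() and emits a single fmov. Fmov(d0, 1.1)
-// does not, so the raw bit pattern (0x3FF199999999999A for 1.1) is moved
-// into Tmp0() and then copied across to d0.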
-
-
-void MacroAssembler::Fmov(Register rd, FPRegister fn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- fmov(rd, fn);
-}
-
-
-void MacroAssembler::Fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
- fmsub(fd, fn, fm, fa);
-}
-
-
-void MacroAssembler::Fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fmul(fd, fn, fm);
-}
-
-
-void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- fneg(fd, fn);
-}
-
-
-void MacroAssembler::Fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
- fnmadd(fd, fn, fm, fa);
-}
-
-
-void MacroAssembler::Fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- ASSERT(allow_macro_instructions_);
- fnmsub(fd, fn, fm, fa);
-}
-
-
-void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- frinta(fd, fn);
-}
-
-
-void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- frintn(fd, fn);
-}
-
-
-void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- frintz(fd, fn);
-}
-
-
-void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
- ASSERT(allow_macro_instructions_);
- fsqrt(fd, fn);
-}
-
-
-void MacroAssembler::Fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- ASSERT(allow_macro_instructions_);
- fsub(fd, fn, fm);
-}
-
-
-void MacroAssembler::Hint(SystemHint code) {
- ASSERT(allow_macro_instructions_);
- hint(code);
-}
-
-
-void MacroAssembler::Hlt(int code) {
- ASSERT(allow_macro_instructions_);
- hlt(code);
-}
-
-
-void MacroAssembler::Isb() {
- ASSERT(allow_macro_instructions_);
- isb();
-}
-
-
-void MacroAssembler::Ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!AreAliased(rt, rt2));
- ldnp(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!AreAliased(rt, rt2));
- ldp(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldpsw(const Register& rt,
- const Register& rt2,
- const MemOperand& src) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- ASSERT(!rt2.IsZero());
- ldpsw(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
- ASSERT(allow_macro_instructions_);
- ldr(ft, imm);
-}
-
-
-void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- ldr(rt, imm);
-}
-
-
-void MacroAssembler::Lsl(const Register& rd,
- const Register& rn,
- unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- lsl(rd, rn, shift);
-}
-
-
-void MacroAssembler::Lsl(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- lslv(rd, rn, rm);
-}
-
-
-void MacroAssembler::Lsr(const Register& rd,
- const Register& rn,
- unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- lsr(rd, rn, shift);
-}
-
-
-void MacroAssembler::Lsr(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- lsrv(rd, rn, rm);
-}
-
-
-void MacroAssembler::Madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- madd(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Mneg(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- mneg(rd, rn, rm);
-}
-
-
-void MacroAssembler::Mov(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- // Emit a register move only if the registers are distinct, or if they are
- // not X registers. Note that mov(w0, w0) is not a no-op because it clears
- // the top word of x0.
- if (!rd.Is(rn) || !rd.Is64Bits()) {
- Assembler::mov(rd, rn);
- }
-}
-
-
-void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- movk(rd, imm, shift);
-}
-
-
-void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- mrs(rt, sysreg);
-}
-
-
-void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- msr(sysreg, rt);
-}
-
-
-void MacroAssembler::Msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- msub(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Mul(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- mul(rd, rn, rm);
-}
-
-
-void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- rbit(rd, rn);
-}
-
-
-void MacroAssembler::Ret(const Register& xn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!xn.IsZero());
- ret(xn);
- CheckVeneers(false);
-}
-
-
-void MacroAssembler::Rev(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- rev(rd, rn);
-}
-
-
-void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- rev16(rd, rn);
-}
-
-
-void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- rev32(rd, rn);
-}
-
-
-void MacroAssembler::Ror(const Register& rd,
- const Register& rs,
- unsigned shift) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ror(rd, rs, shift);
-}
-
-
-void MacroAssembler::Ror(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- rorv(rd, rn, rm);
-}
-
-
-void MacroAssembler::Sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sbfiz(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sbfx(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Scvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
- ASSERT(allow_macro_instructions_);
- scvtf(fd, rn, fbits);
-}
-
-
-void MacroAssembler::Sdiv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sdiv(rd, rn, rm);
-}
-
-
-void MacroAssembler::Smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- smaddl(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- smsubl(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Smull(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- smull(rd, rn, rm);
-}
-
-
-void MacroAssembler::Smulh(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- smulh(rd, rn, rm);
-}
-
-
-void MacroAssembler::Stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- ASSERT(allow_macro_instructions_);
- stnp(rt, rt2, dst);
-}
-
-
-void MacroAssembler::Stp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- ASSERT(allow_macro_instructions_);
- stp(rt, rt2, dst);
-}
-
-
-void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sxtb(rd, rn);
-}
-
-
-void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sxth(rd, rn);
-}
-
-
-void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- sxtw(rd, rn);
-}
-
-
-void MacroAssembler::Ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ubfiz(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ubfx(rd, rn, lsb, width);
-}
-
-
-void MacroAssembler::Ucvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
- ASSERT(allow_macro_instructions_);
- ucvtf(fd, rn, fbits);
-}
-
-
-void MacroAssembler::Udiv(const Register& rd,
- const Register& rn,
- const Register& rm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- udiv(rd, rn, rm);
-}
-
-
-void MacroAssembler::Umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- umaddl(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- umsubl(rd, rn, rm, ra);
-}
-
-
-void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- uxtb(rd, rn);
-}
-
-
-void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- uxth(rd, rn);
-}
-
-
-void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- uxtw(rd, rn);
-}
-
-
-void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
- ASSERT(!csp.Is(sp_));
- // TODO(jbramley): Several callers rely on this not using scratch registers,
- // so we use the assembler directly here. However, this means that large
- // immediate values of 'space' cannot be handled. Once we merge with V8, we
- // should try to use the new scope that controls scratch register usage.
- InstructionAccurateScope scope(this);
- if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
- // The subtract instruction supports a 12-bit immediate, shifted left by
- // zero or 12 bits. So, in two instructions, we can subtract any immediate
- // between zero and (1 << 24) - 1.
- int64_t imm = space.immediate();
- ASSERT(is_uint24(imm));
-
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, StackPointer(), imm_top_12_bits << 12);
- imm -= imm_top_12_bits << 12;
- if (imm > 0) {
- sub(csp, csp, imm);
- }
- } else {
- sub(csp, StackPointer(), space);
- }
-}
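
The split above is plain integer arithmetic: any immediate below 1 << 24 decomposes into a high part encodable as a 12-bit immediate shifted left by 12, plus a low 12-bit remainder. A standalone C++ sketch of the decomposition (illustrative, not from the original file):

#include <cassert>
#include <cstdint>

// Models the two-instruction subtraction in BumpSystemStackPointer: the
// high chunk feeds the first sub (12-bit immediate, LSL #12), the low
// chunk feeds the optional second sub.
void SplitImm24(int64_t imm, int64_t* high, int64_t* low) {
  assert(imm >= 0 && imm < (1 << 24));
  *high = (imm >> 12) << 12;  // Top 12 bits, already shifted for the sub.
  *low = imm - *high;         // Remaining low 12 bits.
}

int main() {
  int64_t high, low;
  SplitImm24(0x12345, &high, &low);
  assert(high == 0x12000 && low == 0x345);  // Two encodable chunks.
  return 0;
}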
-
-
-void MacroAssembler::InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- Mov(root, Operand(roots_array_start));
-}
-
-
-void MacroAssembler::SmiTag(Register dst, Register src) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
- Lsl(dst, src, kSmiShift);
-}
-
-
-void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
-
-
-void MacroAssembler::SmiUntag(Register dst, Register src) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts) {
- AssertSmi(src);
- }
- Asr(dst, src, kSmiShift);
-}
-
-
-void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
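
On this 64-bit port the smi payload lives in the upper word, so tagging is a single left shift and untagging a single arithmetic right shift. A minimal model, assuming kSmiShift is 32 as in this port:

#include <cassert>
#include <cstdint>

const unsigned kSmiShift = 32;  // Assumed value for this port.

int64_t ModelSmiTag(int64_t value) {
  // Shift via uint64_t to keep the left shift of negative values defined.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}
int64_t ModelSmiUntag(int64_t smi) { return smi >> kSmiShift; }  // Asr.

int main() {
  assert(ModelSmiUntag(ModelSmiTag(-42)) == -42);  // Sign is preserved.
  assert((ModelSmiTag(7) & 1) == 0);               // Tag bit stays clear.
  return 0;
}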
-
-
-void MacroAssembler::SmiUntagToDouble(FPRegister dst,
- Register src,
- UntagMode mode) {
- ASSERT(dst.Is64Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
- AssertSmi(src);
- }
- Scvtf(dst, src, kSmiShift);
-}
-
-
-void MacroAssembler::SmiUntagToFloat(FPRegister dst,
- Register src,
- UntagMode mode) {
- ASSERT(dst.Is32Bits() && src.Is64Bits());
- if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
- AssertSmi(src);
- }
- Scvtf(dst, src, kSmiShift);
-}
-
-
-void MacroAssembler::JumpIfSmi(Register value,
- Label* smi_label,
- Label* not_smi_label) {
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- // Check if the tag bit is set.
- if (smi_label) {
- Tbz(value, 0, smi_label);
- if (not_smi_label) {
- B(not_smi_label);
- }
- } else {
- ASSERT(not_smi_label);
- Tbnz(value, 0, not_smi_label);
- }
-}
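
With kSmiTag == 0 and kSmiTagSize == 1, smi-ness is decided entirely by bit 0, which is why a single tbz/tbnz suffices. A sketch of the predicate the branch implements:

#include <cassert>
#include <cstdint>

// Bit 0 clear means smi; heap object pointers carry a set tag bit.
bool ModelIsSmi(uint64_t value) { return (value & 1) == 0; }

int main() {
  assert(ModelIsSmi(uint64_t{42} << 32));  // A tagged smi.
  assert(!ModelIsSmi(0x1001));             // A tagged heap object pointer.
  return 0;
}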
-
-
-void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
- JumpIfSmi(value, NULL, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfBothSmi(Register value1,
- Register value2,
- Label* both_smi_label,
- Label* not_smi_label) {
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- // Check if both tag bits are clear.
- Orr(Tmp0(), value1, value2);
- JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register value1,
- Register value2,
- Label* either_smi_label,
- Label* not_smi_label) {
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- // Check if either tag bit is clear.
- And(Tmp0(), value1, value2);
- JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfEitherNotSmi(Register value1,
- Register value2,
- Label* not_smi_label) {
- JumpIfBothSmi(value1, value2, NULL, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfBothNotSmi(Register value1,
- Register value2,
- Label* not_smi_label) {
- JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
-}
-
-
-void MacroAssembler::IsObjectNameType(Register object,
- Register type,
- Label* fail) {
- CompareObjectType(object, type, type, LAST_NAME_TYPE);
- B(hi, fail);
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // If the cmp result is lt (the type is below the valid range), the ccmp
- // condition (ge) fails and the flags are forced to NoFlag, i.e. all clear.
- // Z == 0 with N == V then implies the gt condition, so out-of-range types
- // satisfy gt in both directions.
- Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
-
- // If we weren't given a fail label, just fall through and leave the
- // flags updated.
- if (fail != NULL) {
- B(gt, fail);
- }
-}
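
The cmp/ccmp pair performs a two-sided range check without a branch: when the first comparison fails the ge condition, ccmp forces the flags to NoFlag, and all-clear flags satisfy gt just as an above-range cmp result does. A C++ model with hypothetical type bounds:

#include <cassert>

bool ModelInRange(int type, int first, int last) {
  bool n, z, v;
  if (type >= first) {        // ccmp condition (ge) holds: compare with last.
    int diff = type - last;
    n = diff < 0; z = diff == 0; v = false;
  } else {                    // Condition fails: flags become NoFlag.
    n = z = v = false;
  }
  bool gt = !z && (n == v);   // gt fires for both out-of-range sides.
  return !gt;
}

int main() {
  assert(ModelInRange(5, 3, 7));
  assert(!ModelInRange(2, 3, 7));  // Below range: NoFlag implies gt.
  assert(!ModelInRange(9, 3, 7));  // Above range: cmp itself sets gt.
  return 0;
}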
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register type,
- Label* not_string,
- Label* string) {
- Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
- Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
-
- STATIC_ASSERT(kStringTag == 0);
- ASSERT((string != NULL) || (not_string != NULL));
- if (string == NULL) {
- TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
- } else if (not_string == NULL) {
- TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
- } else {
- TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
- B(string);
- }
-}
-
-
-void MacroAssembler::Push(Handle<Object> handle) {
- Mov(Tmp0(), Operand(handle));
- Push(Tmp0());
-}
-
-
-void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
- uint64_t size = count * unit_size;
-
- if (size == 0) {
- return;
- }
-
- if (csp.Is(StackPointer())) {
- ASSERT(size % 16 == 0);
- } else {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
-}
-
-
-void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
- ASSERT(IsPowerOf2(unit_size));
-
- if (unit_size == 0) {
- return;
- }
-
- const int shift = CountTrailingZeros(unit_size, kXRegSize);
- const Operand size(count, LSL, shift);
-
- if (size.IsZero()) {
- return;
- }
-
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
-}
-
-
-void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- ASSERT(IsPowerOf2(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
- const Operand size(count_smi,
- (shift >= 0) ? (LSL) : (LSR),
- (shift >= 0) ? (shift) : (-shift));
-
- if (size.IsZero()) {
- return;
- }
-
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
-}
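
The operand scaling folds the smi untag and the unit-size multiply into one shift: log2(unit_size) minus kSmiShift, falling back to LSR when the result is negative. A model, again assuming kSmiShift is 32:

#include <cassert>
#include <cstdint>

uint64_t ModelScaleSmiCount(uint64_t count_smi, int log2_unit_size) {
  const int kSmiShift = 32;  // Assumed for this port.
  int shift = log2_unit_size - kSmiShift;
  return (shift >= 0) ? (count_smi << shift) : (count_smi >> -shift);
}

int main() {
  uint64_t three_as_smi = uint64_t{3} << 32;
  assert(ModelScaleSmiCount(three_as_smi, 3) == 24);  // 3 slots * 8 bytes.
  return 0;
}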
-
-
-void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
- uint64_t size = count * unit_size;
-
- if (size == 0) {
- return;
- }
-
- Add(StackPointer(), StackPointer(), size);
-
- if (csp.Is(StackPointer())) {
- ASSERT(size % 16 == 0);
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
-}
-
-
-void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
- ASSERT(IsPowerOf2(unit_size));
-
- if (unit_size == 0) {
- return;
- }
-
- const int shift = CountTrailingZeros(unit_size, kXRegSize);
- const Operand size(count, LSL, shift);
-
- if (size.IsZero()) {
- return;
- }
-
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
-}
-
-
-void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- ASSERT(IsPowerOf2(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
- const Operand size(count_smi,
- (shift >= 0) ? (LSL) : (LSR),
- (shift >= 0) ? (shift) : (-shift));
-
- if (size.IsZero()) {
- return;
- }
-
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
-}
-
-
-void MacroAssembler::CompareAndBranch(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* label) {
- if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
- ((cond == eq) || (cond == ne))) {
- if (cond == eq) {
- Cbz(lhs, label);
- } else {
- Cbnz(lhs, label);
- }
- } else {
- Cmp(lhs, rhs);
- B(cond, label);
- }
-}
-
-
-void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
- const uint64_t bit_pattern,
- Label* label) {
- int bits = reg.SizeInBits();
- ASSERT(CountSetBits(bit_pattern, bits) > 0);
- if (CountSetBits(bit_pattern, bits) == 1) {
- Tbnz(reg, MaskToBit(bit_pattern), label);
- } else {
- Tst(reg, bit_pattern);
- B(ne, label);
- }
-}
-
-
-void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
- const uint64_t bit_pattern,
- Label* label) {
- int bits = reg.SizeInBits();
- ASSERT(CountSetBits(bit_pattern, bits) > 0);
- if (CountSetBits(bit_pattern, bits) == 1) {
- Tbz(reg, MaskToBit(bit_pattern), label);
- } else {
- Tst(reg, bit_pattern);
- B(eq, label);
- }
-}
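
When the pattern has exactly one set bit, the tst/branch pair collapses into a single test-bit-and-branch on that bit's index. A sketch of the eligibility check and of MaskToBit, using GCC/Clang builtins:

#include <cassert>
#include <cstdint>

int ModelCountSetBits(uint64_t mask) { return __builtin_popcountll(mask); }
int ModelMaskToBit(uint64_t mask) { return __builtin_ctzll(mask); }

int main() {
  uint64_t mask = uint64_t{1} << 5;
  assert(ModelCountSetBits(mask) == 1);  // Eligible for tbz/tbnz.
  assert(ModelMaskToBit(mask) == 5);     // Bit index the branch tests.
  return 0;
}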
-
-
-void MacroAssembler::InlineData(uint64_t data) {
- ASSERT(is_uint16(data));
- InstructionAccurateScope scope(this, 1);
- movz(xzr, data);
-}
-
-
-void MacroAssembler::EnableInstrumentation() {
- InstructionAccurateScope scope(this, 1);
- movn(xzr, InstrumentStateEnable);
-}
-
-
-void MacroAssembler::DisableInstrumentation() {
- InstructionAccurateScope scope(this, 1);
- movn(xzr, InstrumentStateDisable);
-}
-
-
-void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
- ASSERT(strlen(marker_name) == 2);
-
- // We allow only printable characters in the marker names. Unprintable
- // characters are reserved for controlling features of the instrumentation.
- ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
-
- InstructionAccurateScope scope(this, 1);
- movn(xzr, (marker_name[1] << 8) | marker_name[0]);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
diff --git a/deps/v8/src/a64/macro-assembler-a64.cc b/deps/v8/src/a64/macro-assembler-a64.cc
deleted file mode 100644
index 14fb2fda63..0000000000
--- a/deps/v8/src/a64/macro-assembler-a64.cc
+++ /dev/null
@@ -1,4975 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
-#define __
-
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate,
- byte * buffer,
- unsigned buffer_size)
- : Assembler(arg_isolate, buffer, buffer_size),
- generating_stub_(false),
-#if DEBUG
- allow_macro_instructions_(true),
-#endif
- has_frame_(false),
- use_real_aborts_(true),
- sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-void MacroAssembler::LogicalMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op) {
- if (operand.NeedsRelocation()) {
- LoadRelocated(Tmp0(), operand);
- Logical(rd, rn, Tmp0(), op);
-
- } else if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
- unsigned reg_size = rd.SizeInBits();
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
-
- // If the operation is NOT, invert the operation and immediate.
- if ((op & NOT) == NOT) {
- op = static_cast<LogicalOp>(op & ~NOT);
- immediate = ~immediate;
- if (rd.Is32Bits()) {
- immediate &= kWRegMask;
- }
- }
-
- // Special cases for all set or all clear immediates.
- if (immediate == 0) {
- switch (op) {
- case AND:
- Mov(rd, 0);
- return;
- case ORR: // Fall through.
- case EOR:
- Mov(rd, rn);
- return;
- case ANDS: // Fall through.
- case BICS:
- break;
- default:
- UNREACHABLE();
- }
- } else if ((rd.Is64Bits() && (immediate == -1L)) ||
- (rd.Is32Bits() && (immediate == 0xffffffffL))) {
- switch (op) {
- case AND:
- Mov(rd, rn);
- return;
- case ORR:
- Mov(rd, immediate);
- return;
- case EOR:
- Mvn(rd, rn);
- return;
- case ANDS: // Fall through.
- case BICS:
- break;
- default:
- UNREACHABLE();
- }
- }
-
- unsigned n, imm_s, imm_r;
- if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be encoded in the instruction.
- LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
- } else {
- // Immediate can't be encoded: synthesize using move immediate.
- Register temp = AppropriateTempFor(rn);
- Mov(temp, immediate);
- if (rd.Is(csp)) {
- // If rd is the stack pointer we cannot use it as the destination
- // register so we use the temp register as an intermediate again.
- Logical(temp, rn, temp, op);
- Mov(csp, temp);
- } else {
- Logical(rd, rn, temp, op);
- }
- }
-
- } else if (operand.IsExtendedRegister()) {
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
- // Add/sub extended supports shift <= 4. We want to support exactly the
- // same modes here.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
- ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
- Register temp = AppropriateTempFor(rn, operand.reg());
- EmitExtendShift(temp, operand.reg(), operand.extend(),
- operand.shift_amount());
- Logical(rd, rn, temp, op);
-
- } else {
- // The operand can be encoded in the instruction.
- ASSERT(operand.IsShiftedRegister());
- Logical(rd, rn, operand, op);
- }
-}
-
-
-void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
- ASSERT(!rd.IsZero());
-
- // TODO(all) extend to support more immediates.
- //
- // Immediates on AArch64 can be produced using an initial value, and zero to
- // three move-keep operations.
- //
- // Initial values can be generated with:
- // 1. 64-bit move zero (movz).
- // 2. 32-bit move inverted (movn).
- // 3. 64-bit move inverted.
- // 4. 32-bit orr immediate.
- // 5. 64-bit orr immediate.
- // Move-keep may then be used to modify each of the 16-bit half-words.
- //
- // The code below supports all five initial value generators, and
- // applying move-keep operations to move-zero and move-inverted initial
- // values.
-
- unsigned reg_size = rd.SizeInBits();
- unsigned n, imm_s, imm_r;
- if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move zero instruction. Movz can't
- // write to the stack pointer.
- movz(rd, imm);
- } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move inverted instruction. Movn can't
- // write to the stack pointer.
- movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
- } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be represented in a logical orr instruction.
- LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
- } else {
- // Generic immediate case. Imm will be represented by
- // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
- // A move-zero or move-inverted is generated for the first non-zero or
- // non-0xffff immX, and a move-keep for subsequent non-zero immX.
-
- uint64_t ignored_halfword = 0;
- bool invert_move = false;
- // If the number of 0xffff halfwords is greater than the number of 0x0000
- // halfwords, it's more efficient to use move-inverted.
- if (CountClearHalfWords(~imm, reg_size) >
- CountClearHalfWords(imm, reg_size)) {
- ignored_halfword = 0xffffL;
- invert_move = true;
- }
-
- // Mov instructions can't move value into the stack pointer, so set up a
- // temporary register, if needed.
- Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
-
- // Iterate through the halfwords. Use movn/movz for the first non-ignored
- // halfword, and movk for subsequent halfwords.
- ASSERT((reg_size % 16) == 0);
- bool first_mov_done = false;
- for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
- uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
- if (imm16 != ignored_halfword) {
- if (!first_mov_done) {
- if (invert_move) {
- movn(temp, (~imm16) & 0xffffL, 16 * i);
- } else {
- movz(temp, imm16, 16 * i);
- }
- first_mov_done = true;
- } else {
- // Construct a wider constant.
- movk(temp, imm16, 16 * i);
- }
- }
- }
- ASSERT(first_mov_done);
-
- // Move the temporary if the original destination register was the stack
- // pointer.
- if (rd.IsSP()) {
- mov(rd, temp);
- }
- }
-}
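
The movz/movn decision is a halfword census: if more 16-bit chunks are 0xffff than 0x0000, synthesizing the inverted value is cheaper. A 64-bit-only model of the choice:

#include <cassert>
#include <cstdint>

// Counts all-zero 16-bit halfwords of a 64-bit immediate.
unsigned ModelCountClearHalfWords(uint64_t imm) {
  unsigned count = 0;
  for (int i = 0; i < 4; i++, imm >>= 16) count += ((imm & 0xffff) == 0);
  return count;
}

int main() {
  uint64_t imm = 0xffffffffffff1234;  // Three 0xffff halfwords.
  bool invert = ModelCountClearHalfWords(~imm) >
                ModelCountClearHalfWords(imm);
  assert(invert);  // A single movn x, #0xedcb materializes this value.
  return 0;
}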
-
-
-void MacroAssembler::Mov(const Register& rd,
- const Operand& operand,
- DiscardMoveMode discard_mode) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- // Provide a swap register for instructions that need to write into the
- // system stack pointer (and can't do this inherently).
- Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd);
-
- if (operand.NeedsRelocation()) {
- LoadRelocated(dst, operand);
-
- } else if (operand.IsImmediate()) {
- // Call the macro assembler for generic immediates.
- Mov(dst, operand.immediate());
-
- } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
- // Emit a shift instruction if moving a shifted register. This operation
- // could also be achieved using an orr instruction (like orn used by Mvn),
- // but using a shift instruction makes the disassembly clearer.
- EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
-
- } else if (operand.IsExtendedRegister()) {
- // Emit an extend instruction if moving an extended register. This handles
- // extend with post-shift operations, too.
- EmitExtendShift(dst, operand.reg(), operand.extend(),
- operand.shift_amount());
-
- } else {
- // Otherwise, emit a register move only if the registers are distinct, or
- // if they are not X registers.
- //
- // Note that mov(w0, w0) is not a no-op because it clears the top word of
- // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
- // registers is not required to clear the top word of the X register. In
- // this case, the instruction is discarded.
- //
- // If csp is an operand, add #0 is emitted, otherwise, orr #0.
- if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
- (discard_mode == kDontDiscardForSameWReg))) {
- Assembler::mov(rd, operand.reg());
- }
- // This case can handle writes into the system stack pointer directly.
- dst = rd;
- }
-
- // Copy the result to the system stack pointer.
- if (!dst.Is(rd)) {
- ASSERT(rd.IsZero());
- ASSERT(dst.Is(Tmp1()));
- Assembler::mov(rd, dst);
- }
-}
-
-
-void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
-
- if (operand.NeedsRelocation()) {
- LoadRelocated(Tmp0(), operand);
- Mvn(rd, Tmp0());
-
- } else if (operand.IsImmediate()) {
- // Call the macro assembler for generic immediates.
- Mov(rd, ~operand.immediate());
-
- } else if (operand.IsExtendedRegister()) {
- // Emit two instructions for the extend case. This differs from Mov, as
- // the extend and invert can't be achieved in one instruction.
- Register temp = AppropriateTempFor(rd, operand.reg());
- EmitExtendShift(temp, operand.reg(), operand.extend(),
- operand.shift_amount());
- mvn(rd, temp);
-
- } else {
- // Otherwise, emit a register move only if the registers are distinct.
- // If the jssp is an operand, add #0 is emitted, otherwise, orr #0.
- mvn(rd, operand);
- }
-}
-
-
-unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size % 8) == 0);
- int count = 0;
- for (unsigned i = 0; i < (reg_size / 16); i++) {
- if ((imm & 0xffff) == 0) {
- count++;
- }
- imm >>= 16;
- }
- return count;
-}
-
-
-// The movz instruction can generate immediates containing an arbitrary 16-bit
- // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
-bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
- return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
-}
-
-
-// The movn instruction can generate immediates containing an arbitrary 16-bit
- // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
-bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
- return IsImmMovz(~imm, reg_size);
-}
-
-
-void MacroAssembler::ConditionalCompareMacro(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
- ConditionalCompareOp op) {
- ASSERT((cond != al) && (cond != nv));
- if (operand.NeedsRelocation()) {
- LoadRelocated(Tmp0(), operand);
- ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op);
-
- } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
- (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
- // The immediate can be encoded in the instruction, or the operand is an
- // unshifted register: call the assembler.
- ConditionalCompare(rn, operand, nzcv, cond, op);
-
- } else {
- // The operand isn't directly supported by the instruction: perform the
- // operation on a temporary register.
- Register temp = AppropriateTempFor(rn);
- Mov(temp, operand);
- ConditionalCompare(rn, temp, nzcv, cond, op);
- }
-}
-
-
-void MacroAssembler::Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
- if (operand.IsImmediate()) {
- // Immediate argument. Handle special cases of 0, 1 and -1 using zero
- // register.
- int64_t imm = operand.immediate();
- Register zr = AppropriateZeroRegFor(rn);
- if (imm == 0) {
- csel(rd, rn, zr, cond);
- } else if (imm == 1) {
- csinc(rd, rn, zr, cond);
- } else if (imm == -1) {
- csinv(rd, rn, zr, cond);
- } else {
- Register temp = AppropriateTempFor(rn);
- Mov(temp, operand.immediate());
- csel(rd, rn, temp, cond);
- }
- } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
- // Unshifted register argument.
- csel(rd, rn, operand.reg(), cond);
- } else {
- // All other arguments.
- Register temp = AppropriateTempFor(rn);
- Mov(temp, operand);
- csel(rd, rn, temp, cond);
- }
-}
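
The 0, 1 and -1 special cases work because pairing the zero register with csel, csinc and csinv yields exactly those constants on the not-taken path, so no temporary register is needed. A model of the three selects:

#include <cassert>
#include <cstdint>

int64_t ModelCsel(bool cond, int64_t rn)  { return cond ? rn : 0; }  // zr
int64_t ModelCsinc(bool cond, int64_t rn) { return cond ? rn : 1; }  // zr + 1
int64_t ModelCsinv(bool cond, int64_t rn) { return cond ? rn : ~int64_t{0}; }

int main() {
  assert(ModelCsel(false, 42) == 0);
  assert(ModelCsinc(false, 42) == 1);
  assert(ModelCsinv(false, 42) == -1);
  return 0;
}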
-
-
-void MacroAssembler::AddSubMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op) {
- if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
- !operand.NeedsRelocation() && (S == LeaveFlags)) {
- // The instruction would be a nop. Avoid generating useless code.
- return;
- }
-
- if (operand.NeedsRelocation()) {
- LoadRelocated(Tmp0(), operand);
- AddSubMacro(rd, rn, Tmp0(), S, op);
- } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
- (rn.IsZero() && !operand.IsShiftedRegister()) ||
- (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
- Register temp = AppropriateTempFor(rn);
- Mov(temp, operand);
- AddSub(rd, rn, temp, S, op);
- } else {
- AddSub(rd, rn, operand, S, op);
- }
-}
-
-
-void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
-
- if (operand.NeedsRelocation()) {
- LoadRelocated(Tmp0(), operand);
- AddSubWithCarryMacro(rd, rn, Tmp0(), S, op);
-
- } else if (operand.IsImmediate() ||
- (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
- // Add/sub with carry (immediate or ROR shifted register).
- Register temp = AppropriateTempFor(rn);
- Mov(temp, operand);
- AddSubWithCarry(rd, rn, temp, S, op);
- } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
- // Add/sub with carry (shifted register).
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
- ASSERT(is_uintn(operand.shift_amount(),
- rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
- Register temp = AppropriateTempFor(rn, operand.reg());
- EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
- AddSubWithCarry(rd, rn, temp, S, op);
-
- } else if (operand.IsExtendedRegister()) {
- // Add/sub with carry (extended register).
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
- // Add/sub extended supports a shift <= 4. We want to support exactly the
- // same modes.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
- ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
- Register temp = AppropriateTempFor(rn, operand.reg());
- EmitExtendShift(temp, operand.reg(), operand.extend(),
- operand.shift_amount());
- AddSubWithCarry(rd, rn, temp, S, op);
-
- } else {
- // The addressing mode is directly supported by the instruction.
- AddSubWithCarry(rd, rn, operand, S, op);
- }
-}
-
-
-void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op) {
- int64_t offset = addr.offset();
- LSDataSize size = CalcLSDataSize(op);
-
- // Check if an immediate offset fits in the immediate field of the
- // appropriate instruction. If not, emit two instructions to perform
- // the operation.
- if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
- !IsImmLSUnscaled(offset)) {
- // Immediate offset that can't be encoded using unsigned or unscaled
- // addressing modes.
- Register temp = AppropriateTempFor(addr.base());
- Mov(temp, addr.offset());
- LoadStore(rt, MemOperand(addr.base(), temp), op);
- } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
- // Post-index beyond unscaled addressing range.
- LoadStore(rt, MemOperand(addr.base()), op);
- add(addr.base(), addr.base(), offset);
- } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
- // Pre-index beyond unscaled addressing range.
- add(addr.base(), addr.base(), offset);
- LoadStore(rt, MemOperand(addr.base()), op);
- } else {
- // Encodable in one load/store instruction.
- LoadStore(rt, addr, op);
- }
-}
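
The dispatch above hinges on two offset encodings: a scaled unsigned 12-bit immediate (aligned to the access size) and an unscaled signed 9-bit immediate. A sketch of both predicates under those assumptions:

#include <cassert>
#include <cstdint>

bool ModelIsImmLSScaled(int64_t offset, unsigned size_log2) {
  bool aligned = (offset & ((int64_t{1} << size_log2) - 1)) == 0;
  return aligned && offset >= 0 && (offset >> size_log2) < (1 << 12);
}
bool ModelIsImmLSUnscaled(int64_t offset) {
  return offset >= -256 && offset < 256;
}

int main() {
  assert(ModelIsImmLSScaled(32760, 3));  // Largest scaled X-register offset.
  assert(!ModelIsImmLSScaled(4, 3));     // Misaligned for an X access...
  assert(ModelIsImmLSUnscaled(4));       // ...but the unscaled form fits.
  assert(!ModelIsImmLSUnscaled(-257));   // Takes the multi-instruction path.
  return 0;
}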
-
-
-void MacroAssembler::Load(const Register& rt,
- const MemOperand& addr,
- Representation r) {
- ASSERT(!r.IsDouble());
-
- if (r.IsInteger8()) {
- Ldrsb(rt, addr);
- } else if (r.IsUInteger8()) {
- Ldrb(rt, addr);
- } else if (r.IsInteger16()) {
- Ldrsh(rt, addr);
- } else if (r.IsUInteger16()) {
- Ldrh(rt, addr);
- } else if (r.IsInteger32()) {
- Ldr(rt.W(), addr);
- } else {
- ASSERT(rt.Is64Bits());
- Ldr(rt, addr);
- }
-}
-
-
-void MacroAssembler::Store(const Register& rt,
- const MemOperand& addr,
- Representation r) {
- ASSERT(!r.IsDouble());
-
- if (r.IsInteger8() || r.IsUInteger8()) {
- Strb(rt, addr);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- Strh(rt, addr);
- } else if (r.IsInteger32()) {
- Str(rt.W(), addr);
- } else {
- ASSERT(rt.Is64Bits());
- Str(rt, addr);
- }
-}
-
-
-bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
- // Account for the branch around the veneers and the guard.
- int protection_offset = 2 * kInstructionSize;
- return pc_offset() > max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
-}
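
Every unresolved branch may eventually need its own veneer, so the usable range shrinks by kMaxVeneerCodeSize per pending branch, plus the branch-around and the guard. A model of the test with assumed constants:

#include <cassert>

const int kInstructionSize = 4;
const int kMaxVeneerCodeSize = 8;  // Assumed per-veneer reservation.

bool ModelShouldEmitVeneer(int pc_offset, int max_reachable_pc, int margin,
                           int unresolved_count) {
  int protection_offset = 2 * kInstructionSize;  // Branch around + guard.
  return pc_offset > max_reachable_pc - margin - protection_offset -
                         unresolved_count * kMaxVeneerCodeSize;
}

int main() {
  // Threshold here: 100 - 64 - 8 - 8 = 20 bytes.
  assert(!ModelShouldEmitVeneer(0, 100, 64, 1));  // Still plenty of room.
  assert(ModelShouldEmitVeneer(40, 100, 64, 1));  // Too close: emit now.
  return 0;
}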
-
-
-void MacroAssembler::EmitVeneers(bool need_protection) {
- RecordComment("[ Veneers");
-
- Label end;
- if (need_protection) {
- B(&end);
- }
-
- EmitVeneersGuard();
-
- {
- InstructionAccurateScope scope(this);
- Label size_check;
-
- std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
-
- it = unresolved_branches_.begin();
- while (it != unresolved_branches_.end()) {
- if (ShouldEmitVeneer(it->first)) {
- Instruction* branch = InstructionAt(it->second.pc_offset_);
- Label* label = it->second.label_;
-
-#ifdef DEBUG
- __ bind(&size_check);
-#endif
- // Patch the branch to point to the current position, and emit a branch
- // to the label.
- Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
- RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(veneer);
- b(label);
-#ifdef DEBUG
- ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
- static_cast<uint64_t>(kMaxVeneerCodeSize));
- size_check.Unuse();
-#endif
-
- it_to_delete = it++;
- unresolved_branches_.erase(it_to_delete);
- } else {
- ++it;
- }
- }
- }
-
- Bind(&end);
-
- RecordComment("]");
-}
-
-
-void MacroAssembler::EmitVeneersGuard() {
- if (emit_debug_code()) {
- Unreachable();
- }
-}
-
-
-void MacroAssembler::CheckVeneers(bool need_protection) {
- if (unresolved_branches_.empty()) {
- return;
- }
-
- CHECK(pc_offset() < unresolved_branches_first_limit());
- int margin = kVeneerDistanceMargin;
- if (!need_protection) {
- // Prefer emitting veneers protected by an existing instruction.
- // The divisor of 4 is a rough estimate. With a default margin of 2KB,
- // that leaves 512B = 128 instructions of extra margin to avoid requiring a
- // protective branch.
- margin += margin / 4;
- }
- if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
- EmitVeneers(need_protection);
- }
-}
-
-
-bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
- Label *label, ImmBranchType b_type) {
- bool need_longer_range = false;
- // There are two situations in which we care about the offset being out of
- // range:
- // - The label is bound but too far away.
- // - The label is not bound but linked, and the previous branch
- // instruction in the chain is too far away.
- if (label->is_bound() || label->is_linked()) {
- need_longer_range =
- !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
- }
- if (!need_longer_range && !label->is_bound()) {
- int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
- unresolved_branches_.insert(
- std::pair<int, FarBranchInfo>(max_reachable_pc,
- FarBranchInfo(pc_offset(), label)));
- }
- return need_longer_range;
-}
-
-
-void MacroAssembler::B(Label* label, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
-
- Label done;
- bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
-
- if (need_extra_instructions) {
- b(&done, InvertCondition(cond));
- b(label);
- } else {
- b(label, cond);
- }
- CheckVeneers(!need_extra_instructions);
- bind(&done);
-}
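
A b.cond reaches roughly ±1MB while an unconditional b reaches ±128MB, so the extra-instruction path inverts the condition to hop over a plain b that can reach the far label. A model showing the two forms are observably equivalent:

#include <cassert>

// Models "b.<inv(cond)> done; b label; done:" against "b.<cond> label".
bool ModelExpandedBranchTaken(bool cond) {
  if (!cond) return false;  // Inverted short branch skips the far branch.
  return true;              // Unconditional branch reaches the far label.
}

int main() {
  for (bool cond : {false, true}) {
    assert(ModelExpandedBranchTaken(cond) == cond);
  }
  return 0;
}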
-
-
-void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
-
- Label done;
- bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
-
- if (need_extra_instructions) {
- tbz(rt, bit_pos, &done);
- b(label);
- } else {
- tbnz(rt, bit_pos, label);
- }
- CheckVeneers(!need_extra_instructions);
- bind(&done);
-}
-
-
-void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
-
- Label done;
- bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
-
- if (need_extra_instructions) {
- tbnz(rt, bit_pos, &done);
- b(label);
- } else {
- tbz(rt, bit_pos, label);
- }
- CheckVeneers(!need_extra_instructions);
- bind(&done);
-}
-
-
-void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
-
- Label done;
- bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
-
- if (need_extra_instructions) {
- cbz(rt, &done);
- b(label);
- } else {
- cbnz(rt, label);
- }
- CheckVeneers(!need_extra_instructions);
- bind(&done);
-}
-
-
-void MacroAssembler::Cbz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
-
- Label done;
- bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
-
- if (need_extra_instructions) {
- cbnz(rt, &done);
- b(label);
- } else {
- cbz(rt, label);
- }
- CheckVeneers(!need_extra_instructions);
- bind(&done);
-}
-
-
-// Pseudo-instructions.
-
-
-void MacroAssembler::Abs(const Register& rd, const Register& rm,
- Label* is_not_representable,
- Label* is_representable) {
- ASSERT(allow_macro_instructions_);
- ASSERT(AreSameSizeAndType(rd, rm));
-
- Cmp(rm, 1);
- Cneg(rd, rm, lt);
-
- // If the comparison sets the v flag, the input was the smallest value
- // representable by rm, and the mathematical result of abs(rm) is not
- // representable using two's complement.
- if ((is_not_representable != NULL) && (is_representable != NULL)) {
- B(is_not_representable, vs);
- B(is_representable);
- } else if (is_not_representable != NULL) {
- B(is_not_representable, vs);
- } else if (is_representable != NULL) {
- B(is_representable, vc);
- }
-}
-
-
-// Abstracted stack operations.
-
-
-void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
- const CPURegister& src2, const CPURegister& src3) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(src0.IsValid());
-
- int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
- int size = src0.SizeInBytes();
-
- PrepareForPush(count, size);
- PushHelper(count, size, src0, src1, src2, src3);
-}
-
-
-void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
- const CPURegister& dst2, const CPURegister& dst3) {
- // It is not valid to pop into the same register more than once in one
- // instruction, not even into the zero register.
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(dst0.IsValid());
-
- int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
- int size = dst0.SizeInBytes();
-
- PrepareForPop(count, size);
- PopHelper(count, size, dst0, dst1, dst2, dst3);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
-}
-
-
-void MacroAssembler::PushCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
-
- PrepareForPush(registers.Count(), size);
- // Push up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in order
- // to maintain the 16-byte alignment for csp.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& src0 = registers.PopHighestIndex();
- const CPURegister& src1 = registers.PopHighestIndex();
- const CPURegister& src2 = registers.PopHighestIndex();
- const CPURegister& src3 = registers.PopHighestIndex();
- int count = count_before - registers.Count();
- PushHelper(count, size, src0, src1, src2, src3);
- }
-}
-
-
-void MacroAssembler::PopCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
-
- PrepareForPop(registers.Count(), size);
- // Pop up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be popped in blocks of four in
- // order to maintain the 16-byte alignment for csp.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- const CPURegister& dst2 = registers.PopLowestIndex();
- const CPURegister& dst3 = registers.PopLowestIndex();
- int count = count_before - registers.Count();
- PopHelper(count, size, dst0, dst1, dst2, dst3);
- }
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
-}
-
-
-void MacroAssembler::PushMultipleTimes(int count, Register src) {
- int size = src.SizeInBytes();
-
- PrepareForPush(count, size);
-
- if (FLAG_optimize_for_size && count > 8) {
- Label loop;
- __ Mov(Tmp0(), count / 2);
- __ Bind(&loop);
- PushHelper(2, size, src, src, NoReg, NoReg);
- __ Subs(Tmp0(), Tmp0(), 1);
- __ B(ne, &loop);
-
- count %= 2;
- }
-
- // Push up to four registers at a time if possible because if the current
- // stack pointer is csp and the register size is 32, registers must be pushed
- // in blocks of four in order to maintain the 16-byte alignment for csp.
- while (count >= 4) {
- PushHelper(4, size, src, src, src, src);
- count -= 4;
- }
- if (count >= 2) {
- PushHelper(2, size, src, src, NoReg, NoReg);
- count -= 2;
- }
- if (count == 1) {
- PushHelper(1, size, src, NoReg, NoReg, NoReg);
- count -= 1;
- }
- ASSERT(count == 0);
-}
-
-
-void MacroAssembler::PushHelper(int count, int size,
- const CPURegister& src0,
- const CPURegister& src1,
- const CPURegister& src2,
- const CPURegister& src3) {
- // Ensure that we don't unintentionally modify scratch or debug registers.
- InstructionAccurateScope scope(this);
-
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(size == src0.SizeInBytes());
-
- // When pushing multiple registers, the store order is chosen such that
- // Push(a, b) is equivalent to Push(a) followed by Push(b).
- switch (count) {
- case 1:
- ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
- str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
- break;
- case 2:
- ASSERT(src2.IsNone() && src3.IsNone());
- stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
- break;
- case 3:
- ASSERT(src3.IsNone());
- stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
- str(src0, MemOperand(StackPointer(), 2 * size));
- break;
- case 4:
- // Skip over 4 * size, then fill in the gap. This allows four W registers
- // to be pushed using csp, whilst maintaining 16-byte alignment for csp
- // at all times.
- stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
- stp(src1, src0, MemOperand(StackPointer(), 2 * size));
- break;
- default:
- UNREACHABLE();
- }
-}
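
The four-register case claims the whole 4 * size block with a single pre-indexed store, so even four W registers (16 bytes total) move csp by a multiple of 16. A toy check of the alignment invariant:

#include <cassert>

int main() {
  int size = 4;          // W register width in bytes.
  int sp = 64;           // Toy 16-byte-aligned stack pointer.
  sp -= 4 * size;        // stp src3, src2, [sp, #-4*size]! claims the block.
  assert(sp % 16 == 0);  // The only csp update keeps 16-byte alignment;
                         // stp src1, src0, [sp, #2*size] fills the gap.
  return 0;
}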
-
-
-void MacroAssembler::PopHelper(int count, int size,
- const CPURegister& dst0,
- const CPURegister& dst1,
- const CPURegister& dst2,
- const CPURegister& dst3) {
- // Ensure that we don't unintentionally modify scratch or debug registers.
- InstructionAccurateScope scope(this);
-
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(size == dst0.SizeInBytes());
-
- // When popping multiple registers, the load order is chosen such that
- // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
- switch (count) {
- case 1:
- ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
- ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
- break;
- case 2:
- ASSERT(dst2.IsNone() && dst3.IsNone());
- ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
- break;
- case 3:
- ASSERT(dst3.IsNone());
- ldr(dst2, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
- break;
- case 4:
- // Load the higher addresses first, then load the lower addresses and
- // skip the whole block in the second instruction. This allows four W
- // registers to be popped using csp, whilst maintaining 16-byte alignment
- // for csp at all times.
- ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void MacroAssembler::PrepareForPush(int count, int size) {
- // TODO(jbramley): Use AssertStackConsistency here, if possible. See the
- // AssertStackConsistency for details of why we can't at the moment.
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- ASSERT((count * size) % 16 == 0);
- } else {
- // Even if the current stack pointer is not the system stack pointer (csp),
- // the system stack pointer will still be modified in order to comply with
- // ABI rules about accessing memory below the system stack pointer.
- BumpSystemStackPointer(count * size);
- }
-}
-
-
-void MacroAssembler::PrepareForPop(int count, int size) {
- AssertStackConsistency();
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- ASSERT((count * size) % 16 == 0);
- }
-}
-
-
-void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
- if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
- } else if (emit_debug_code()) {
- Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
- }
-
- Str(src, MemOperand(StackPointer(), offset));
-}
-
-
-void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
- if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
- } else if (emit_debug_code()) {
- Cmp(xzr, offset);
- Check(le, kStackAccessBelowStackPointer);
- }
-
- Ldr(dst, MemOperand(StackPointer(), offset));
-}
-
-
-void MacroAssembler::PokePair(const CPURegister& src1,
- const CPURegister& src2,
- int offset) {
- ASSERT(AreSameSizeAndType(src1, src2));
- ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
- Stp(src1, src2, MemOperand(StackPointer(), offset));
-}
-
-
-void MacroAssembler::PeekPair(const CPURegister& dst1,
- const CPURegister& dst2,
- int offset) {
- ASSERT(AreSameSizeAndType(dst1, dst2));
- ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
- Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
-}
-
-
-void MacroAssembler::PushCalleeSavedRegisters() {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
-
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
-
- MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex);
-
- stp(d14, d15, tos);
- stp(d12, d13, tos);
- stp(d10, d11, tos);
- stp(d8, d9, tos);
-
- stp(x29, x30, tos);
- stp(x27, x28, tos); // x28 = jssp
- stp(x25, x26, tos);
- stp(x23, x24, tos);
- stp(x21, x22, tos);
- stp(x19, x20, tos);
-}
-
-
-void MacroAssembler::PopCalleeSavedRegisters() {
- // Ensure that the macro-assembler doesn't use any scratch registers.
- InstructionAccurateScope scope(this);
-
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
-
- MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex);
-
- ldp(x19, x20, tos);
- ldp(x21, x22, tos);
- ldp(x23, x24, tos);
- ldp(x25, x26, tos);
- ldp(x27, x28, tos); // x28 = jssp
- ldp(x29, x30, tos);
-
- ldp(d8, d9, tos);
- ldp(d10, d11, tos);
- ldp(d12, d13, tos);
- ldp(d14, d15, tos);
-}
-
-
-void MacroAssembler::AssertStackConsistency() {
- if (emit_debug_code()) {
- if (csp.Is(StackPointer())) {
- // TODO(jbramley): Check for csp alignment if it is the stack pointer.
- } else {
- // TODO(jbramley): Currently we cannot use this assertion in Push because
- // some calling code assumes that the flags are preserved. For an example,
- // look at Builtins::Generate_ArgumentsAdaptorTrampoline.
- Cmp(csp, StackPointer());
- Check(ls, kTheCurrentStackPointerIsBelowCsp);
- }
- }
-}
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
- // TODO(jbramley): Most root values are constants, and can be synthesized
- // without a load. Refer to the ARM back end for details.
- Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index) {
- Str(source, MemOperand(root, index << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadTrueFalseRoots(Register true_root,
- Register false_root) {
- STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
- Ldp(true_root, false_root,
- MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- Mov(result, Operand(cell));
- Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- Mov(result, Operand(object));
- }
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Map::EnumLengthBits::kMask);
-}
-
-
-void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Register object,
- Register null_value,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* call_runtime) {
- ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
- scratch3));
-
- Register empty_fixed_array_value = scratch0;
- Register current_object = scratch1;
-
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Label next, start;
-
- Mov(current_object, object);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- Register map = scratch2;
- Register enum_length = scratch3;
- Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
-
- EnumLengthUntagged(enum_length, map);
- Cmp(enum_length, kInvalidEnumCacheSentinel);
- B(eq, call_runtime);
-
- B(&start);
-
- Bind(&next);
- Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLengthUntagged(enum_length, map);
- Cbnz(enum_length, call_runtime);
-
- Bind(&start);
-
- // Check that there are no elements. Register current_object contains the
- // current JS object we've reached through the prototype chain.
- Label no_elements;
- Ldr(current_object, FieldMemOperand(current_object,
- JSObject::kElementsOffset));
- Cmp(current_object, empty_fixed_array_value);
- B(eq, &no_elements);
-
- // Second chance, the object may be using the empty slow element dictionary.
- CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
- B(ne, call_runtime);
-
- Bind(&no_elements);
- Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
- Cmp(current_object, null_value);
- B(ne, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* no_memento_found) {
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- Add(scratch1, receiver,
- JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
- Cmp(scratch1, Operand(new_space_start));
- B(lt, no_memento_found);
-
- Mov(scratch2, Operand(new_space_allocation_top));
- Ldr(scratch2, MemOperand(scratch2));
- Cmp(scratch1, scratch2);
- B(gt, no_memento_found);
-
- Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
- Cmp(scratch1,
- Operand(isolate()->factory()->allocation_memento_map()));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry(Register exception,
- Register object,
- Register state,
- Register scratch1,
- Register scratch2) {
- // Handler expects argument in x0.
- ASSERT(exception.Is(x0));
-
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
- Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
- Lsr(scratch2, state, StackHandler::kKindWidth);
- Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
- Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
- Br(scratch1);
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Condition cond,
- Label* branch) {
- ASSERT(cond == eq || cond == ne);
- // Use Tmp1() to have a different destination register, as Tmp0() will be used
- // for relocation.
- And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate())));
- Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate())));
- B(cond, branch);
-}
-
-
-void MacroAssembler::Throw(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The handler expects the exception in x0.
- ASSERT(value.Is(x0));
-
- // Drop the stack pointer to the top of the top handler.
- ASSERT(jssp.Is(StackPointer()));
- Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- Ldr(jssp, MemOperand(scratch1));
- // Restore the next handler.
- Pop(scratch2);
- Str(scratch2, MemOperand(scratch1));
-
- // Get the code object and state. Restore the context and frame pointer.
- Register object = scratch1;
- Register state = scratch2;
- Pop(object, state, cp, fp);
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- Label not_js_frame;
- Cbz(cp, &not_js_frame);
- Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- Bind(&not_js_frame);
-
- JumpToHandlerEntry(value, object, state, scratch3, scratch4);
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The handler expects the exception in x0.
- ASSERT(value.Is(x0));
-
- // Drop the stack pointer to the top of the top stack handler.
- ASSERT(jssp.Is(StackPointer()));
- Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- Ldr(jssp, MemOperand(scratch1));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- B(&check_kind);
- Bind(&fetch_next);
- Peek(jssp, StackHandlerConstants::kNextOffset);
-
- Bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- Peek(scratch2, StackHandlerConstants::kStateOffset);
- TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- Pop(scratch2);
- Str(scratch2, MemOperand(scratch1));
-
- // Get the code object and state. Clear the context and frame pointer (0 was
- // saved in the handler).
- Register object = scratch1;
- Register state = scratch2;
- Pop(object, state, cp, fp);
-
- JumpToHandlerEntry(value, object, state, scratch3, scratch4);
-}
-
-
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- Bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- RecordComment("Throw message: ");
- RecordComment((msg != NULL) ? msg : "UNKNOWN");
-#endif
-
- Mov(x0, Operand(Smi::FromInt(reason)));
- Push(x0);
-
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // ThrowMessage should not return here.
- Unreachable();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label ok;
- B(InvertCondition(cc), &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
-void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
- Label ok;
- JumpIfNotSmi(value, &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
-void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- ASSERT(smi.Is64Bits());
- Abs(smi, smi, slow);
-}
-
-
-void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(eq, reason);
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(ne, reason);
- }
-}
-
-
-void MacroAssembler::AssertName(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- // TODO(jbramley): Add AbortIfSmi and related functions.
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- Abort(kOperandIsASmiAndNotAName);
- Bind(&not_smi);
-
- Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE);
- Check(ls, kOperandIsNotAName);
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- Register temp = Tmp1();
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(ne, kOperandIsASmiAndNotAString);
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
- Check(lo, kOperandIsNotAString);
- }
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles) {
- // All arguments must be on the stack before this function is called.
- // x0 holds the return value after the call.
-
- // Check that the number of arguments matches what the function expects.
- // If f->nargs is -1, the function can accept a variable number of arguments.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- // Illegal operation: drop the stack arguments and return undefined.
- if (num_arguments > 0) {
- Drop(num_arguments);
- }
- LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- return;
- }
-
- // Place the necessary arguments.
- Mov(x0, num_arguments);
- Mov(x1, Operand(ExternalReference(f, isolate())));
-
- CEntryStub stub(1, save_doubles);
- CallStub(&stub);
-}
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(
- Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand) {
- ASM_LOCATION("CallApiFunctionAndReturn");
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- ASSERT(function_address.is(x1) || function_address.is(x2));
-
- Label profiler_disabled;
- Label end_profiler_check;
- bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
- Ldrb(w10, MemOperand(x10));
- Cbz(w10, &profiler_disabled);
- Mov(x3, Operand(thunk_ref));
- B(&end_profiler_check);
-
- Bind(&profiler_disabled);
- Mov(x3, function_address);
- Bind(&end_profiler_check);
-
- // Save the callee-save registers we are going to use.
- // TODO(all): Is this necessary? ARM doesn't do it.
- STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
- Poke(x19, (spill_offset + 0) * kXRegSizeInBytes);
- Poke(x20, (spill_offset + 1) * kXRegSizeInBytes);
- Poke(x21, (spill_offset + 2) * kXRegSizeInBytes);
- Poke(x22, (spill_offset + 3) * kXRegSizeInBytes);
-
- // Allocate HandleScope in callee-save registers.
- // We will need to restore the HandleScope after the call to the API
- // function; allocating it in callee-save registers means it is preserved
- // across the C call.
- Register handle_scope_base = x22;
- Register next_address_reg = x19;
- Register limit_reg = x20;
- Register level_reg = w21;
-
- Mov(handle_scope_base, Operand(next_address));
- Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- Add(level_reg, level_reg, 1);
- Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on stack (could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, x3);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label exception_handled;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label return_value_loaded;
-
- // Load value from ReturnValue.
- Ldr(x0, return_value_operand);
- Bind(&return_value_loaded);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- if (emit_debug_code()) {
- Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
- Cmp(w1, level_reg);
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
- }
- Sub(level_reg, level_reg, 1);
- Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
- Cmp(limit_reg, x1);
- B(ne, &delete_allocated_handles);
-
- Bind(&leave_exit_frame);
- // Restore callee-saved registers.
- Peek(x19, (spill_offset + 0) * kXRegSizeInBytes);
- Peek(x20, (spill_offset + 1) * kXRegSizeInBytes);
- Peek(x21, (spill_offset + 2) * kXRegSizeInBytes);
- Peek(x22, (spill_offset + 3) * kXRegSizeInBytes);
-
- // Check if the function scheduled an exception.
- Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate())));
- Ldr(x5, MemOperand(x5));
- JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
- Bind(&exception_handled);
-
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- Ldr(cp, *context_restore_operand);
- }
-
- LeaveExitFrame(false, x1, !restore_context);
- Drop(stack_space);
- Ret();
-
- Bind(&promote_scheduled_exception);
- {
- FrameScope frame(this, StackFrame::INTERNAL);
- CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
- }
- B(&exception_handled);
-
- // HandleScope limit has changed. Delete allocated extensions.
- Bind(&delete_allocated_handles);
- Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
- // Save the return value in a callee-save register.
- Register saved_result = x19;
- Mov(saved_result, x0);
- Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- Mov(x0, saved_result);
- B(&leave_exit_frame);
-}
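-
-// In outline, the HandleScope bookkeeping above is (an illustrative sketch,
-// not generated code):
-//   save next and limit; level++;                  // before the call
-//   call the API function through x3;
-//   restore next; level--;                         // after the call
-//   if (limit changed) delete_handle_scope_extensions();  // slow path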
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- Mov(x0, num_arguments);
- Mov(x1, Operand(ext));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- Mov(x1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- Ldr(target, GlobalObjectMemOperand());
- Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- Ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(x1));
- GetBuiltinFunction(x1, id);
- // Load the code entry point from the builtins object.
- Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- ASM_LOCATION("MacroAssembler::InvokeBuiltin");
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- GetBuiltinEntry(x2, id);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(x2));
- Call(x2);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- Jump(x2);
- }
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Mov(x0, num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- ASSERT(!AreAliased(string, length, scratch1, scratch2));
- LoadRoot(scratch2, map_index);
- SmiTag(scratch1, length);
- Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-
- Mov(scratch2, String::kEmptyHashField);
- Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if V8_HOST_ARCH_A64
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_A64
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // V8_HOST_ARCH_A64
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_of_reg_args) {
- CallCFunction(function, num_of_reg_args, 0);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_of_reg_args,
- int num_of_double_args) {
- Mov(Tmp0(), Operand(function));
- CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_of_reg_args,
- int num_of_double_args) {
- ASSERT(has_frame());
- // We can pass 8 integer arguments in registers. If we need to pass more than
- // that, we'll need to implement support for passing them on the stack.
- ASSERT(num_of_reg_args <= 8);
-
- // If we're passing doubles, we're limited to the following prototypes
- // (defined by ExternalReference::Type):
- // BUILTIN_COMPARE_CALL: int f(double, double)
- // BUILTIN_FP_FP_CALL: double f(double, double)
- // BUILTIN_FP_CALL: double f(double)
- // BUILTIN_FP_INT_CALL: double f(double, int)
- if (num_of_double_args > 0) {
- ASSERT(num_of_reg_args <= 1);
- ASSERT((num_of_double_args + num_of_reg_args) <= 2);
- }
-
- // If the stack pointer is not csp, we need to derive an aligned csp from the
- // current stack pointer.
- const Register old_stack_pointer = StackPointer();
- if (!csp.Is(old_stack_pointer)) {
- AssertStackConsistency();
-
- int sp_alignment = ActivationFrameAlignment();
- // The ABI mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
-
- // The current stack pointer is a callee saved register, and is preserved
- // across the call.
- ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
-
- // Align and synchronize the system stack pointer with jssp.
- Bic(csp, old_stack_pointer, sp_alignment - 1);
- SetStackPointer(csp);
- }
-
- // Call directly. The function called cannot cause a GC, or allow preemption,
- // so the return address in the link register stays correct.
- Call(function);
-
- if (!csp.Is(old_stack_pointer)) {
- if (emit_debug_code()) {
- // Because the stack pointer must be aligned on a 16-byte boundary, the
- // aligned csp can be up to 12 bytes below the jssp. This is the case
- // where we only pushed one W register on top of an aligned jssp.
- Register temp = Tmp1();
- ASSERT(ActivationFrameAlignment() == 16);
- Sub(temp, csp, old_stack_pointer);
- // We want temp <= 0 && temp >= -12.
- Cmp(temp, 0);
- Ccmp(temp, -12, NFlag, le);
- Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
- }
- SetStackPointer(old_stack_pointer);
- }
-}
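-
-// Illustrative trace of the csp derivation above (assumes 16-byte alignment;
-// not generated code):
-//   jssp = 0x...7f4   (an aligned 0x...7f0, plus one W register pushed)
-//   Bic(csp, jssp, 15)  ->  csp = 0x...7f0
-//   After the call, csp - jssp = -4, inside the checked range [-12, 0].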
-
-
-void MacroAssembler::Jump(Register target) {
- Br(target);
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
- Mov(Tmp0(), Operand(target, rmode));
- Br(Tmp0());
-}
-
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
-}
-
-
-void MacroAssembler::Call(Register target) {
- BlockConstPoolScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
- Blr(target);
-
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
-#endif
-}
-
-
-void MacroAssembler::Call(Label* target) {
- BlockConstPoolScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
- Bl(target);
-
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
-#endif
-}
-
-
-// MacroAssembler::CallSize is sensitive to changes in this function, as it
-// needs to know how many instructions are used to branch to the target.
-void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
- BlockConstPoolScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
- // Addresses are always 64 bits, so we should never encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- uint64_t imm = reinterpret_cast<uint64_t>(target);
- movz(Tmp0(), (imm >> 0) & 0xffff, 0);
- movk(Tmp0(), (imm >> 16) & 0xffff, 16);
- movk(Tmp0(), (imm >> 32) & 0xffff, 32);
- movk(Tmp0(), (imm >> 48) & 0xffff, 48);
- } else {
- LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode));
- }
- Blr(Tmp0());
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
-#endif
-}
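-
-// Illustrative NONE64 sequence for a hypothetical target address
-// 0x00007ffe12345678 (not generated code):
-//   movz Tmp0, #0x5678            // bits [15:0]
-//   movk Tmp0, #0x1234, lsl #16   // bits [31:16]
-//   movk Tmp0, #0x7ffe, lsl #32   // bits [47:32]
-//   movk Tmp0, #0x0000, lsl #48   // bits [63:48]
-//   blr  Tmp0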
-
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
- if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
-
- AllowDeferredHandleDereference embedding_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode);
-
-#ifdef DEBUG
- // Check the size of the code generated.
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
-#endif
-}
-
-
-int MacroAssembler::CallSize(Register target) {
- USE(target);
- return kInstructionSize;
-}
-
-
-int MacroAssembler::CallSize(Label* target) {
- USE(target);
- return kInstructionSize;
-}
-
-
-int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
- USE(target);
-
- // Addresses are always 64 bits, so we should never encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- USE(code);
- USE(ast_id);
-
- // Addresses are always 64 bits, so we should never encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-
-void MacroAssembler::JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number) {
- ASSERT(on_heap_number || on_not_heap_number);
- // Tmp0() is used as a scratch register.
- ASSERT(!AreAliased(Tmp0(), heap_number_map));
- AssertNotSmi(object);
-
- // Load the HeapNumber map if it is not passed.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = Tmp1();
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- } else {
- // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- }
-
- Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
- Cmp(Tmp0(), heap_number_map);
-
- if (on_heap_number) {
- B(eq, on_heap_number);
- }
- if (on_not_heap_number) {
- B(ne, on_not_heap_number);
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- on_heap_number,
- NULL);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- NULL,
- on_not_heap_number);
-}
-
-
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
-
- // Register usage: 'result' is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
- FixedArray::kLengthOffset));
- Asr(mask, mask, 1); // Divide length by two.
- Sub(mask, mask, 1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
-
- JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2));
- Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
- Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
- Eor(scratch1, scratch1, scratch2);
- And(scratch1, scratch1, mask);
-
- // Calculate address of entry in string cache: each entry consists of two
- // pointer sized fields.
- Add(scratch1, number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
- Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
- Fcmp(d0, d1);
- B(ne, not_found);
- B(&load_result_from_cache);
-
- Bind(&is_smi);
- Register scratch = scratch1;
- And(scratch, mask, Operand::UntagSmi(object));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- Add(scratch, number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- Cmp(object, probe);
- B(ne, not_found);
-
- // Get the result from the cache.
- Bind(&load_result_from_cache);
- Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
- scratch1, scratch2);
-}
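-
-// A minimal sketch of the lookup above (mirrors Heap::GetNumberStringCache;
-// illustrative only):
-//   mask  = (cache_length / 2) - 1;
-//   index = is_smi ? (untagged_value & mask)
-//                  : ((low_word ^ high_word) & mask);
-//   // Each entry is a {number, string} pair of pointers:
-//   if (cache[2 * index] == number) result = cache[2 * index + 1];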
-
-
-void MacroAssembler::TryConvertDoubleToInt(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion) {
- // Convert to an int and back again, then compare with the original value.
- Fcvtzs(as_int, value);
- Scvtf(scratch_d, as_int);
- Fcmp(value, scratch_d);
-
- if (on_successful_conversion) {
- B(on_successful_conversion, eq);
- }
- if (on_failed_conversion) {
- B(on_failed_conversion, ne);
- }
-}
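-
-// Illustrative traces (not generated code):
-//   value = 3.0:  Fcvtzs -> 3, Scvtf -> 3.0,  Fcmp equal     -> success.
-//   value = 3.5:  Fcvtzs -> 3, Scvtf -> 3.0,  Fcmp not equal -> failure.
-//   value = -0.0: Fcvtzs -> 0, Scvtf -> +0.0, Fcmp equal     -> success,
-//                 so callers that care about -0.0 must check it separately.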
-
-
-void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
- Label* on_negative_zero) {
- // The bit pattern of floating point -0.0 is 0x8000000000000000, which is
- // INT64_MIN when read as an integer, so subtracting 1 (cmp) will cause
- // signed overflow.
- Fmov(Tmp0(), input);
- Cmp(Tmp0(), 1);
- B(vs, on_negative_zero);
-}
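-
-// Why this works (illustrative): 0x8000000000000000, the bit pattern of
-// -0.0, is the only value for which subtracting 1 overflows downwards, so
-// the V flag is set exactly when the input is -0.0.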
-
-
-void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
- // Clamp the value to [0..255].
- Cmp(input.W(), Operand(input.W(), UXTB));
- // If input < input & 0xff, it must be < 0, so saturate to 0.
- Csel(output.W(), wzr, input.W(), lt);
- // Create a constant 0xff.
- Mov(WTmp0(), 255);
- // If input > input & 0xff, it must be > 255, so saturate to 255.
- Csel(output.W(), WTmp0(), output.W(), gt);
-}
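-
-// Worked examples (not generated code):
-//   input = 300: 300 & 0xff = 44,  300 > 44  -> gt, so output = 255.
-//   input = -5:  -5 & 0xff = 251,  -5 < 251  -> lt, so output = 0.
-//   input = 200: 200 & 0xff = 200, equal     -> output stays 200.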
-
-
-void MacroAssembler::ClampInt32ToUint8(Register in_out) {
- ClampInt32ToUint8(in_out, in_out);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register output,
- DoubleRegister input,
- DoubleRegister dbl_scratch) {
- // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
- // - Inputs lower than 0 (including -infinity) produce 0.
- // - Inputs higher than 255 (including +infinity) produce 255.
- // Also, it seems that PIXEL types use round-to-nearest rather than
- // round-towards-zero.
-
- // Squash +infinity before the conversion: for out-of-range inputs, Fcvtnu
- // saturates to the maximum value the destination register can hold, not
- // to 255.
- Fmov(dbl_scratch, 255);
- Fmin(dbl_scratch, dbl_scratch, input);
-
- // Convert double to unsigned integer. Values less than zero become zero.
- // Values greater than 255 have already been clamped to 255.
- Fcvtnu(output, dbl_scratch);
-}
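-
-// Worked examples (not generated code):
-//   input = 300.0: Fmin -> 255.0, Fcvtnu -> 255.
-//   input = -3.2:  Fmin -> -3.2,  Fcvtnu saturates negative inputs to 0.
-//   input = 254.5: Fmin -> 254.5, Fcvtnu rounds to nearest even -> 254.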
-
-
-void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1()));
- ASSERT(count >= 2);
-
- const Register& remaining = scratch3;
- Mov(remaining, count / 2);
-
- // Only use the Assembler, so we can use Tmp0() and Tmp1().
- InstructionAccurateScope scope(this);
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- sub(dst_untagged, dst, kHeapObjectTag);
- sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- Label loop;
- bind(&loop);
- ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
- PostIndex));
- stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
- PostIndex));
- sub(remaining, remaining, 1);
- cbnz(remaining, &loop);
-
- // Handle the leftovers.
- if (count & 1) {
- ldr(Tmp0(), MemOperand(src_untagged));
- str(Tmp0(), MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1()));
-
- // Only use the Assembler, so we can use Tmp0() and Tmp1().
- InstructionAccurateScope scope(this);
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- sub(dst_untagged, dst, kHeapObjectTag);
- sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- for (unsigned i = 0; i < count / 2; i++) {
- ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
- PostIndex));
- stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
- PostIndex));
- }
-
- // Handle the leftovers.
- if (count & 1) {
- ldr(Tmp0(), MemOperand(src_untagged));
- str(Tmp0(), MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1()));
-
- // Only use the Assembler, so we can use Tmp0() and Tmp1().
- InstructionAccurateScope scope(this);
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = Tmp1();
- sub(dst_untagged, dst, kHeapObjectTag);
- sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields one by one.
- for (unsigned i = 0; i < count; i++) {
- ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
- str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
- }
-}
-
-
-void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
- unsigned count) {
- // One of two methods is used:
- //
- // For high 'count' values where many scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- //
- // For low 'count' values or where few scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- //
- // In both cases, fields are copied in pairs if possible, and left-overs are
- // handled separately.
- ASSERT(!temps.IncludesAliasOf(dst));
- ASSERT(!temps.IncludesAliasOf(src));
- ASSERT(!temps.IncludesAliasOf(Tmp0()));
- ASSERT(!temps.IncludesAliasOf(Tmp1()));
- ASSERT(!temps.IncludesAliasOf(xzr));
- ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1()));
-
- if (emit_debug_code()) {
- Cmp(dst, src);
- Check(ne, kTheSourceAndDestinationAreTheSame);
- }
-
- // The value of 'count' at which a loop will be generated (if there are
- // enough scratch registers).
- static const unsigned kLoopThreshold = 8;
-
- ASSERT(!temps.IsEmpty());
- Register scratch1 = Register(temps.PopLowestIndex());
- Register scratch2 = Register(temps.PopLowestIndex());
- Register scratch3 = Register(temps.PopLowestIndex());
-
- if (scratch3.IsValid() && (count >= kLoopThreshold)) {
- CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3);
- } else if (scratch2.IsValid()) {
- CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2);
- } else if (scratch1.IsValid()) {
- CopyFieldsUnrolledHelper(dst, src, count, scratch1);
- } else {
- UNREACHABLE();
- }
-}
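-
-// Illustrative dispatch, assuming three valid scratch registers (not
-// generated code):
-//   count = 10 -> CopyFieldsLoopPairsHelper: five ldp/stp iterations.
-//   count = 5  -> CopyFieldsUnrolledPairsHelper: two ldp/stp pairs, then
-//                 one ldr/str for the odd field.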
-
-
-void MacroAssembler::CopyBytes(Register dst,
- Register src,
- Register length,
- Register scratch,
- CopyHint hint) {
- ASSERT(!AreAliased(src, dst, length, scratch));
-
- // TODO(all): Implement a faster copy function, and use hint to determine
- // which algorithm to use for copies.
- if (emit_debug_code()) {
- // Check copy length.
- Cmp(length, 0);
- Assert(ge, kUnexpectedNegativeValue);
-
- // Check src and dst buffers don't overlap.
- Add(scratch, src, length); // Calculate end of src buffer.
- Cmp(scratch, dst);
- Add(scratch, dst, length); // Calculate end of dst buffer.
- Ccmp(scratch, src, ZFlag, gt);
- Assert(le, kCopyBuffersOverlap);
- }
-
- Label loop, done;
- Cbz(length, &done);
-
- Bind(&loop);
- Sub(length, length, 1);
- Ldrb(scratch, MemOperand(src, 1, PostIndex));
- Strb(scratch, MemOperand(dst, 1, PostIndex));
- Cbnz(length, &loop);
- Bind(&done);
-}
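-
-// The debug overlap check above is, in effect (illustrative only):
-//   ok = (src + length <= dst) || (dst + length <= src)
-// Cmp sets the flags for (src + length) against dst; Ccmp performs the
-// second comparison only when the first was 'gt', and otherwise forces
-// ZFlag, so the single Assert(le) covers both arms.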
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- B(&entry);
- Bind(&loop);
- // TODO(all): consider using stp here.
- Str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
- Bind(&entry);
- Cmp(start_offset, end_offset);
- B(lt, &loop);
-}
-
-
-void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check) {
-
- if (smi_check == DO_SMI_CHECK) {
- JumpIfEitherSmi(first, second, failure);
- } else if (emit_debug_code()) {
- ASSERT(smi_check == DONT_DO_SMI_CHECK);
- Label not_smi;
- JumpIfEitherSmi(first, second, NULL, &not_smi);
-
- // At least one input is a smi, but the flags indicated a smi check wasn't
- // needed.
- Abort(kUnexpectedSmi);
-
- Bind(&not_smi);
- }
-
- // Test that both first and second are sequential ASCII strings.
- Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- ASSERT(!AreAliased(scratch1, second));
- ASSERT(!AreAliased(scratch1, scratch2));
- static const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
- B(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, kFlatAsciiStringMask);
- Cmp(scratch, kFlatAsciiStringTag);
- B(ne, failure);
-}
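-
-// Worked example, assuming the usual v8 instance-type encoding of this era
-// (kStringRepresentationMask 0x3, kStringEncodingMask 0x4, kIsNotStringMask
-// 0x80, giving mask = 0x87 and tag = 0x04):
-//   sequential ASCII string: type & 0x87 == 0x04 -> falls through.
-//   cons ASCII string: the representation bits are non-zero -> failure.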
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- ASSERT(!AreAliased(first, second, scratch1, scratch2));
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
- B(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfNotUniqueName(Register type,
- Label* not_unique_name) {
- STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
- // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
- // continue
- // } else {
- // goto not_unique_name
- // }
- Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
- Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
- B(ne, not_unique_name);
-}
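-
-// How the Tst/Ccmp pair implements the pseudo-code above (illustrative):
-//   Tst sets Z iff type is an internalized string.
-//   Ccmp compares type with SYMBOL_TYPE only when Z was clear (ne); when Z
-//   was set it forces ZFlag instead, so 'eq' already holds.
-//   A final 'ne' therefore means neither an internalized string nor a
-//   symbol, i.e. not a unique name.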
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- bool* definitely_mismatches,
- const CallWrapper& call_wrapper) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
- // x0: actual arguments count.
- // x1: function (passed through to callee).
- // x2: expected arguments count.
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(x0));
- ASSERT(expected.is_immediate() || expected.reg().is(x2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
-
- } else {
- Mov(x0, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip the adaptation code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- // Set up x2 for the argument adaptor.
- Mov(x2, expected.immediate());
- }
- }
-
- } else { // expected is a register.
- Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
- : Operand(actual.reg());
- // If actual == expected perform a regular invocation.
- Cmp(expected.reg(), actual_op);
- B(eq, &regular_invoke);
- // Otherwise set up x0 for the argument adaptor.
- Mov(x0, actual_op);
- }
-
- // If the argument counts may mismatch, generate a call to the argument
- // adaptor.
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- Mov(x3, Operand(code_constant));
- Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- Call(adaptor);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- // If the arg counts don't match, no extra code is emitted by
- // MAsm::InvokeCode and we can just fall through.
- B(done);
- }
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- }
- Bind(&regular_invoke);
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- &definitely_mismatches, call_wrapper);
-
- // If we are certain that actual != expected, then we know InvokePrologue will
- // have handled the call through the argument adaptor mechanism.
- // The called function expects the call kind in x5.
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- Jump(code);
- }
- }
-
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- Bind(&done);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- ASSERT(function.is(x1));
-
- Register expected_reg = x2;
- Register code_reg = x3;
-
- Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- // The number of arguments is stored as an int32_t, and -1 is a marker
- // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
- // extension to correctly handle it.
- Ldr(expected_reg, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
- Ldrsw(expected_reg,
- FieldMemOperand(expected_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- Ldr(code_reg,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- ASSERT(function.Is(x1));
-
- Register code_reg = x3;
-
- // Set up the context.
- Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- __ LoadObject(x1, function);
- InvokeFunction(x1, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
- DoubleRegister double_input,
- Label* done) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiValueSize == 32);
-
- // Try to convert with a FPU convert instruction. It's trivial to compute
- // the modulo operation on an integer register so we convert to a 64-bit
- // integer, then find the 32-bit result from that.
- //
- // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
- // when the double is out of range, including for infinities; NaNs convert
- // to 0. The saturated cases fail the check below, and callers fall back to
- // an out-of-line path that implements the ECMA-262 behaviour (NaN and
- // infinities convert to 0).
- Fcvtzs(result, double_input);
-
- // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
- // representable using a double, so if the result is one of those then we know
- // that saturation occurred, and we need to manually handle the conversion.
- //
- // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
- // 1 will cause signed overflow.
- Cmp(result, 1);
- Ccmp(result, -1, VFlag, vc);
-
- B(vc, done);
-}
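-
-// The saturation check above, step by step (illustrative):
-//   Cmp(result, 1) overflows only when result == INT64_MIN.
-//   Ccmp(result, -1, VFlag, vc) runs only if Cmp did not overflow, and
-//   overflows only when result == INT64_MAX; otherwise it forces VFlag to
-//   keep the INT64_MIN case flagged. B(vc, done) therefore branches only
-//   when the conversion did not saturate.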
-
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
- Label done;
- ASSERT(jssp.Is(StackPointer()));
-
- TryInlineTruncateDoubleToI(result, double_input, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- Push(double_input); // Put input on stack.
-
- DoubleToIStub stub(jssp,
- result,
- 0,
- true, // is_truncating
- true); // skip_fastpath
- CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
-
- Drop(1, kDoubleSize); // Drop the double input on the stack.
- Pop(lr);
-
- Bind(&done);
-
- // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
- // https://code.google.com/p/v8/issues/detail?id=3149
- Sxtw(result, result.W());
-}
-
-
-void MacroAssembler::TruncateHeapNumberToI(Register result,
- Register object) {
- Label done;
- ASSERT(!result.is(object));
- ASSERT(jssp.Is(StackPointer()));
-
- Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- TryInlineTruncateDoubleToI(result, fp_scratch, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- DoubleToIStub stub(object,
- result,
- HeapNumber::kValueOffset - kHeapObjectTag,
- true, // is_truncating
- true); // skip_fastpath
- CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
- Pop(lr);
-
- Bind(&done);
-
- // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
- // https://code.google.com/p/v8/issues/detail?id=3149
- Sxtw(result, result.W());
-}
-
-
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- ASSERT(StackPointer().Is(jssp));
- // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
- // have the special STUB smi?
- __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
- // Compiled stubs don't age, and so they don't need the predictable code
- // ageing sequence.
- __ Push(lr, fp, cp, Tmp0());
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
- if (isolate()->IsCodePreAgingActive()) {
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- __ EmitCodeAgeSequence(stub);
- } else {
- __ EmitFrameSetupForCodeAgePatching();
- }
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
- Push(lr, fp, cp);
- Mov(Tmp1(), Operand(Smi::FromInt(type)));
- Mov(Tmp0(), Operand(CodeObject()));
- Push(Tmp1(), Tmp0());
- // jssp[4] : lr
- // jssp[3] : fp
- // jssp[2] : cp
- // jssp[1] : type
- // jssp[0] : code object
-
- // Adjust FP to point to saved FP.
- add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- Mov(jssp, fp);
- AssertStackConsistency();
- Pop(fp, lr);
-}
-
-
-void MacroAssembler::ExitFramePreserveFPRegs() {
- PushCPURegList(kCallerSavedFP);
-}
-
-
-void MacroAssembler::ExitFrameRestoreFPRegs() {
- // Read the registers from the stack without popping them. The stack pointer
- // will be reset as part of the unwinding process.
- CPURegList saved_fp_regs = kCallerSavedFP;
- ASSERT(saved_fp_regs.Count() % 2 == 0);
-
- int offset = ExitFrameConstants::kLastExitFrameField;
- while (!saved_fp_regs.IsEmpty()) {
- const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
- const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
- offset -= 2 * kDRegSizeInBytes;
- Ldp(dst1, dst0, MemOperand(fp, offset));
- }
-}
-
-
-// TODO(jbramley): Check that we're handling the frame pointer correctly.
-void MacroAssembler::EnterExitFrame(bool save_doubles,
- const Register& scratch,
- int extra_space) {
- ASSERT(jssp.Is(StackPointer()));
-
- // Set up the new stack frame.
- Mov(scratch, Operand(CodeObject()));
- Push(lr, fp);
- Mov(fp, StackPointer());
- Push(xzr, scratch);
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // jssp -> fp[-16]: CodeObject()
- STATIC_ASSERT((2 * kPointerSize) ==
- ExitFrameConstants::kCallerSPDisplacement);
- STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
- STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
- STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
- STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
-
- // Save the frame pointer and context pointer in the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate())));
- Str(fp, MemOperand(scratch));
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
- isolate())));
- Str(cp, MemOperand(scratch));
-
- STATIC_ASSERT((-2 * kPointerSize) ==
- ExitFrameConstants::kLastExitFrameField);
- if (save_doubles) {
- ExitFramePreserveFPRegs();
- }
-
- // Reserve space for the return address and for user requested memory.
- // We do this before aligning to make sure that we end up correctly
- // aligned with the minimum of wasted space.
- Claim(extra_space + 1, kXRegSizeInBytes);
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true).
- // jssp[8]: Extra space reserved for caller (if extra_space != 0).
- // jssp -> jssp[0]: Space reserved for the return address.
-
- // Align and synchronize the system stack pointer with jssp.
- AlignAndSetCSPForFrame();
- ASSERT(csp.Is(StackPointer()));
-
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: Space reserved for SPOffset.
- // fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
- // csp[8]: Memory reserved for the caller if extra_space != 0.
- // Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
-
- // ExitFrame::GetStateForFramePointer expects to find the return address at
- // the memory address immediately below the pointer stored in SPOffset.
- // It is not safe to derive much else from SPOffset, because the size of the
- // padding can vary.
- Add(scratch, csp, kXRegSizeInBytes);
- Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
-
-
-// Leave the current exit frame.
-void MacroAssembler::LeaveExitFrame(bool restore_doubles,
- const Register& scratch,
- bool restore_context) {
- ASSERT(csp.Is(StackPointer()));
-
- if (restore_doubles) {
- ExitFrameRestoreFPRegs();
- }
-
- // Restore the context pointer from the top frame.
- if (restore_context) {
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
- isolate())));
- Ldr(cp, MemOperand(scratch));
- }
-
- if (emit_debug_code()) {
- // Also emit debug code to clear the cp in the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
- isolate())));
- Str(xzr, MemOperand(scratch));
- }
- // Clear the frame pointer from the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- isolate())));
- Str(xzr, MemOperand(scratch));
-
- // Pop the exit frame.
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[...]: The rest of the frame.
- Mov(jssp, fp);
- SetStackPointer(jssp);
- AssertStackConsistency();
- Pop(fp, lr);
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Mov(scratch1, value);
- Mov(scratch2, Operand(ExternalReference(counter)));
- Str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value != 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Mov(scratch2, Operand(ExternalReference(counter)));
- Ldr(scratch1, MemOperand(scratch2));
- Add(scratch1, scratch1, value);
- Str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- IncrementCounter(counter, -value, scratch1, scratch2);
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in cp).
- Mov(dst, cp);
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Mov(x0, 0);
- Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- ASSERT(jssp.Is(StackPointer()));
- // Adjust this code if the asserts don't hold.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve the live registers x0-x4.
- // (See JSEntryStub::GenerateBody().)
-
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
-
- // Set up the code object and the state for pushing.
- Mov(x10, Operand(CodeObject()));
- Mov(x11, state);
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- ASSERT(Smi::FromInt(0) == 0);
- Push(xzr, xzr, x11, x10);
- } else {
- Push(fp, cp, x11, x10);
- }
-
- // Link the current handler as the next handler.
- Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- Ldr(x10, MemOperand(x11));
- Push(x10);
- // Set this new handler as the current one.
- Str(jssp, MemOperand(x11));
-}
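-
-// Resulting handler frame, from jssp upwards (illustrative):
-//   jssp[0] : next handler       jssp[8] : code object
-//   jssp[16]: state              jssp[24]: context (cp, or 0 for JS_ENTRY)
-//   jssp[32]: frame pointer (fp, or 0 for JS_ENTRY)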
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- Pop(x10);
- Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes);
- Str(x10, MemOperand(x11));
-}
-
-
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- // We apply salt to the original zap value to easily spot the values.
- Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
- Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
- Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
- }
- B(gc_required);
- return;
- }
-
- ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1()));
- ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() &&
- Tmp0().Is64Bits() && Tmp1().Is64Bits());
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT(0 == (object_size & kObjectAlignmentMask));
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDP.
- ExternalReference heap_allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference heap_allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register top_address = scratch1;
- Register allocation_limit = scratch2;
- Mov(top_address, Operand(heap_allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- Ldr(Tmp0(), MemOperand(top_address));
- Cmp(result, Tmp0());
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
- }
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on A64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- // Calculate new top and bail out if new space is exhausted.
- Adds(Tmp1(), result, object_size);
- B(vs, gc_required);
- Cmp(Tmp1(), allocation_limit);
- B(hi, gc_required);
- Str(Tmp1(), MemOperand(top_address));
-
- // Tag the object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Orr(result, result, kHeapObjectTag);
- }
-}
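-
-// Fast-path outline (illustrative only): 'top' and 'limit' are adjacent in
-// memory, so a single Ldp fetches both.
-//   new_top = top + object_size;            // Adds also sets the flags.
-//   if (overflow || new_top > limit) goto gc_required;
-//   *top_address = new_top;                 // 'result' holds the old top.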
-
-
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- // We apply salt to the original zap value to easily spot the values.
- Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
- Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
- Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x31L);
- }
- B(gc_required);
- return;
- }
-
- ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1()));
- ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() &&
- scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits());
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDP.
- ExternalReference heap_allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
- ExternalReference heap_allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
- intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
-
- // Set up allocation top address and object size registers.
- Register top_address = scratch1;
- Register allocation_limit = scratch2;
- Mov(top_address, Operand(heap_allocation_top));
-
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry.
- Ldr(Tmp0(), MemOperand(top_address));
- Cmp(result, Tmp0());
- Check(eq, kUnexpectedAllocationTop);
- }
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
- }
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on A64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- // Calculate new top and bail out if new space is exhausted
- if ((flags & SIZE_IN_WORDS) != 0) {
- Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2));
- } else {
- Adds(Tmp1(), result, object_size);
- }
-
- if (emit_debug_code()) {
- Tst(Tmp1(), kObjectAlignmentMask);
- Check(eq, kUnalignedAllocationInNewSpace);
- }
-
- B(vs, gc_required);
- Cmp(Tmp1(), allocation_limit);
- B(hi, gc_required);
- Str(Tmp1(), MemOperand(top_address));
-
- // Tag the object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Orr(result, result, kHeapObjectTag);
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- Bic(object, object, kHeapObjectTagMask);
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- Mov(scratch, Operand(new_space_allocation_top));
- Ldr(scratch, MemOperand(scratch));
- Cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- Mov(scratch, Operand(new_space_allocation_top));
- Str(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- Add(scratch1, length, length); // Length in bytes, not chars.
- Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- Bic(scratch1, scratch1, kObjectAlignmentMask);
-
- // Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- STATIC_ASSERT(kCharSize == 1);
- Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- Bic(scratch1, scratch1, kObjectAlignmentMask);
-
- // Allocate ASCII string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- Mov(scratch1, Operand(high_promotion_mode));
- Ldr(scratch1, MemOperand(scratch1));
- Cbz(scratch1, &allocate_new_space);
-
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- B(&install_map);
-
- Bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- Bind(&install_map);
-
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-// Allocates a heap number or jumps to the gc_required label if the young
-// space is full and a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- Register heap_number_map) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
-
- // Store heap number map in the allocated object.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = scratch1;
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- }
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- DoubleRegister value,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- Register heap_number_map) {
- // TODO(all): Check if it would be more efficient to use STP to store both
- // the map and the value.
- AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
- Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
-void MacroAssembler::JumpIfObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_cond_pass,
- Condition cond) {
- CompareObjectType(object, map, type_reg, type);
- B(cond, if_cond_pass);
-}
-
-
-void MacroAssembler::JumpIfNotObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_not_object) {
- JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
-}
-
-
-// Sets condition flags based on comparison, and returns type in type_reg.
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
- Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
-}
-
-
-// Sets condition flags based on comparison, and returns type in type_reg.
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type) {
- Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Cmp(type_reg, type);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success) {
- // TODO(jbramley): The early_success label isn't used. Remove it.
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success) {
- // TODO(jbramley): The early_success label isn't used. Remove it.
- Cmp(obj_map, Operand(map));
-}
-
-
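-// Compare the map of 'obj' against 'map' and jump to 'fail' if they differ,
-// optionally checking first that 'obj' is not a smi (and jumping to 'fail'
-// if it is).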
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success);
- B(ne, fail);
- Bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- JumpIfNotRoot(scratch, index, fail);
-}
-
-
-void MacroAssembler::CheckMap(Register obj_map,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj_map, fail);
- }
- Label success;
- CompareMap(obj_map, map, &success);
- B(ne, fail);
- Bind(&success);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- Cmp(scratch, Operand(map));
- B(ne, &fail);
- Jump(success, RelocInfo::CODE_TARGET);
- Bind(&fail);
-}
-
-
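-// Load the bit field from the map of 'object' and test it against 'mask',
-// leaving the result in the condition flags.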
-void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
- Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
- Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset));
- Tst(Tmp0(), mask);
-}
-
-
-void MacroAssembler::LoadElementsKind(Register result, Register object) {
- // Load map.
- Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- BoundFunctionAction action) {
- ASSERT(!AreAliased(function, result, scratch));
-
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
-
- if (action == kMissOnBoundFunction) {
- Register scratch_w = scratch.W();
- Ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // On 64-bit platforms, the compiler hints field is not a smi. See the
- // definition of kCompilerHintsOffset in src/objects.h.
- Ldr(scratch_w,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
-
- // Get the prototype or initial map from the function.
- Ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and simply
- // miss the cache instead. This will allow us to allocate a prototype object
- // on-demand in the runtime system.
- JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
-
- // Get the prototype from the initial map.
- Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- B(&done);
-
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- Bind(&non_instance);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- Bind(&done);
-}
-
-
-void MacroAssembler::CompareRoot(const Register& obj,
- Heap::RootListIndex index) {
- ASSERT(!AreAliased(obj, Tmp0()));
- LoadRoot(Tmp0(), index);
- Cmp(obj, Tmp0());
-}
-
-
-void MacroAssembler::JumpIfRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_equal) {
- CompareRoot(obj, index);
- B(eq, if_equal);
-}
-
-
-void MacroAssembler::JumpIfNotRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_not_equal) {
- CompareRoot(obj, index);
- B(ne, if_not_equal);
-}
-
-
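-// Compare 'lhs' with 'rhs' and branch to 'if_true' or 'if_false', omitting
-// the branch for whichever label is the fall-through. A hypothetical use:
-//   CompareAndSplit(x0, 0, eq, &is_zero, &not_zero, &not_zero);
-// emits a single compare-and-branch to is_zero, with not_zero falling
-// through.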
-void MacroAssembler::CompareAndSplit(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if ((if_true == if_false) && (if_false == fall_through)) {
- // Fall through.
- } else if (if_true == if_false) {
- B(if_true);
- } else if (if_false == fall_through) {
- CompareAndBranch(lhs, rhs, cond, if_true);
- } else if (if_true == fall_through) {
- CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
- } else {
- CompareAndBranch(lhs, rhs, cond, if_true);
- B(if_false);
- }
-}
-
-
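-// Like CompareAndSplit, but tests 'reg' against 'bit_pattern' and splits
-// between 'if_all_clear' and 'if_any_set'.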
-void MacroAssembler::TestAndSplit(const Register& reg,
- uint64_t bit_pattern,
- Label* if_all_clear,
- Label* if_any_set,
- Label* fall_through) {
- if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
- // Fall through.
- } else if (if_all_clear == if_any_set) {
- B(if_all_clear);
- } else if (if_all_clear == fall_through) {
- TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
- } else if (if_any_set == fall_through) {
- TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
- } else {
- TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
- B(if_all_clear);
- }
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
- B(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- // If the first comparison gave ls (smi elements), force the flags to
- // satisfy hi so that we branch to fail; otherwise compare against the
- // fast holey element limit.
- Ccmp(scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
- B(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue);
- B(hi, fail);
-}
-
-
-// Note: The ARM version of this clobbers elements_reg, but this version does
-// not. Some uses of this in A64 assume that elements_reg will be preserved.
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- FPRegister fpscratch1,
- FPRegister fpscratch2,
- Label* fail,
- int elements_offset) {
- ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label store_num;
-
- // Speculatively convert the smi to a double - all smis can be exactly
- // represented as a double.
- SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
-
- // If value_reg is a smi, we're done.
- JumpIfSmi(value_reg, &store_num);
-
- // Ensure that the object is a heap number.
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
- fail, DONT_DO_SMI_CHECK);
-
- Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-
- // Check for NaN by comparing the number to itself: NaN comparison will
- // report unordered, indicated by the overflow flag being set.
- Fcmp(fpscratch1, fpscratch1);
- Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
-
- // Store the result.
- Bind(&store_num);
- Add(scratch1, elements_reg,
- Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
- Str(fpscratch1,
- FieldMemOperand(scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
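-// A stub may be called without a frame only if it never sets up a frame
-// itself.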
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index, pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it do not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros
- // in the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- SmiTag(index, hash);
-}
-
-
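-// Check, aborting on failure, that 'string' is a sequential string with the
-// encoding given by 'encoding_mask' and that 'index' is within its bounds.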
-void MacroAssembler::EmitSeqStringSetCharCheck(
- Register string,
- Register index,
- SeqStringSetCharCheckIndexType index_type,
- Register scratch,
- uint32_t encoding_mask) {
- ASSERT(!AreAliased(string, index, scratch));
-
- if (index_type == kIndexIsSmi) {
- AssertSmi(index);
- }
-
- // Check that string is an object.
- AssertNotSmi(string, kNonObject);
-
- // Check that string has an appropriate map.
- Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
- Cmp(scratch, encoding_mask);
- Check(eq, kUnexpectedStringType);
-
- Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
- Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
- Check(lt, kIndexIsTooLarge);
-
- ASSERT_EQ(0, Smi::FromInt(0));
- Cmp(index, 0);
- Check(ge, kIndexIsNegative);
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function.
- // The ARM version takes two scratch registers, and that should be enough for
- // all of the checks.
-
- Label same_contexts;
-
- ASSERT(!AreAliased(holder_reg, scratch));
-
- // Load current lexical context from the stack frame.
- Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Cmp(scratch, 0);
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- Ldr(scratch, FieldMemOperand(scratch, offset));
- Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Read the first word and compare to the global_context_map.
- Register temp = Tmp1();
- Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset));
- CompareRoot(temp, Heap::kNativeContextMapRootIndex);
- Check(eq, kExpectedNativeContext);
- }
-
- // Check if both contexts are the same.
- ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, Tmp0());
- b(&same_contexts, eq);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Move Tmp0() into a different register, as CompareRoot will use it.
- Register temp = Tmp1();
- mov(temp, Tmp0());
- CompareRoot(temp, Heap::kNullValueRootIndex);
- Check(ne, kExpectedNonNullContext);
-
- Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset));
- CompareRoot(temp, Heap::kNativeContextMapRootIndex);
- Check(eq, kExpectedNativeContext);
-
- // Assume that Tmp0() has been clobbered by the MacroAssembler and reload
- // its value.
- ldr(Tmp0(), FieldMemOperand(holder_reg,
- JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ldr(scratch, FieldMemOperand(scratch, token_offset));
- ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset));
- cmp(scratch, Tmp0());
- b(miss, ne);
-
- bind(&same_contexts);
-}
-
-
-// Compute the hash code from the untagged key. This must be kept in sync with
- // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
- // code-stubs-hydrogen.cc.
-void MacroAssembler::GetNumberHash(Register key, Register scratch) {
- ASSERT(!AreAliased(key, scratch));
-
- // Xor original key with a seed.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- Eor(key, key, Operand::UntagSmi(scratch));
-
- // The algorithm uses 32-bit integer values.
- key = key.W();
- scratch = scratch.W();
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- Mvn(scratch, key);
- Add(key, scratch, Operand(key, LSL, 15));
- // hash = hash ^ (hash >> 12);
- Eor(key, key, Operand(key, LSR, 12));
- // hash = hash + (hash << 2);
- Add(key, key, Operand(key, LSL, 2));
- // hash = hash ^ (hash >> 4);
- Eor(key, key, Operand(key, LSR, 4));
- // hash = hash * 2057;
- Mov(scratch, Operand(key, LSL, 11));
- Add(key, key, Operand(key, LSL, 3));
- Add(key, key, scratch);
- // hash = hash ^ (hash >> 16);
- Eor(key, key, Operand(key, LSR, 16));
-}
-
-
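-// Look up 'key' in the seeded number dictionary 'elements' using an unrolled
-// quadratic probe sequence. On success the value is left in 'result'; jump
-// to 'miss' if the key is absent or the property is not a normal property.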
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
-
- Label done;
-
- SmiUntag(scratch0, key);
- GetNumberHash(scratch0, scratch1);
-
- // Compute the capacity mask.
- Ldrsw(scratch1,
- UntagSmiFieldMemOperand(elements,
- SeededNumberDictionary::kCapacityOffset));
- Sub(scratch1, scratch1, 1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
- } else {
- Mov(scratch2, scratch0);
- }
- And(scratch2, scratch2, scratch1);
-
- // Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
- Ldr(scratch3,
- FieldMemOperand(scratch2,
- SeededNumberDictionary::kElementsStartOffset));
- Cmp(key, scratch3);
- if (i != (kNumberDictionaryProbes - 1)) {
- B(eq, &done);
- } else {
- B(ne, miss);
- }
- }
-
- Bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- Ldr(result, FieldMemOperand(scratch2, kValueOffset));
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- ASSERT(!AreAliased(object, address, scratch));
- Label done, store_buffer_overflow;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, &ok);
- Abort(kRememberedSetPointerInNewSpace);
- bind(&ok);
- }
- // Load store buffer top.
- Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate())));
- Ldr(scratch, MemOperand(Tmp0()));
- // Store pointer to buffer and increment buffer top.
- Str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- Str(scratch, MemOperand(Tmp0()));
- // Check for the end of the buffer, and call the stub if it is reached.
- ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
- (1 << (14 + kPointerSizeLog2)));
- if (and_then == kFallThroughAtEnd) {
- Tbz(scratch, (14 + kPointerSizeLog2), &done);
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow);
- Ret();
- }
-
- Bind(&store_buffer_overflow);
- Push(lr);
- StoreBufferOverflowStub store_buffer_overflow_stub =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow_stub);
- Pop(lr);
-
- Bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- PopXRegList(kSafepointSavedRegisters);
- Drop(num_unsaved);
-}
-
-
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
- // adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
- Claim(num_unsaved);
- PushXRegList(kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PushSafepointFPRegisters() {
- PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
- FPRegister::kAllocatableFPRegisters));
-}
-
-
-void MacroAssembler::PopSafepointFPRegisters() {
- PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
- FPRegister::kAllocatableFPRegisters));
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // Make sure the safepoint registers list is what we expect.
- ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
-
- // Safepoint registers are stored contiguously on the stack, but not all the
- // registers are saved. The following registers are excluded:
- // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
- // the macro assembler.
- // - x28 (jssp) because the JS stack pointer doesn't need to be included in
- // safepoint registers.
- // - x31 (csp) because the system stack pointer doesn't need to be included
- // in safepoint registers.
- //
- // This function implements the mapping of register code to index into the
- // safepoint register slots.
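- // For example, x18 (the first register after the skipped ip0/ip1 pair)
- // maps to slot 16, and x29/x30 (fp/lr) map to slots 26 and 27.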
- if ((reg_code >= 0) && (reg_code <= 15)) {
- return reg_code;
- } else if ((reg_code >= 18) && (reg_code <= 27)) {
- // Skip ip0 and ip1.
- return reg_code - 2;
- } else if ((reg_code == 29) || (reg_code == 30)) {
- // Also skip jssp.
- return reg_code - 3;
- } else {
- // This register has no safepoint register slot.
- UNREACHABLE();
- return -1;
- }
-}
-
-
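-// Compute the address of the header of the page containing 'object', then
-// test the page's flags field and branch to 'if_any_set' if any of the bits
-// in 'mask' are set.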
-void MacroAssembler::CheckPageFlagSet(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_any_set) {
- And(scratch, object, ~Page::kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- TestAndBranchIfAnySet(scratch, mask, if_any_set);
-}
-
-
-void MacroAssembler::CheckPageFlagClear(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_all_clear) {
- And(scratch, object, ~Page::kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- TestAndBranchIfAllClear(scratch, mask, if_all_clear);
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip the barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- Add(scratch, object, offset - kHeapObjectTag);
- if (emit_debug_code()) {
- Label ok;
- Tst(scratch, (1 << kPointerSizeLog2) - 1);
- B(eq, &ok);
- Abort(kUnalignedCellInWriteBarrier);
- Bind(&ok);
- }
-
- RecordWrite(object,
- scratch,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
-
- Bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
- Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
- }
-}
-
-
-// Will clobber: object, address, value, Tmp0(), Tmp1().
-// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
-//
-// The register 'object' contains a heap object pointer. The heap object tag is
-// shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASM_LOCATION("MacroAssembler::RecordWrite");
- ASSERT(!AreAliased(object, value));
-
- if (emit_debug_code()) {
- Ldr(Tmp0(), MemOperand(address));
- Cmp(Tmp0(), value);
- Check(eq, kWrongAddressOrValuePassedToRecordWrite);
- }
-
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- // TODO(mstarzinger): Dynamic counter missing.
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- JumpIfSmi(value, &done);
- }
-
- CheckPageFlagClear(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- &done);
- CheckPageFlagClear(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- &done);
-
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- Push(lr);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- Pop(lr);
- }
-
- Bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::AssertHasValidColor(const Register& reg) {
- if (emit_debug_code()) {
- // The bit sequence is backward. The first character in the string
- // represents the least significant bit.
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label color_is_valid;
- Tbnz(reg, 0, &color_is_valid);
- Tbz(reg, 1, &color_is_valid);
- Abort(kUnexpectedColorFound);
- Bind(&color_is_valid);
- }
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register shift_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg));
- // addr_reg is divided into fields:
- // |63 page base 20|19 high 8|7 shift 3|2 0|
- // 'high' gives the index of the cell holding color bits for the object.
- // 'shift' gives the offset in the cell for this object's color.
- const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
- Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
- Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2));
- // bitmap_reg:
- // |63 page base 20|19 zeros 15|14 high 3|2 0|
- Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
-}
-
-
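-// Jump to 'has_color' if the two mark bits of 'object' match the pattern
-// given by (first_bit, second_bit).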
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register shift_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- // See mark-compact.h for color definitions.
- ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
-
- GetMarkBits(object, bitmap_scratch, shift_scratch);
- Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- // Shift the bitmap down to get the color of the object in bits [1:0].
- Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
-
- AssertHasValidColor(bitmap_scratch);
-
- // These bit sequences are backwards. The first character in the string
- // represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-
- // Check for the color.
- if (first_bit == 0) {
- // Checking for white.
- ASSERT(second_bit == 0);
- // We only need to test the first bit.
- Tbz(bitmap_scratch, 0, has_color);
- } else {
- Label other_color;
- // Checking for grey or black.
- Tbz(bitmap_scratch, 0, &other_color);
- if (second_bit == 0) {
- Tbz(bitmap_scratch, 1, has_color);
- } else {
- Tbnz(bitmap_scratch, 1, has_color);
- }
- Bind(&other_color);
- }
-
- // Fall through if it does not have the right color.
-}
-
-
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
- Register scratch,
- Label* if_deprecated) {
- if (map->CanBeDeprecated()) {
- Mov(scratch, Operand(map));
- Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
- TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
- }
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
-}
-
-
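-// Walk up the prototype chain from 'object', jumping to 'found' if any map
-// on the chain has dictionary (slow) elements.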
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- ASSERT(!AreAliased(object, scratch0, scratch1));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again;
-
- // Start the walk at the object itself.
- Mov(current, object);
-
- // Loop based on the map going up the prototype chain.
- Bind(&loop_again);
- Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
- Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
- CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
- Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
-}
-
-
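-// Given the address of an 'ldr (literal)' instruction, compute the address
-// of the literal it loads by extracting its signed 19-bit offset field and
-// scaling it to a byte offset.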
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- ASSERT(!result.Is(ldr_location));
- const uint32_t kLdrLitOffset_lsb = 5;
- const uint32_t kLdrLitOffset_width = 19;
- Ldr(result, MemOperand(ldr_location));
- if (emit_debug_code()) {
- And(result, result, LoadLiteralFMask);
- Cmp(result, LoadLiteralFixed);
- Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
- // The instruction was clobbered. Reload it.
- Ldr(result, MemOperand(ldr_location));
- }
- Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
- Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
-}
-
-
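-// If 'value' is white, mark it black and add its size to the live byte count
-// of its page. Data objects (heap numbers, external strings and sequential
-// strings) are handled inline; anything that may contain pointers branches
-// to 'value_is_white_and_not_data'.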
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register shift_scratch,
- Register load_scratch,
- Register length_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(
- value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
-
- // These bit sequences are backwards. The first character in the string
- // represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-
- GetMarkBits(value, bitmap_scratch, shift_scratch);
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Lsr(load_scratch, load_scratch, shift_scratch);
-
- AssertHasValidColor(load_scratch);
-
- // If the value is black or grey we don't need to do anything.
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- Label done;
- Tbnz(load_scratch, 0, &done);
-
- // Value is white. We check whether it is data that doesn't need scanning.
- Register map = load_scratch; // Holds map while checking type.
- Label is_data_object;
-
- // Check for heap-number.
- Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- Mov(length_scratch, HeapNumber::kSize);
- JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- TestAndBranchIfAnySet(instance_type,
- kIsIndirectStringMask | kIsNotStringMask,
- value_is_white_and_not_data);
-
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- Mov(length_scratch, ExternalString::kSize);
- TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
-
- // Sequential string, either ASCII or UC16.
- // The length is loaded untagged. For UC16 (char-size of 2) it is shifted
- // left by one to get the length in bytes; for ASCII (char-size of 1) it is
- // used as-is.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
- String::kLengthOffset));
- Tst(instance_type, kStringEncodingMask);
- Cset(load_scratch, eq);
- Lsl(length_scratch, length_scratch, load_scratch);
- Add(length_scratch,
- length_scratch,
- SeqString::kHeaderSize + kObjectAlignmentMask);
- Bic(length_scratch, length_scratch, kObjectAlignmentMask);
-
- Bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- Register mask = shift_scratch;
- Mov(load_scratch, 1);
- Lsl(mask, load_scratch, shift_scratch);
-
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Orr(load_scratch, load_scratch, mask);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Add(load_scratch, load_scratch, length_scratch);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- Bind(&done);
-}
-
-
-void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
- if (emit_debug_code()) {
- Check(cond, reason);
- }
-}
-
-
-void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
- if (emit_debug_code()) {
- CheckRegisterIsClear(reg, reason);
- }
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index,
- BailoutReason reason) {
- // CompareRoot uses Tmp0().
- ASSERT(!reg.Is(Tmp0()));
- if (emit_debug_code()) {
- CompareRoot(reg, index);
- Check(eq, reason);
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Register temp = Tmp1();
- Label ok;
- Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
- JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
- JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
- Abort(kJSObjectWithFastElementsMapHasSlowElements);
- Bind(&ok);
- }
-}
-
-
-void MacroAssembler::AssertIsString(const Register& object) {
- if (emit_debug_code()) {
- Register temp = Tmp1();
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, Operand(kSmiTagMask));
- Check(ne, kOperandIsNotAString);
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
- Check(lo, kOperandIsNotAString);
- }
-}
-
-
-void MacroAssembler::Check(Condition cond, BailoutReason reason) {
- Label ok;
- B(cond, &ok);
- Abort(reason);
- // Will not return here.
- Bind(&ok);
-}
-
-
-void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
- Label ok;
- Cbz(reg, &ok);
- Abort(reason);
- // Will not return here.
- Bind(&ok);
-}
-
-
-void MacroAssembler::Abort(BailoutReason reason) {
-#ifdef DEBUG
- RecordComment("Abort message: ");
- RecordComment(GetBailoutReason(reason));
-
- if (FLAG_trap_on_abort) {
- Brk(0);
- return;
- }
-#endif
-
- // Abort is used in some contexts where csp is the stack pointer. In order to
- // simplify the CallRuntime code, make sure that jssp is the stack pointer.
- // There is no risk of register corruption here because Abort doesn't return.
- Register old_stack_pointer = StackPointer();
- SetStackPointer(jssp);
- Mov(jssp, old_stack_pointer);
-
- if (use_real_aborts()) {
- Mov(x0, Operand(Smi::FromInt(reason)));
- Push(x0);
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
- } else {
- CallRuntime(Runtime::kAbort, 1);
- }
- } else {
- // Load the string to pass to Printf.
- Label msg_address;
- Adr(x0, &msg_address);
-
- // Call Printf directly to report the error.
- CallPrintf();
-
- // We need a way to stop execution on both the simulator and real hardware,
- // and Unreachable() is the best option.
- Unreachable();
-
- // Emit the message string directly in the instruction stream.
- {
- BlockConstPoolScope scope(this);
- Bind(&msg_address);
- EmitStringData(GetBailoutReason(reason));
- }
- }
-
- SetStackPointer(old_stack_pointer);
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- Ldr(scratch, GlobalObjectMemOperand());
- Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX));
- size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(Tmp0(), FieldMemOperand(scratch, offset));
- Cmp(map_in_out, Tmp0());
- B(ne, no_map_match);
-
- // Use the transitioned cached map.
- offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(map_in_out, FieldMemOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- ArrayHasHoles holes) {
- ASSERT(!AreAliased(function_in, scratch, map_out));
- Label done;
- Ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out,
- scratch, &done);
- } else if (holes == kArrayCanHaveHoles) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS, map_out,
- scratch, &done);
- }
- Bind(&done);
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- Ldr(function, GlobalObjectMemOperand());
- // Load the global context from the global or builtins object.
- Ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the global context.
- Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX));
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- Ldr(function, GlobalObjectMemOperand());
- // Load the native context from the global or builtins object.
- Ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- Ldr(function, ContextMemOperand(function, index));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- B(&ok);
- Bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- Bind(&ok);
- }
-}
-
-
-// This is the main Printf implementation. All other Printf variants call
-// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
-void MacroAssembler::PrintfNoPreserve(const char * format,
- const CPURegister& arg0,
- const CPURegister& arg1,
- const CPURegister& arg2,
- const CPURegister& arg3) {
- // We cannot handle a caller-saved stack pointer. It doesn't make much sense
- // in most cases anyway, so this restriction shouldn't be too serious.
- ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));
-
- // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
- // assembler. We cannot print the stack pointer because it is typically used
- // to preserve caller-saved registers (using other Printf variants which
- // depend on this helper).
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
-
- static const int kMaxArgCount = 4;
- // Assume that we have the maximum number of arguments until we know
- // otherwise.
- int arg_count = kMaxArgCount;
-
- // The provided arguments.
- CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
-
- // The PCS registers where the arguments need to end up.
- CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
-
- // Promote FP arguments to doubles, and integer arguments to X registers.
- // Note that FP and integer arguments cannot be mixed, but we'll check
- // AreSameSizeAndType once we've processed these promotions.
- for (int i = 0; i < kMaxArgCount; i++) {
- if (args[i].IsRegister()) {
- // Note that we use x1 onwards, because x0 will hold the format string.
- pcs[i] = Register::XRegFromCode(i + 1);
- // For simplicity, we handle all integer arguments as X registers. An X
- // register argument takes the same space as a W register argument in the
- // PCS anyway. The only limitation is that we must explicitly clear the
- // top word for W register arguments as the callee will expect it to be
- // clear.
- if (!args[i].Is64Bits()) {
- const Register& as_x = args[i].X();
- And(as_x, as_x, 0x00000000ffffffff);
- args[i] = as_x;
- }
- } else if (args[i].IsFPRegister()) {
- pcs[i] = FPRegister::DRegFromCode(i);
- // C and C++ varargs functions (such as printf) implicitly promote float
- // arguments to doubles.
- if (!args[i].Is64Bits()) {
- FPRegister s(args[i]);
- const FPRegister& as_d = args[i].D();
- Fcvt(as_d, s);
- args[i] = as_d;
- }
- } else {
- // This is the first empty (NoCPUReg) argument, so use it to set the
- // argument count and bail out.
- arg_count = i;
- break;
- }
- }
- ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
- // Check that every remaining argument is NoCPUReg.
- for (int i = arg_count; i < kMaxArgCount; i++) {
- ASSERT(args[i].IsNone());
- }
- ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
- args[2], args[3],
- pcs[0], pcs[1],
- pcs[2], pcs[3]));
-
- // Move the arguments into the appropriate PCS registers.
- //
- // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
- // surprisingly complicated.
- //
- // * For even numbers of registers, we push the arguments and then pop them
- // into their final registers. This maintains 16-byte stack alignment in
- // case csp is the stack pointer, since we're only handling X or D
- // registers at this point.
- //
- // * For odd numbers of registers, we push and pop all but one register in
- // the same way, but the left-over register is moved directly, since we
- // can always safely move one register without clobbering any source.
- if (arg_count >= 4) {
- Push(args[3], args[2], args[1], args[0]);
- } else if (arg_count >= 2) {
- Push(args[1], args[0]);
- }
-
- if ((arg_count % 2) != 0) {
- // Move the left-over register directly.
- const CPURegister& leftover_arg = args[arg_count - 1];
- const CPURegister& leftover_pcs = pcs[arg_count - 1];
- if (leftover_arg.IsRegister()) {
- Mov(Register(leftover_pcs), Register(leftover_arg));
- } else {
- Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
- }
- }
-
- if (arg_count >= 4) {
- Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
- } else if (arg_count >= 2) {
- Pop(pcs[0], pcs[1]);
- }
-
- // Load the format string into x0, as per the procedure-call standard.
- //
- // To make the code as portable as possible, the format string is encoded
- // directly in the instruction stream. It might be cleaner to encode it in a
- // literal pool, but since Printf is usually used for debugging, it is
- // beneficial for it to be minimally dependent on other features.
- Label format_address;
- Adr(x0, &format_address);
-
- // Emit the format string directly in the instruction stream.
- { BlockConstPoolScope scope(this);
- Label after_data;
- B(&after_data);
- Bind(&format_address);
- EmitStringData(format);
- Unreachable();
- Bind(&after_data);
- }
-
- // We don't pass any arguments on the stack, but we still need to align the C
- // stack pointer to a 16-byte boundary for PCS compliance.
- if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xf);
- }
-
- CallPrintf(pcs[0].type());
-}
-
-
-void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
- // A call to printf needs special handling for the simulator, since the system
- // printf function will use a different instruction set and the procedure-call
- // standard will not be compatible.
-#ifdef USE_SIMULATOR
- { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
- hlt(kImmExceptionIsPrintf);
- dc32(type);
- }
-#else
- Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
-#endif
-}
-
-
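-// Print a printf-style message, preserving all caller-saved registers as
-// well as NZCV. A hypothetical use, printing the value of x0:
-//   masm->Printf("x0: 0x%" PRIx64 "\n", x0);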
-void MacroAssembler::Printf(const char * format,
- const CPURegister& arg0,
- const CPURegister& arg1,
- const CPURegister& arg2,
- const CPURegister& arg3) {
- // Preserve all caller-saved registers as well as NZCV.
- // If csp is the stack pointer, PushCPURegList asserts that the size of each
- // list is a multiple of 16 bytes.
- PushCPURegList(kCallerSaved);
- PushCPURegList(kCallerSavedFP);
- // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
- // never overlap an argument register.
- Mrs(Tmp0(), NZCV);
- Push(Tmp0(), xzr);
-
- PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
-
- Pop(xzr, Tmp0());
- Msr(NZCV, Tmp0());
- PopCPURegList(kCallerSavedFP);
- PopCPURegList(kCallerSaved);
-}
-
-
-void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
- // TODO(jbramley): Other architectures use the internal memcpy to copy the
- // sequence. If this is a performance bottleneck, we should consider caching
- // the sequence and copying it in the same way.
- InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
- EmitFrameSetupForCodeAgePatching(this);
-}
-
-
-void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
- InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
- EmitCodeAgeSequence(this, stub);
-}
-
-
-#undef __
-#define __ assm->
-
-
-void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
- Label start;
- __ bind(&start);
-
- // We can do this sequence using four instructions, but the code ageing
- // sequence that patches it needs five, so we use the extra space to try to
- // simplify some addressing modes and remove some dependencies (compared to
- // using two stp instructions with write-back).
- __ sub(jssp, jssp, 4 * kXRegSizeInBytes);
- __ sub(csp, csp, 4 * kXRegSizeInBytes);
- __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
- __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
- __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
-
- __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
-}
-
-
-void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
- Code * stub) {
- Label start;
- __ bind(&start);
- // When the stub is called, the sequence is replaced with the young sequence
- // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
- // stub jumps to &start, stored in x0. The young sequence does not call the
- // stub so there is no infinite loop here.
- //
- // A branch (br) is used rather than a call (blr) because this code replaces
- // the frame setup code that would normally preserve lr.
- __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
- __ adr(x0, &start);
- __ br(ip0);
- // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
- // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
- __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
- if (stub) {
- __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
- __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
- }
-}
-
-
-bool MacroAssembler::IsYoungSequence(byte* sequence) {
- // Generate a young sequence to compare with.
- const int length = kCodeAgeSequenceSize / kInstructionSize;
- static bool initialized = false;
- static byte young[kCodeAgeSequenceSize];
- if (!initialized) {
- PatchingAssembler patcher(young, length);
- // The young sequence is the frame setup code for FUNCTION code types. It is
- // generated by FullCodeGenerator::Generate.
- MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
- initialized = true;
- }
-
- bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
- ASSERT(is_young || IsCodeAgeSequence(sequence));
- return is_young;
-}
-
-
-#ifdef DEBUG
-bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
- // The old sequence varies depending on the code age. However, the code up
- // until kCodeAgeStubEntryOffset does not change, so we can check that part to
- // get a reasonable level of verification.
- const int length = kCodeAgeStubEntryOffset / kInstructionSize;
- static bool initialized = false;
- static byte old[kCodeAgeStubEntryOffset];
- if (!initialized) {
- PatchingAssembler patcher(old, length);
- MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
- initialized = true;
- }
- return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
-}
-#endif
-
-
-#undef __
-#define __ masm->
-
-
-void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
- const Label* smi_check) {
- Assembler::BlockConstPoolScope scope(masm);
- if (reg.IsValid()) {
- ASSERT(smi_check->is_bound());
- ASSERT(reg.Is64Bits());
-
- // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
- // 'smi_check' in the other bits. The possible offset is limited in that we
- // use BitField to pack the data, and the underlying data type is a
- // uint32_t.
- uint32_t delta = __ InstructionsGeneratedSince(smi_check);
- __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
- } else {
- ASSERT(!smi_check->is_bound());
-
- // An offset of 0 indicates that there is no patch site.
- __ InlineData(0);
- }
-}
-
-
-InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
- : reg_(NoReg), smi_check_(NULL) {
- InstructionSequence* inline_data = InstructionSequence::At(info);
- ASSERT(inline_data->IsInlineData());
- if (inline_data->IsInlineData()) {
- uint64_t payload = inline_data->InlineData();
- // We use BitField to decode the payload, and BitField can only handle
- // 32-bit values.
- ASSERT(is_uint32(payload));
- if (payload != 0) {
- int reg_code = RegisterBits::decode(payload);
- reg_ = Register::XRegFromCode(reg_code);
- uint64_t smi_check_delta = DeltaBits::decode(payload);
- ASSERT(smi_check_delta != 0);
- smi_check_ = inline_data - (smi_check_delta * kInstructionSize);
- }
- }
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/macro-assembler-a64.h b/deps/v8/src/a64/macro-assembler-a64.h
deleted file mode 100644
index 7b8dd3f806..0000000000
--- a/deps/v8/src/a64/macro-assembler-a64.h
+++ /dev/null
@@ -1,2238 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
-#define V8_A64_MACRO_ASSEMBLER_A64_H_
-
-#include "v8globals.h"
-#include "globals.h"
-
-#include "a64/assembler-a64-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define LS_MACRO_LIST(V) \
- V(Ldrb, Register&, rt, LDRB_w) \
- V(Strb, Register&, rt, STRB_w) \
- V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
- V(Ldrh, Register&, rt, LDRH_w) \
- V(Strh, Register&, rt, STRH_w) \
- V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
- V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
- V(Str, CPURegister&, rt, StoreOpFor(rt)) \
- V(Ldrsw, Register&, rt, LDRSW_x)
-
-
-// ----------------------------------------------------------------------------
-// Static helper functions
-
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset);
-inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
-
-// Generate a MemOperand for loading a SMI from memory.
-inline MemOperand UntagSmiMemOperand(Register object, int offset);
-
-
-// ----------------------------------------------------------------------------
-// MacroAssembler
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-enum TargetAddressStorageMode {
- CAN_INLINE_TARGET_ADDRESS,
- NEVER_INLINE_TARGET_ADDRESS
-};
-enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
-enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
-enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
-enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
-enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
-
-class MacroAssembler : public Assembler {
- public:
- MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
-
- inline Handle<Object> CodeObject();
-
- // Instruction set functions ------------------------------------------------
- // Logical macros.
- inline void And(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Ands(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Bic(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Bics(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Orr(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Orn(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Eor(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Eon(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Tst(const Register& rn, const Operand& operand);
- void LogicalMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op);
-
- // Add and sub macros.
- inline void Add(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Adds(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sub(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Subs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Cmn(const Register& rn, const Operand& operand);
- inline void Cmp(const Register& rn, const Operand& operand);
- inline void Neg(const Register& rd,
- const Operand& operand);
- inline void Negs(const Register& rd,
- const Operand& operand);
-
- void AddSubMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op);
-
- // Add/sub with carry macros.
- inline void Adc(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Adcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sbc(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Ngc(const Register& rd,
- const Operand& operand);
- inline void Ngcs(const Register& rd,
- const Operand& operand);
- void AddSubWithCarryMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubWithCarryOp op);
-
- // Move macros.
- void Mov(const Register& rd,
- const Operand& operand,
- DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
- void Mov(const Register& rd, uint64_t imm);
- inline void Mvn(const Register& rd, uint64_t imm);
- void Mvn(const Register& rd, const Operand& operand);
- static bool IsImmMovn(uint64_t imm, unsigned reg_size);
- static bool IsImmMovz(uint64_t imm, unsigned reg_size);
- static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
-
- // Conditional macros.
- inline void Ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
- inline void Ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
- void ConditionalCompareMacro(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
- ConditionalCompareOp op);
- void Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond);
-
- // Load/store macros.
-#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
- inline void FN(const REGTYPE REG, const MemOperand& addr);
- LS_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
-
- void LoadStoreMacro(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op);
-
- // V8-specific load/store helpers.
- void Load(const Register& rt, const MemOperand& addr, Representation r);
- void Store(const Register& rt, const MemOperand& addr, Representation r);
-
- // Remaining instructions are simple pass-through calls to the assembler.
- inline void Adr(const Register& rd, Label* label);
- inline void Asr(const Register& rd, const Register& rn, unsigned shift);
- inline void Asr(const Register& rd, const Register& rn, const Register& rm);
- inline void B(Label* label);
- inline void B(Condition cond, Label* label);
- void B(Label* label, Condition cond);
- inline void Bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Bind(Label* label);
- inline void Bl(Label* label);
- inline void Blr(const Register& xn);
- inline void Br(const Register& xn);
- inline void Brk(int code);
- void Cbnz(const Register& rt, Label* label);
- void Cbz(const Register& rt, Label* label);
- inline void Cinc(const Register& rd, const Register& rn, Condition cond);
- inline void Cinv(const Register& rd, const Register& rn, Condition cond);
- inline void Cls(const Register& rd, const Register& rn);
- inline void Clz(const Register& rd, const Register& rn);
- inline void Cneg(const Register& rd, const Register& rn, Condition cond);
- inline void CzeroX(const Register& rd, Condition cond);
- inline void CmovX(const Register& rd, const Register& rn, Condition cond);
- inline void Cset(const Register& rd, Condition cond);
- inline void Csetm(const Register& rd, Condition cond);
- inline void Csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Dmb(BarrierDomain domain, BarrierType type);
- inline void Dsb(BarrierDomain domain, BarrierType type);
- inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
- inline void Extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb);
- inline void Fabs(const FPRegister& fd, const FPRegister& fn);
- inline void Fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond);
- inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
- inline void Fcmp(const FPRegister& fn, double value);
- inline void Fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond);
- inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
- inline void Fcvtas(const Register& rd, const FPRegister& fn);
- inline void Fcvtau(const Register& rd, const FPRegister& fn);
- inline void Fcvtms(const Register& rd, const FPRegister& fn);
- inline void Fcvtmu(const Register& rd, const FPRegister& fn);
- inline void Fcvtns(const Register& rd, const FPRegister& fn);
- inline void Fcvtnu(const Register& rd, const FPRegister& fn);
- inline void Fcvtzs(const Register& rd, const FPRegister& fn);
- inline void Fcvtzu(const Register& rd, const FPRegister& fn);
- inline void Fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmov(FPRegister fd, FPRegister fn);
- inline void Fmov(FPRegister fd, Register rn);
- inline void Fmov(FPRegister fd, double imm);
- inline void Fmov(Register rd, FPRegister fn);
- inline void Fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fneg(const FPRegister& fd, const FPRegister& fn);
- inline void Fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Frinta(const FPRegister& fd, const FPRegister& fn);
- inline void Frintn(const FPRegister& fd, const FPRegister& fn);
- inline void Frintz(const FPRegister& fd, const FPRegister& fn);
- inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
- inline void Fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Hint(SystemHint code);
- inline void Hlt(int code);
- inline void Isb();
- inline void Ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src);
- inline void Ldp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src);
- inline void Ldpsw(const Register& rt,
- const Register& rt2,
- const MemOperand& src);
- inline void Ldr(const FPRegister& ft, double imm);
- inline void Ldr(const Register& rt, uint64_t imm);
- inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
- inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
- inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
- inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
- inline void Madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
- inline void Mov(const Register& rd, const Register& rm);
- inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
- inline void Mrs(const Register& rt, SystemRegister sysreg);
- inline void Msr(SystemRegister sysreg, const Register& rt);
- inline void Msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Mul(const Register& rd, const Register& rn, const Register& rm);
- inline void Nop() { nop(); }
- inline void Rbit(const Register& rd, const Register& rn);
- inline void Ret(const Register& xn = lr);
- inline void Rev(const Register& rd, const Register& rn);
- inline void Rev16(const Register& rd, const Register& rn);
- inline void Rev32(const Register& rd, const Register& rn);
- inline void Ror(const Register& rd, const Register& rs, unsigned shift);
- inline void Ror(const Register& rd, const Register& rn, const Register& rm);
- inline void Sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Scvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits = 0);
- inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
- inline void Smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Smull(const Register& rd,
- const Register& rn,
- const Register& rm);
- inline void Smulh(const Register& rd,
- const Register& rn,
- const Register& rm);
- inline void Stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst);
- inline void Stp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst);
- inline void Sxtb(const Register& rd, const Register& rn);
- inline void Sxth(const Register& rd, const Register& rn);
- inline void Sxtw(const Register& rd, const Register& rn);
- void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
- void Tbz(const Register& rt, unsigned bit_pos, Label* label);
- inline void Ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Ucvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits = 0);
- inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
- inline void Umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Uxtb(const Register& rd, const Register& rn);
- inline void Uxth(const Register& rd, const Register& rn);
- inline void Uxtw(const Register& rd, const Register& rn);
-
- // Pseudo-instructions ------------------------------------------------------
-
- // Compute rd = abs(rm).
- // This function clobbers the condition flags.
- //
- // If rm is the minimum representable value, the result is not representable.
- // Handlers for each case can be specified using the relevant labels.
-  void Abs(const Register& rd, const Register& rm,
-           Label* is_not_representable = NULL,
-           Label* is_representable = NULL);
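-  // A minimal usage sketch for Abs (illustrative only; it assumes the usual
-  // V8 '#define __ masm->' shorthand and a caller-defined label, neither of
-  // which is part of this interface):
-  //
-  //   Label not_representable;
-  //   __ Abs(x0, x1, &not_representable);  // Branches if x1 is INT64_MIN.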
-
- // Push or pop up to 4 registers of the same width to or from the stack,
- // using the current stack pointer as set by SetStackPointer.
- //
- // If an argument register is 'NoReg', all further arguments are also assumed
- // to be 'NoReg', and are thus not pushed or popped.
- //
- // Arguments are ordered such that "Push(a, b);" is functionally equivalent
- // to "Push(a); Push(b);".
- //
- // It is valid to push the same register more than once, and there is no
- // restriction on the order in which registers are specified.
- //
- // It is not valid to pop into the same register more than once in one
- // operation, not even into the zero register.
- //
- // If the current stack pointer (as set by SetStackPointer) is csp, then it
- // must be aligned to 16 bytes on entry and the total size of the specified
- // registers must also be a multiple of 16 bytes.
- //
- // Even if the current stack pointer is not the system stack pointer (csp),
- // Push (and derived methods) will still modify the system stack pointer in
- // order to comply with ABI rules about accessing memory below the system
- // stack pointer.
- //
- // Other than the registers passed into Pop, the stack pointer and (possibly)
- // the system stack pointer, these methods do not modify any other registers.
- // Scratch registers such as Tmp0() and Tmp1() are preserved.
- void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
- const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
- void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
- const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
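-  // Example for Push and Pop (a sketch; '__' is the usual masm shorthand):
-  //
-  //   __ Push(x0, x1);  // Equivalent to Push(x0); Push(x1);
-  //   __ Pop(x1, x0);   // Pop in reverse order to restore both registers.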
-
- // Alternative forms of Push and Pop, taking a RegList or CPURegList that
- // specifies the registers that are to be pushed or popped. Higher-numbered
- // registers are associated with higher memory addresses (as in the A32 push
- // and pop instructions).
- //
- // (Push|Pop)SizeRegList allow you to specify the register size as a
- // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
- // supported.
- //
- // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
- void PushCPURegList(CPURegList registers);
- void PopCPURegList(CPURegList registers);
-
- inline void PushSizeRegList(RegList registers, unsigned reg_size,
- CPURegister::RegisterType type = CPURegister::kRegister) {
- PushCPURegList(CPURegList(type, reg_size, registers));
- }
- inline void PopSizeRegList(RegList registers, unsigned reg_size,
- CPURegister::RegisterType type = CPURegister::kRegister) {
- PopCPURegList(CPURegList(type, reg_size, registers));
- }
- inline void PushXRegList(RegList regs) {
- PushSizeRegList(regs, kXRegSize);
- }
- inline void PopXRegList(RegList regs) {
- PopSizeRegList(regs, kXRegSize);
- }
- inline void PushWRegList(RegList regs) {
- PushSizeRegList(regs, kWRegSize);
- }
- inline void PopWRegList(RegList regs) {
- PopSizeRegList(regs, kWRegSize);
- }
- inline void PushDRegList(RegList regs) {
- PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
- }
- inline void PopDRegList(RegList regs) {
- PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
- }
- inline void PushSRegList(RegList regs) {
- PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
- }
- inline void PopSRegList(RegList regs) {
- PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
- }
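-  // For example, to save and restore two X registers while keeping csp
-  // 16-byte aligned (a sketch; assumes the Bit() encoding of RegList):
-  //
-  //   __ PushXRegList(x0.Bit() | x1.Bit());
-  //   __ PopXRegList(x0.Bit() | x1.Bit());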
-
- // Push the specified register 'count' times.
- void PushMultipleTimes(int count, Register src);
-
- // This is a convenience method for pushing a single Handle<Object>.
- inline void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Aliases of Push and Pop, required for V8 compatibility.
- inline void push(Register src) {
- Push(src);
- }
- inline void pop(Register dst) {
- Pop(dst);
- }
-
- // Poke 'src' onto the stack. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void Poke(const CPURegister& src, const Operand& offset);
-
- // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void Peek(const CPURegister& dst, const Operand& offset);
-
- // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
- // with 'src2' at a higher address than 'src1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
-
- // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
- // values peeked will be adjacent, with the value in 'dst2' being from a
- // higher address than 'dst1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
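-  // Example for PokePair and PeekPair (a sketch, with jssp as the current
-  // stack pointer):
-  //
-  //   __ Claim(2);             // Reserve two X-register slots.
-  //   __ PokePair(x0, x1, 0);  // x0 -> [sp], x1 -> [sp + 8].
-  //   __ PeekPair(x2, x3, 0);  // x2 <- [sp], x3 <- [sp + 8].
-  //   __ Drop(2);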
-
- // Claim or drop stack space without actually accessing memory.
- //
- // In debug mode, both of these will write invalid data into the claimed or
- // dropped space.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then it
- // must be aligned to 16 bytes and the size claimed or dropped must be a
- // multiple of 16 bytes.
- //
- // Note that unit_size must be specified in bytes. For variants which take a
- // Register count, the unit size must be a power of two.
- inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
- inline void Claim(const Register& count,
- uint64_t unit_size = kXRegSizeInBytes);
- inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
- inline void Drop(const Register& count,
- uint64_t unit_size = kXRegSizeInBytes);
-
- // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
- // register.
- inline void ClaimBySMI(const Register& count_smi,
- uint64_t unit_size = kXRegSizeInBytes);
- inline void DropBySMI(const Register& count_smi,
- uint64_t unit_size = kXRegSizeInBytes);
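-  // Example uses of the register-count and smi-count variants (a sketch):
-  //
-  //   __ Claim(x10, kWRegSizeInBytes);  // Claim x10 * 4 bytes of stack.
-  //   __ DropBySMI(x11);                // Drop x11 (a smi) X-sized slots.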
-
- // Compare a register with an operand, and branch to label depending on the
- // condition. May corrupt the status flags.
- inline void CompareAndBranch(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* label);
-
-  // Test the bits of the register defined by bit_pattern, and branch if ANY
-  // of those bits are set. May corrupt the status flags.
- inline void TestAndBranchIfAnySet(const Register& reg,
- const uint64_t bit_pattern,
- Label* label);
-
-  // Test the bits of the register defined by bit_pattern, and branch if ALL
-  // of those bits are clear (i.e. not set). May corrupt the status flags.
- inline void TestAndBranchIfAllClear(const Register& reg,
- const uint64_t bit_pattern,
- Label* label);
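-  // For example, a typical smi check (a sketch; kSmiTagMask is the usual V8
-  // smi tag mask):
-  //
-  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
-  //   __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);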
-
- // Insert one or more instructions into the instruction stream that encode
- // some caller-defined data. The instructions used will be executable with no
- // side effects.
- inline void InlineData(uint64_t data);
-
- // Insert an instrumentation enable marker into the instruction stream.
- inline void EnableInstrumentation();
-
- // Insert an instrumentation disable marker into the instruction stream.
- inline void DisableInstrumentation();
-
- // Insert an instrumentation event marker into the instruction stream. These
- // will be picked up by the instrumentation system to annotate an instruction
-  // profile. The argument marker_name must be a printable two-character string;
- // it will be encoded in the event marker.
- inline void AnnotateInstrumentation(const char* marker_name);
-
- // If emit_debug_code() is true, emit a run-time check to ensure that
- // StackPointer() does not point below the system stack pointer.
- //
- // Whilst it is architecturally legal for StackPointer() to point below csp,
- // it can be evidence of a potential bug because the ABI forbids accesses
- // below csp.
- //
- // If emit_debug_code() is false, this emits no code.
- //
- // If StackPointer() is the system stack pointer, this emits no code.
- void AssertStackConsistency();
-
- // Preserve the callee-saved registers (as defined by AAPCS64).
- //
- // Higher-numbered registers are pushed before lower-numbered registers, and
- // thus get higher addresses.
- // Floating-point registers are pushed before general-purpose registers, and
- // thus get higher addresses.
- //
- // Note that registers are not checked for invalid values. Use this method
- // only if you know that the GC won't try to examine the values on the stack.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
- void PushCalleeSavedRegisters();
-
- // Restore the callee-saved registers (as defined by AAPCS64).
- //
- // Higher-numbered registers are popped after lower-numbered registers, and
- // thus come from higher addresses.
- // Floating-point registers are popped after general-purpose registers, and
- // thus come from higher addresses.
- //
- // This method must not be called unless the current stack pointer (as set by
- // SetStackPointer) is the system stack pointer (csp), and is aligned to
- // ActivationFrameAlignment().
- void PopCalleeSavedRegisters();
-
- // Set the current stack pointer, but don't generate any code.
- inline void SetStackPointer(const Register& stack_pointer) {
- ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
- sp_ = stack_pointer;
- }
-
- // Return the current stack pointer, as set by SetStackPointer.
- inline const Register& StackPointer() const {
- return sp_;
- }
-
- // Align csp for a frame, as per ActivationFrameAlignment, and make it the
- // current stack pointer.
- inline void AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
- SetStackPointer(csp);
- }
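-  // Example for SetStackPointer and StackPointer (a sketch; 'masm' is an
-  // assumed MacroAssembler pointer):
-  //
-  //   __ SetStackPointer(jssp);  // Subsequent Push/Pop operations use jssp.
-  //   ASSERT(masm->StackPointer().Is(jssp));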
-
- // Push the system stack pointer (csp) down to allow the same to be done to
- // the current stack pointer (according to StackPointer()). This must be
- // called _before_ accessing the memory.
- //
- // This is necessary when pushing or otherwise adding things to the stack, to
- // satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed.
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- //
- // TODO(jbramley): Currently, this method can only accept values of 'space'
- // that can be encoded in one instruction. Refer to the implementation for
- // details.
- inline void BumpSystemStackPointer(const Operand& space);
-
- // Helpers ------------------------------------------------------------------
- // Root register.
- inline void InitializeRootRegister();
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index);
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index);
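-  // Example for the root-table helpers (a sketch):
-  //
-  //   __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-  //   __ JumpIfRoot(x0, Heap::kTheHoleValueRootIndex, &is_hole);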
-
- // Load both TrueValue and FalseValue roots.
- void LoadTrueFalseRoots(Register true_root, Register false_root);
-
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- ASSERT(object->IsSmi());
- Mov(result, Operand(object));
- }
- }
-
- static int SafepointRegisterStackIndex(int reg_code);
-
-  // This is required for compatibility with architecture-independent code.
- // Remove if not needed.
- inline void Move(Register dst, Register src) { Mov(dst, src); }
-
- void LoadInstanceDescriptors(Register map,
- Register descriptors);
- void EnumLengthUntagged(Register dst, Register map);
- void EnumLengthSmi(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const uint64_t shift = Field::kShift + kSmiShift;
- static const uint64_t setbits = CountSetBits(Field::kMask, 32);
- Ubfx(reg, reg, shift, setbits);
- }
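-  // For example, to extract a bit field from a smi-tagged bit field held in
-  // x1 (a sketch; any BitField-style class with kShift and kMask works, and
-  // Map::ElementsKindBits is only an illustrative choice):
-  //
-  //   __ DecodeField<Map::ElementsKindBits>(x1);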
-
- // ---- SMI and Number Utilities ----
-
- inline void SmiTag(Register dst, Register src);
- inline void SmiTag(Register smi);
- inline void SmiUntag(Register dst, Register src);
- inline void SmiUntag(Register smi);
- inline void SmiUntagToDouble(FPRegister dst,
- Register src,
- UntagMode mode = kNotSpeculativeUntag);
- inline void SmiUntagToFloat(FPRegister dst,
- Register src,
- UntagMode mode = kNotSpeculativeUntag);
-
-  // Compute the absolute value of 'smi' and leave the result in the 'smi'
-  // register. If 'smi' is the most negative SMI, the absolute value cannot
-  // be represented as a SMI and the code jumps to 'slow'.
- void SmiAbs(const Register& smi, Label* slow);
-
- inline void JumpIfSmi(Register value,
- Label* smi_label,
- Label* not_smi_label = NULL);
- inline void JumpIfNotSmi(Register value, Label* not_smi_label);
- inline void JumpIfBothSmi(Register value1,
- Register value2,
- Label* both_smi_label,
- Label* not_smi_label = NULL);
- inline void JumpIfEitherSmi(Register value1,
- Register value2,
- Label* either_smi_label,
- Label* not_smi_label = NULL);
- inline void JumpIfEitherNotSmi(Register value1,
- Register value2,
- Label* not_smi_label);
- inline void JumpIfBothNotSmi(Register value1,
- Register value2,
- Label* not_smi_label);
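-  // Example for the smi helpers (a sketch):
-  //
-  //   __ SmiTag(x0);               // x0 = x0 << kSmiShift.
-  //   __ JumpIfNotSmi(x1, &slow);
-  //   __ SmiUntag(x1);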
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
- void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
-
- // Abort execution if argument is not a name, enabled via --debug-code.
- void AssertName(Register object);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- void JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number = NULL);
- void JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map = NoReg);
- void JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- Register heap_number_map = NoReg);
-
- // Jump to label if the input double register contains -0.0.
- void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
-
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the 'object' register is found in the cache, the generated code falls
-  // through with the result in the 'result' register. The object and result
-  // registers can be the same. If the number is not found in the cache, the
-  // code jumps to the label 'not_found', leaving only the content of the
-  // 'object' register unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
- // output.
- void ClampInt32ToUint8(Register in_out);
- void ClampInt32ToUint8(Register output, Register input);
-
- // Saturate a double in input to an unsigned 8-bit integer in output.
- void ClampDoubleToUint8(Register output,
- DoubleRegister input,
- DoubleRegister dbl_scratch);
-
- // Try to convert a double to a signed 32-bit int.
- // This succeeds if the result compares equal to the input, so inputs of -0.0
- // are converted to 0 and handled as a success.
- void TryConvertDoubleToInt32(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion = NULL) {
- ASSERT(as_int.Is32Bits());
- TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
- on_failed_conversion);
- }
-
- // Try to convert a double to a signed 64-bit int.
- // This succeeds if the result compares equal to the input, so inputs of -0.0
- // are converted to 0 and handled as a success.
- void TryConvertDoubleToInt64(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion = NULL) {
- ASSERT(as_int.Is64Bits());
- TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
- on_failed_conversion);
- }
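-  // Example for the 32-bit variant (a sketch; the W register form requires a
-  // 32-bit destination):
-  //
-  //   Label ok, fail;
-  //   __ TryConvertDoubleToInt32(w0, d0, d1, &ok, &fail);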
-
- // ---- Object Utilities ----
-
- // Copy fields from 'src' to 'dst', where both are tagged objects.
- // The 'temps' list is a list of X registers which can be used for scratch
- // values. The temps list must include at least one register, and it must not
- // contain Tmp0() or Tmp1().
- //
- // Currently, CopyFields cannot make use of more than three registers from
- // the 'temps' list.
- //
- // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
- void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
-
-  // Copies a number of bytes from src to dst. All passed registers are
-  // clobbered. On exit, src and dst will point just past the last byte read
-  // or written, and length will be zero. A hint may be supplied to select the
-  // most efficient copying algorithm.
- void CopyBytes(Register dst,
- Register src,
- Register length,
- Register scratch,
- CopyHint hint = kCopyUnknown);
-
-  // Initialize fields with filler values. Fields from start_offset up to, but
-  // not including, end_offset are overwritten with the value in filler. At the
-  // end of the loop, start_offset takes the value of end_offset.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // ---- String Utilities ----
-
-
- // Jump to label if either object is not a sequential ASCII string.
- // Optionally perform a smi check on the objects first.
- void JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check = DO_SMI_CHECK);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- void JumpIfNotUniqueName(Register type, Label* not_unique_name);
-
- // ---- Calling / Jumping helpers ----
-
-  // This is required for compatibility with architecture-independent code.
- inline void jmp(Label* L) { B(L); }
-
-  // Passes the thrown value to the handler at the top of the try handler
-  // chain. Register value must be x0.
- void Throw(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain. Register value must be x0.
- void ThrowUncatchable(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Throw a message string as an exception.
- void Throw(BailoutReason reason);
-
- // Throw a message string as an exception if a condition is not true.
- void ThrowIf(Condition cc, BailoutReason reason);
-
- // Throw a message string as an exception if the value is a smi.
- void ThrowIfSmi(const Register& value, BailoutReason reason);
-
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
- void TailCallStub(CodeStub* stub);
-
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
- }
-
- // TODO(all): Why does this variant save FP regs unconditionally?
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, kSaveFPRegs);
- }
-
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- int ActivationFrameAlignment();
-
- // Calls a C function.
- // The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function,
- int num_reg_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions.
- // 'stack_space' is the space to be unwound on exit (includes the call JS
- // arguments space and the additional space allocated for the fast call).
- // 'spill_offset' is the offset from the stack pointer where
- // CallApiFunctionAndReturn can spill registers.
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- int stack_space,
- int spill_offset,
- MemOperand return_value_operand,
- MemOperand* context_restore_operand);
-
-  // The number of registers that CallApiFunctionAndReturn will need to save
-  // on the stack. The space for these registers needs to be allocated in the
-  // ExitFrame before calling CallApiFunctionAndReturn.
- static const int kCallApiFunctionSpillSpace = 4;
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
-  // Store the code object for the given builtin in the target register and
-  // set up the function in x1.
- // TODO(all): Can we use another register than x1?
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- void Jump(Register target);
- void Jump(Address target, RelocInfo::Mode rmode);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode);
- void Jump(intptr_t target, RelocInfo::Mode rmode);
-
- void Call(Register target);
- void Call(Label* target);
- void Call(Address target, RelocInfo::Mode rmode);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // For every Call variant, there is a matching CallSize function that returns
- // the size (in bytes) of the call sequence.
- static int CallSize(Register target);
- static int CallSize(Label* target);
- static int CallSize(Address target, RelocInfo::Mode rmode);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Registers used through the invocation chain are hard-coded.
- // We force passing the parameters to ensure the contracts are correctly
- // honoured by the caller.
- // 'function' must be x1.
- // 'actual' must use an immediate or x0.
- // 'expected' must use an immediate or x2.
- // 'call_kind' must be x5.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- InvokeFlag flag,
- bool* definitely_mismatches,
- const CallWrapper& call_wrapper);
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
- // Invoke the JavaScript function in the given register.
- // Changes the current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
- void InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
-
- // ---- Floating point helpers ----
-
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Branches to 'done'
-  // if the conversion succeeds; otherwise falls through with a saturated
-  // result. On return, 'result' either holds the answer or has been clobbered
-  // on the fall-through path.
- //
- // Only public for the test code in test-code-stubs-a64.cc.
- void TryInlineTruncateDoubleToI(Register result,
- DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DoubleRegister double_input);
-
-  // Performs a truncating conversion of a heap number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and
-  // 'object' must be different registers. Exits with 'result' holding the
-  // answer.
- void TruncateHeapNumberToI(Register result, Register object);
-
-  // Converts the smi or heap number in object to an int32 using the rules
-  // for ToInt32 as described in ECMAScript 9.5: the value is truncated
-  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must
-  // be different registers.
- void TruncateNumberToI(Register object,
- Register result,
- Register heap_number_map,
- Label* not_int32);
-
- // ---- Code generation helpers ----
-
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() const { return generating_stub_; }
-#if DEBUG
- void set_allow_macro_instructions(bool value) {
- allow_macro_instructions_ = value;
- }
- bool allow_macro_instructions() const { return allow_macro_instructions_; }
-#endif
- bool use_real_aborts() const { return use_real_aborts_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
- bool AllowThisStubCall(CodeStub* stub);
-
- class NoUseRealAbortsScope {
- public:
- explicit NoUseRealAbortsScope(MacroAssembler* masm) :
- saved_(masm->use_real_aborts_), masm_(masm) {
- masm_->use_real_aborts_ = false;
- }
- ~NoUseRealAbortsScope() {
- masm_->use_real_aborts_ = saved_;
- }
- private:
- bool saved_;
- MacroAssembler* masm_;
- };
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old pointer space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. The allocated object is returned in result.
- //
- // If the new space is exhausted control continues at the gc_required label.
- // In this case, the result and scratch registers may still be clobbered.
-  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
-  // Undo allocation in new space. The object passed and objects allocated
-  // after it will no longer be allocated. The caller must make sure that no
-  // pointers remain to the object(s) that are no longer allocated, as they
-  // would be invalid once the allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed.
- // All registers are clobbered.
- // If no heap_number_map register is provided, the function will take care of
- // loading it.
- void AllocateHeapNumber(Register result,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- Register heap_number_map = NoReg);
- void AllocateHeapNumberWithValue(Register result,
- DoubleRegister value,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- Register heap_number_map = NoReg);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
-  // Try to get the prototype of a function and put the value in the result
-  // register. Checks that the function really is a function and jumps to the
-  // miss label if the fast checks fail. The function register will be
-  // untouched; the other registers may be clobbered.
- enum BoundFunctionAction {
- kMissOnBoundFunction,
- kDontMissOnBoundFunction
- };
-
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- BoundFunctionAction action =
- kDontMissOnBoundFunction);
-
- // Compare object type for heap object. heap_object contains a non-Smi
- // whose object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- // It leaves the map in the map register (unless the type_reg and map register
- // are the same register). It leaves the heap object in the heap_object
- // register unless the heap_object register is the same register as one of the
- // other registers.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
- InstanceType type);
-
-
-  // Compare object type for heap object, and branch if equal (or not).
- // heap_object contains a non-Smi whose object type should be compared with
- // the given type. This both sets the flags and leaves the object type in
- // the type_reg register. It leaves the map in the map register (unless the
- // type_reg and map register are the same register). It leaves the heap
- // object in the heap_object register unless the heap_object register is the
- // same register as one of the other registers.
- void JumpIfObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_cond_pass,
- Condition cond = eq);
-
- void JumpIfNotObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_not_object);
-
- // Compare instance type in a map. map contains a valid map object whose
- // object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
-
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags
-  // are set with the result of the map compare. If multiple map compares are
-  // required, the compare sequence branches to early_success.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success = NULL);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success = NULL);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
- // As above, but the map of the object is already loaded into obj_map, and is
- // preserved.
- void CheckMap(Register obj_map,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Test the bitfield of the heap object map with mask and set the condition
- // flags. The object register is preserved.
- void TestMapBitfield(Register object, uint64_t mask);
-
- // Load the elements kind field of an object, and return it in the result
- // register.
- void LoadElementsKind(Register result, Register object);
-
- // Compare the object in a register to a value from the root list.
- // Uses the Tmp0() register as scratch.
- void CompareRoot(const Register& obj, Heap::RootListIndex index);
-
- // Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_equal);
-
- // Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(const Register& obj,
- Heap::RootListIndex index,
- Label* if_not_equal);
-
-  // Load and check the instance type of an object for being a unique name.
-  // Loads the type into the second argument register.
-  // The object and type arguments can be the same register; in that case it
-  // will be overwritten with the type.
-  // Falls through if the object is a name, and jumps to 'fail' otherwise.
- inline void IsObjectNameType(Register object, Register type, Label* fail);
-
- inline void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
-  // Check the instance type in the given map to see if it corresponds to a
-  // JS object type. Jump to the fail label if this is not the case and fall
-  // through otherwise. However, if the fail label is NULL, no branch will be
-  // performed and the condition flags will be updated. You can then test for
-  // the "le" condition to check whether it is a valid JS object type.
- inline void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // The object and type arguments can be the same register; in that case it
- // will be overwritten with the type.
-  // Jumps to 'not_string' or 'string' as appropriate. If the appropriate
-  // label is NULL, falls through.
- inline void IsObjectJSStringType(Register object, Register type,
- Label* not_string, Label* string = NULL);
-
- // Compare the contents of a register with an operand, and branch to true,
- // false or fall through, depending on condition.
- void CompareAndSplit(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-
-  // Test the bits of the register defined by bit_pattern, and branch to
-  // if_any_set, if_all_clear or fall_through accordingly.
- void TestAndSplit(const Register& reg,
- uint64_t bit_pattern,
- Label* if_all_clear,
- Label* if_any_set,
- Label* fall_through);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map, Register scratch, Label* fail);
-
- // Check to see if number can be stored as a double in FastDoubleElements.
- // If it can, store it at the index specified by key_reg in the array,
- // otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- FPRegister fpscratch1,
- FPRegister fpscratch2,
- Label* fail,
- int elements_offset = 0);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // ---------------------------------------------------------------------------
- // Inline caching support.
-
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- SeqStringSetCharCheckIndexType index_type,
- Register scratch,
- uint32_t encoding_mask);
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
-  // Hash the integer value in the 'key' register.
-  // It uses the same algorithm as ComputeIntegerHash in utils.h.
- void GetNumberHash(Register key, Register scratch);
-
- // Load value from the dictionary.
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'elements' or 'key'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // ---------------------------------------------------------------------------
- // Frames.
-
- // Activation support.
-  // Note that Tmp0() and Tmp1() are used as scratch registers. This is safe
-  // because these methods are not used in Crankshaft.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Returns map with validated enum cache in object register.
- void CheckEnumCache(Register object,
- Register null_value,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* call_runtime);
-
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver should point to the array object.
- // If allocation info is present, the Z flag is set (so that the eq
- // condition will pass).
- void TestJSArrayForAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* no_memento_found);
-
- void JumpIfJSArrayHasAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
- &no_memento_found);
- B(eq, memento_found);
- Bind(&no_memento_found);
- }
-
- // The stack pointer has to switch between csp and jssp when setting up and
- // destroying the exit frame. Hence preserving/restoring the registers is
- // slightly more complicated than simple push/pop operations.
- void ExitFramePreserveFPRegs();
- void ExitFrameRestoreFPRegs();
-
- // Generates function and stub prologue code.
- void Prologue(PrologueFrameMode frame_mode);
-
- // Enter exit frame. Exit frames are used when calling C code from generated
- // (JavaScript) code.
- //
- // The stack pointer must be jssp on entry, and will be set to csp by this
- // function. The frame pointer is also configured, but the only other
- // registers modified by this function are the provided scratch register, and
- // jssp.
- //
- // The 'extra_space' argument can be used to allocate some space in the exit
- // frame that will be ignored by the GC. This space will be reserved in the
- // bottom of the frame immediately above the return address slot.
- //
- // Set up a stack frame and registers as follows:
- // fp[8]: CallerPC (lr)
- // fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: SPOffset (new csp)
- // fp[-16]: CodeObject()
-  //   fp[-16 - fp_size]: Saved doubles, if save_doubles is true.
- // csp[8]: Memory reserved for the caller if extra_space != 0.
- // Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
- //
- // This function also stores the new frame information in the top frame, so
- // that the new frame becomes the current frame.
- void EnterExitFrame(bool save_doubles,
- const Register& scratch,
- int extra_space = 0);
-
- // Leave the current exit frame, after a C function has returned to generated
- // (JavaScript) code.
- //
- // This effectively unwinds the operation of EnterExitFrame:
- // * Preserved doubles are restored (if restore_doubles is true).
- // * The frame information is removed from the top frame.
- // * The exit frame is dropped.
- // * The stack pointer is reset to jssp.
- //
- // The stack pointer must be csp on entry.
- void LeaveExitFrame(bool save_doubles,
- const Register& scratch,
- bool restore_context);
-
- void LoadContext(Register dst, int context_chain_length);
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
-
- // ---------------------------------------------------------------------------
- // Garbage collector support (GC).
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
- void PushSafepointFPRegisters();
- void PopSafepointFPRegisters();
-
- // Store value in register src in the safepoint stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst) {
- Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
- }
-
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src) {
- Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
- }
-
- void CheckPageFlagSet(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_any_set);
-
- void CheckPageFlagClear(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_all_clear);
-
- void CheckMapDeprecated(Handle<Map> map,
- Register scratch,
- Label* if_deprecated);
-
- // Check if object is in new space and jump accordingly.
- // Register 'object' is preserved.
- void JumpIfNotInNewSpace(Register object,
- Label* branch) {
- InNewSpace(object, ne, branch);
- }
-
- void JumpIfInNewSpace(Register object,
- Label* branch) {
- InNewSpace(object, eq, branch);
- }
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does not
- // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Helper for finding the mark bits for an address.
- // Note that the behaviour slightly differs from other architectures.
- // On exit:
- // - addr_reg is unchanged.
- // - The bitmap register points at the word with the mark bits.
- // - The shift register contains the index of the first color bit for this
- // object in the bitmap.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register shift_reg);
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
-
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
-
-
- // ---------------------------------------------------------------------------
- // Debugging.
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
- void AssertRegisterIsClear(Register reg, BailoutReason reason);
- void AssertRegisterIsRoot(
- Register reg,
- Heap::RootListIndex index,
- BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
- void AssertFastElements(Register elements);
-
- // Abort if the specified register contains the invalid color bit pattern.
- // The pattern must be in bits [1:0] of 'reg' register.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertHasValidColor(const Register& reg);
-
- // Abort if 'object' register doesn't point to a string object.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertIsString(const Register& object);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
- void CheckRegisterIsClear(Register reg, BailoutReason reason);
-
- // Print a message to stderr and abort execution.
- void Abort(BailoutReason reason);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- ArrayHasHoles holes);
-
- void LoadArrayFunction(Register function);
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers function and
- // map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- // --------------------------------------------------------------------------
- // Set the registers used internally by the MacroAssembler as scratch
- // registers. These registers are used to implement behaviours which are not
- // directly supported by A64, and where an intermediate result is required.
- //
- // Both tmp0 and tmp1 may be set to any X register except for xzr, csp,
- // and StackPointer(). Also, they must not be the same register (though they
- // may both be NoReg).
- //
- // It is valid to set either or both of these registers to NoReg if you don't
- // want the MacroAssembler to use any scratch registers. In a debug build, the
- // Assembler will assert that any registers it uses are valid. Be aware that
- // this check is not present in release builds. If this is a problem, use the
- // Assembler directly.
- void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
- // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
- ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
- ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
-
- ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
- ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
- tmp0_ = tmp0;
- tmp1_ = tmp1;
- }
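-
- // A minimal usage sketch (hypothetical call site): release both scratch
- // registers around a sequence that manipulates ip0 and ip1 directly, then
- // hand them back to the MacroAssembler afterwards.
- //
- //   masm->SetScratchRegisters(NoReg, NoReg);
- //   ... code that uses ip0 and ip1 directly ...
- //   masm->SetScratchRegisters(ip0, ip1);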
-
- const Register& Tmp0() const {
- return tmp0_;
- }
-
- const Register& Tmp1() const {
- return tmp1_;
- }
-
- const Register WTmp0() const {
- return Register::Create(tmp0_.code(), kWRegSize);
- }
-
- const Register WTmp1() const {
- return Register::Create(tmp1_.code(), kWRegSize);
- }
-
- void SetFPScratchRegister(const FPRegister& fptmp0) {
- fptmp0_ = fptmp0;
- }
-
- const FPRegister& FPTmp0() const {
- return fptmp0_;
- }
-
- const Register AppropriateTempFor(
- const Register& target,
- const CPURegister& forbidden = NoCPUReg) const {
- Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
- ASSERT(!candidate.Is(target));
- return Register::Create(candidate.code(), target.SizeInBits());
- }
-
- const FPRegister AppropriateTempFor(
- const FPRegister& target,
- const CPURegister& forbidden = NoCPUReg) const {
- USE(forbidden);
- FPRegister candidate = FPTmp0();
- ASSERT(!candidate.Is(forbidden));
- ASSERT(!candidate.Is(target));
- return FPRegister::Create(candidate.code(), target.SizeInBits());
- }
-
- // Like printf, but print at run-time from generated code.
- //
- // The caller must ensure that arguments for floating-point placeholders
- // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
- // placeholders are Registers.
- //
- // A maximum of four arguments may be given to any single Printf call. The
- // arguments must be of the same type, but they do not need to have the same
- // size.
- //
- // The following registers cannot be printed:
- // Tmp0(), Tmp1(), StackPointer(), csp.
- //
- // This function automatically preserves caller-saved registers so that
- // calling code can use Printf at any point without having to worry about
- // corruption. The preservation mechanism generates a lot of code. If this is
- // a problem, preserve the important registers manually and then call
- // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
- // implicitly preserved.
- //
- // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
- // preserved, and can be printed. This allows Printf to be used in debug
- // code.
- //
- // This function assumes (and asserts) that the current stack pointer is
- // callee-saved, not caller-saved. This is most likely the case anyway, as a
- // caller-saved stack pointer doesn't make a lot of sense.
- void Printf(const char * format,
- const CPURegister& arg0 = NoCPUReg,
- const CPURegister& arg1 = NoCPUReg,
- const CPURegister& arg2 = NoCPUReg,
- const CPURegister& arg3 = NoCPUReg);
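-
- // A hypothetical example, respecting the constraints above: at most four
- // arguments, all of the same type (here, integer Registers), though they
- // may have different sizes.
- //
- //   masm->Printf("index: %d of %d\n", w0, x1);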
-
- // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
- //
- // The return code from the system printf call will be returned in x0.
- void PrintfNoPreserve(const char * format,
- const CPURegister& arg0 = NoCPUReg,
- const CPURegister& arg1 = NoCPUReg,
- const CPURegister& arg2 = NoCPUReg,
- const CPURegister& arg3 = NoCPUReg);
-
- // Code ageing support functions.
-
- // Code ageing on A64 works similarly to ARM. When V8 wants to mark a
- // function as old, it replaces some of the function prologue (generated by
- // FullCodeGenerator::Generate) with a call to a special stub (ultimately
- // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
- // function prologue to its initial young state (indicating that it has been
- // recently run) and continues. A young function is therefore one which has a
- // normal frame setup sequence, and an old function has a code age sequence
- // which calls a code ageing stub.
-
- // Set up a basic stack frame for young code (or code exempt from ageing) with
- // type FUNCTION. It may be patched later for code ageing support. This is
- // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
- //
- // This function takes an Assembler so it can be called from either a
- // MacroAssembler or a PatchingAssembler context.
- static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
-
- // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
- void EmitFrameSetupForCodeAgePatching();
-
- // Emit a code age sequence that calls the relevant code age stub. The code
- // generated by this sequence is expected to replace the code generated by
- // EmitFrameSetupForCodeAgePatching, and represents an old function.
- //
- // If stub is NULL, this function generates the code age sequence but omits
- // the stub address that is normally embedded in the instruction stream. This
- // can be used by debug code to verify code age sequences.
- static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
-
- // Call EmitCodeAgeSequence from a MacroAssembler context.
- void EmitCodeAgeSequence(Code* stub);
-
- // Return true if the sequence is a young sequence generated by
- // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
- // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
- static bool IsYoungSequence(byte* sequence);
-
-#ifdef DEBUG
- // Return true if the sequence is a code age sequence generated by
- // EmitCodeAgeSequence.
- static bool IsCodeAgeSequence(byte* sequence);
-#endif
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
- private:
- // Helpers for CopyFields.
- // These each implement CopyFields in a different way.
- void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3);
- void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2);
- void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
- Register scratch1);
-
- // The actual Push and Pop implementations. These don't generate any code
- // other than that required for the push or pop. This allows
- // (Push|Pop)CPURegList to bundle together run-time assertions for a large
- // block of registers.
- //
- // Note that size is per register, and is specified in bytes.
- void PushHelper(int count, int size,
- const CPURegister& src0, const CPURegister& src1,
- const CPURegister& src2, const CPURegister& src3);
- void PopHelper(int count, int size,
- const CPURegister& dst0, const CPURegister& dst1,
- const CPURegister& dst2, const CPURegister& dst3);
-
- // Perform necessary maintenance operations before a push or pop.
- //
- // Note that size is per register, and is specified in bytes.
- void PrepareForPush(int count, int size);
- void PrepareForPop(int count, int size);
-
- // Call Printf. On a native build, a simple call will be generated, but if the
- // simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack (csp) must be prepared by the caller as for a normal
- // AAPCS64 call to 'printf'.
- //
- // The 'type' argument specifies the type of the optional arguments.
- void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry(Register exception,
- Register object,
- Register state,
- Register scratch1,
- Register scratch2);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Try to convert a double to an int so that integer fast-paths may be
- // used. Not every valid integer value is guaranteed to be caught.
- // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
- // is a W or X register.
- //
- // This does not distinguish between +0 and -0, so if this distinction is
- // important it must be checked separately.
- void TryConvertDoubleToInt(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion = NULL);
-
- bool generating_stub_;
-#ifdef DEBUG
- // Tell whether any of the macro instruction can be used. When false the
- // MacroAssembler will assert if a method which can emit a variable number
- // of instructions is called.
- bool allow_macro_instructions_;
-#endif
- bool has_frame_;
-
- // The Abort method should call a V8 runtime function, but the CallRuntime
- // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
- // use a simpler abort mechanism that doesn't depend on CEntryStub.
- //
- // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
- // being generated.
- bool use_real_aborts_;
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // The register to use as a stack pointer for stack operations.
- Register sp_;
-
- // Scratch registers used internally by the MacroAssembler.
- Register tmp0_;
- Register tmp1_;
- FPRegister fptmp0_;
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
- public:
- // Far branches resolving.
- //
- // The various classes of branch instructions with immediate offsets have
- // different ranges. While the Assembler will fail to assemble a branch
- // exceeding its range, the MacroAssembler offers a mechanism to resolve
- // branches to too distant targets, either by tweaking the generated code to
- // use branch instructions with wider ranges or generating veneers.
- //
- // Currently branches to distant targets are resolved using unconditional
- // branch instructions with a range of +-128MB. If that becomes too little
- // (!), the mechanism can be extended to generate special veneers for really
- // far targets.
-
- // Returns true if we should emit a veneer as soon as possible for a branch
- // which can at most reach to specified pc.
- bool ShouldEmitVeneer(int max_reachable_pc,
- int margin = kVeneerDistanceMargin);
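-
- // A sketch of the intended check (assumed semantics, not necessarily the
- // exact implementation): a veneer should be emitted once the current
- // position comes within 'margin' bytes of the branch's maximum range:
- //
- //   bool ShouldEmitVeneer(int max_reachable_pc, int margin) {
- //     return pc_offset() + margin >= max_reachable_pc;
- //   }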
-
- // The maximum code size generated for a veneer. Currently one branch
- // instruction. This is for code size checking purposes, and can be extended
- // in the future for example if we decide to add nops between the veneers.
- static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
-
- // Emits veneers for branches that are approaching their maximum range.
- // If need_protection is true, the veneers are protected by a branch jumping
- // over the code.
- void EmitVeneers(bool need_protection);
- void EmitVeneersGuard();
- // Checks whether veneers need to be emitted at this point.
- void CheckVeneers(bool need_protection);
-
- // Helps resolve branching to labels potentially out of range.
- // If the label is not bound, it registers the information necessary to later
- // be able to emit a veneer for this branch if necessary.
- // If the label is bound, it returns true if the label (or the previous link
- // in the label chain) is out of range. In that case the caller is responsible
- // for generating appropriate code.
- // Otherwise it returns false.
- // This function also checks whether veneers need to be emitted.
- bool NeedExtraInstructionsOrRegisterBranch(Label *label,
- ImmBranchType branch_type);
-
- private:
- // We generate a veneer for a branch if we reach within this distance of the
- // limit of the range.
- static const int kVeneerDistanceMargin = 2 * KB;
- int unresolved_branches_first_limit() const {
- ASSERT(!unresolved_branches_.empty());
- return unresolved_branches_.begin()->first;
- }
-};
-
-
-// Use this scope when you need a one-to-one mapping between methods and
-// instructions. This scope prevents the MacroAssembler from being called and
-// literal pools from being emitted. It also asserts the number of instructions
-// emitted is what you specified when creating the scope.
-class InstructionAccurateScope BASE_EMBEDDED {
- public:
- InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
- : masm_(masm), size_(count * kInstructionSize) {
- masm_->StartBlockConstPool();
-#ifdef DEBUG
- if (count != 0) {
- masm_->bind(&start_);
- }
- previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
- masm_->set_allow_macro_instructions(false);
-#endif
- }
-
- ~InstructionAccurateScope() {
- masm_->EndBlockConstPool();
-#ifdef DEBUG
- if (start_.is_bound()) {
- ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
- }
- masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
-#endif
- }
-
- private:
- MacroAssembler* masm_;
- size_t size_;
-#ifdef DEBUG
- Label start_;
- bool previous_allow_macro_instructions_;
-#endif
-};
-
-
-inline MemOperand ContextMemOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-inline MemOperand GlobalObjectMemOperand() {
- return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-// Encode and decode information about patchable inline SMI checks.
-class InlineSmiCheckInfo {
- public:
- explicit InlineSmiCheckInfo(Address info);
-
- bool HasSmiCheck() const {
- return smi_check_ != NULL;
- }
-
- const Register& SmiRegister() const {
- return reg_;
- }
-
- Instruction* SmiCheck() const {
- return smi_check_;
- }
-
- // Use MacroAssembler::InlineData to emit information about patchable inline
- // SMI checks. The caller may specify 'reg' as NoReg and an unbound
- // 'smi_check' label to indicate that there is no inline SMI check. Note that
- // 'reg' cannot be csp.
- //
- // The generated patch information can be read using the InlineSmiCheckInfo
- // class.
- static void Emit(MacroAssembler* masm, const Register& reg,
- const Label* smi_check);
-
- // Emit information to indicate that there is no inline SMI check.
- static void EmitNotInlined(MacroAssembler* masm) {
- Label unbound;
- Emit(masm, NoReg, &unbound);
- }
-
- private:
- Register reg_;
- Instruction* smi_check_;
-
- // Fields in the data encoded by InlineData.
-
- // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
- // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
- // used in a patchable check. The Emit() method checks this.
- //
- // Note that the total size of the fields is restricted by the underlying
- // storage size handled by the BitField class, which is a uint32_t.
- class RegisterBits : public BitField<unsigned, 0, 5> {};
- class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
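-
- // A decoding sketch (assumed usage, mirroring the BitField layout above):
- // given the 32-bit payload emitted by InlineData,
- //
- //   unsigned reg_code = RegisterBits::decode(payload);
- //   uint32_t delta = DeltaBits::decode(payload);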
-};
-
-} } // namespace v8::internal
-
-#ifdef GENERATED_CODE_COVERAGE
-#error "Unsupported option"
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-#endif // V8_A64_MACRO_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/regexp-macro-assembler-a64.cc b/deps/v8/src/a64/regexp-macro-assembler-a64.cc
deleted file mode 100644
index 00558c017b..0000000000
--- a/deps/v8/src/a64/regexp-macro-assembler-a64.cc
+++ /dev/null
@@ -1,1730 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "cpu-profiler.h"
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "a64/regexp-macro-assembler-a64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention:
- * - w19 : Used to temporarily store a value before a call to C code.
- * See CheckNotBackReferenceIgnoreCase.
- * - x20 : Pointer to the current code object (Code*),
- * it includes the heap object tag.
- * - w21 : Current position in input, as negative offset from
- * the end of the string. Please notice that this is
- * the byte offset, not the character offset!
- * - w22 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - x23 : Points to tip of backtrack stack.
- * - w24 : Position of the first character minus one: non_position_value.
- * Used to initialize capture registers.
- * - x25 : Address at the end of the input string: input_end.
- * Points to byte after last character in input.
- * - x26 : Address at the start of the input string: input_start.
- * - w27 : Where to start in the input string.
- * - x28 : Output array pointer.
- * - x29/fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - x16/x17 : IP registers, used by assembler. Very volatile.
- * - csp : Points to tip of C stack.
- *
- * - x0-x7 : Used as a cache to store 32 bit capture registers. These
- * registers need to be retained every time a call to C code
- * is done.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- *
- * Location Name Description
- * (as referred to in
- * the code)
- *
- * - fp[104] isolate Address of the current isolate.
- * - fp[96] return_address Secondary link/return address
- * used by an exit frame if this is a
- * native call.
- * ^^^ csp when called ^^^
- * - fp[88] lr Return from the RegExp code.
- * - fp[80] r29 Old frame pointer (CalleeSaved).
- * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
- * - fp[-8] direct_call 1 => Direct call from JavaScript code.
- * 0 => Call through the runtime system.
- * - fp[-16] stack_base High end of the memory area to use as
- * the backtracking stack.
- * - fp[-24] output_size Output may fit multiple sets of matches.
- * - fp[-32] input Handle containing the input string.
- * - fp[-40] success_counter
- * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
- * - fp[-44] register N Capture registers initialized with
- * - fp[-48] register N + 1 non_position_value.
- * ... The first kNumCachedRegisters (N) registers
- * ... are cached in x0 to x7.
- * ... Only positions must be stored in the first
- * - ... num_saved_registers_ registers.
- * - ...
- * - register N + num_registers - 1
- * ^^^^^^^^^ csp ^^^^^^^^^
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input,
- * int start_offset,
- * Address input_start,
- * Address input_end,
- * int* output,
- * int output_size,
- * Address stack_base,
- * bool direct_call = false,
- * Address secondary_return_address, // Only used by native call.
- * Isolate* isolate)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in a64/simulator-a64.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerA64::RegExpMacroAssemblerA64(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- __ SetStackPointer(csp);
- ASSERT_EQ(0, registers_to_save % 2);
- // We can cache at most 16 W registers in x0-x7.
- STATIC_ASSERT(kNumCachedRegisters <= 16);
- STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
- __ B(&entry_label_); // We'll write the entry code later.
- __ Bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerA64::~RegExpMacroAssemblerA64() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-int RegExpMacroAssemblerA64::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerA64::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ Add(current_input_offset(),
- current_input_offset(), by * char_size());
- }
-}
-
-
-void RegExpMacroAssemblerA64::AdvanceRegister(int reg, int by) {
- ASSERT((reg >= 0) && (reg < num_registers_));
- if (by != 0) {
- Register to_advance;
- RegisterState register_state = GetRegisterState(reg);
- switch (register_state) {
- case STACKED:
- __ Ldr(w10, register_location(reg));
- __ Add(w10, w10, by);
- __ Str(w10, register_location(reg));
- break;
- case CACHED_LSW:
- to_advance = GetCachedRegister(reg);
- __ Add(to_advance, to_advance, by);
- break;
- case CACHED_MSW:
- to_advance = GetCachedRegister(reg);
- __ Add(to_advance, to_advance, static_cast<int64_t>(by) << kWRegSize);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
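-
-// Note on the cached register layout (see the register convention comment
-// above): each X register in x0-x7 caches two W capture registers, with the
-// even-numbered register in the low 32 bits (CACHED_LSW) and the
-// odd-numbered one in the high 32 bits (CACHED_MSW), hence the shift by
-// kWRegSize in the CACHED_MSW case above.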
-
-
-void RegExpMacroAssemblerA64::Backtrack() {
- CheckPreemption();
- Pop(w10);
- __ Add(x10, code_pointer(), Operand(w10, UXTW));
- __ Br(x10);
-}
-
-
-void RegExpMacroAssemblerA64::Bind(Label* label) {
- __ Bind(label);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacter(uint32_t c, Label* on_equal) {
- CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacterGT(uc16 limit, Label* on_greater) {
- CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
-}
-
-
-void RegExpMacroAssemblerA64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
- BranchOrBacktrack(eq, on_at_start);
- __ Bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerA64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacterLT(uc16 limit, Label* on_less) {
- CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- // This method is only ever called from the cctests.
-
- if (check_end_of_string) {
- // Check that the last character of the required match is inside the string.
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- Register characters_address = x11;
-
- __ Add(characters_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
- if (cp_offset != 0) {
- __ Add(characters_address, characters_address, cp_offset * char_size());
- }
-
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- } else {
- __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
- }
- CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerA64::CheckGreedyLoop(Label* on_equal) {
- // Compare the current input offset with the value on top of the backtrack
- // stack.
- __ Ldr(w10, MemOperand(backtrack_stackpointer()));
- __ Cmp(current_input_offset(), w10);
- // If they are equal, pop the entry: x11 is 1 on equality and 0 otherwise,
- // so this conditionally advances the stack pointer by one W-sized slot.
- __ Cset(x11, eq);
- __ Add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeInBytesLog2));
- BranchOrBacktrack(eq, on_equal);
-}
-
-void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- Register capture_start_offset = w10;
- // Save the capture length in a callee-saved register so it will
- // be preserved if we call a C helper.
- Register capture_length = w19;
- ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
-
- // Find length of back-referenced capture.
- ASSERT((start_reg % 2) == 0);
- if (start_reg < kNumCachedRegisters) {
- __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
- __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
- } else {
- __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
- }
- __ Sub(capture_length, w11, capture_start_offset); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_check;
-
- Register capture_start_address = x12;
- Register capture_end_address = x13;
- Register current_position_address = x14;
-
- __ Add(capture_start_address,
- input_end(),
- Operand(capture_start_offset, SXTW));
- __ Add(capture_end_address,
- capture_start_address,
- Operand(capture_length, SXTW));
- __ Add(current_position_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
-
- Label loop;
- __ Bind(&loop);
- __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
- __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
- __ Cmp(w10, w11);
- __ B(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
- __ Orr(w11, w11, 0x20); // Also convert input character.
- __ Cmp(w11, w10);
- __ B(ne, &fail);
- __ Sub(w10, w10, 'a');
- __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
- __ B(ls, &loop_check); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ Sub(w10, w10, 224 - 'a');
- // TODO(jbramley): Use Ccmp here.
- __ Cmp(w10, 254 - 224);
- __ B(hi, &fail); // Not a Latin-1 letter.
- __ Cmp(w10, 247 - 224); // Check for 247.
- __ B(eq, &fail);
-
- __ Bind(&loop_check);
- __ Cmp(capture_start_address, capture_end_address);
- __ B(lt, &loop);
- __ B(&success);
-
- __ Bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ Bind(&success);
- // Compute new value of character position after the matched part.
- __ Sub(current_input_offset().X(), current_position_address, input_end());
- if (masm_->emit_debug_code()) {
- __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
- __ Ccmp(current_input_offset(), 0, NoFlag, eq);
- // The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
- }
- } else {
- ASSERT(mode_ == UC16);
- int argument_count = 4;
-
- // The cached registers need to be retained.
- CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
- ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
- __ PushCPURegList(cached_registers);
-
- // Put arguments into arguments registers.
- // Parameters are
- // x0: Address byte_offset1 - Address captured substring's start.
- // x1: Address byte_offset2 - Address of current character position.
- // w2: size_t byte_length - length of capture in bytes(!)
- // x3: Isolate* isolate
-
- // Address of start of capture.
- __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
- // Length of capture.
- __ Mov(w2, capture_length);
- // Address of current input position.
- __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
- // Isolate.
- __ Mov(x3, Operand(ExternalReference::isolate_address(isolate())));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Check if function returned non-zero for success or zero for failure.
- CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
- // On success, increment position by length of capture.
- __ Add(current_input_offset(), current_input_offset(), capture_length);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
- }
-
- __ Bind(&fallthrough);
-}
-
-void RegExpMacroAssemblerA64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- Register capture_start_address = x12;
- Register capture_end_address = x13;
- Register current_position_address = x14;
- Register capture_length = w15;
-
- // Find length of back-referenced capture.
- ASSERT((start_reg % 2) == 0);
- if (start_reg < kNumCachedRegisters) {
- __ Mov(x10, GetCachedRegister(start_reg));
- __ Lsr(x11, GetCachedRegister(start_reg), kWRegSize);
- } else {
- __ Ldp(w11, w10, capture_location(start_reg, x10));
- }
- __ Sub(capture_length, w11, w10); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
- __ Add(capture_end_address,
- capture_start_address,
- Operand(capture_length, SXTW));
- __ Add(current_position_address,
- input_end(),
- Operand(current_input_offset(), SXTW));
-
- Label loop;
- __ Bind(&loop);
- if (mode_ == ASCII) {
- __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
- __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
- } else {
- ASSERT(mode_ == UC16);
- __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
- __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
- }
- __ Cmp(w10, w11);
- BranchOrBacktrack(ne, on_no_match);
- __ Cmp(capture_start_address, capture_end_address);
- __ B(lt, &loop);
-
- // Move current character position to position after match.
- __ Sub(current_input_offset().X(), current_position_address, input_end());
- if (masm_->emit_debug_code()) {
- __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
- __ Ccmp(current_input_offset(), 0, NoFlag, eq);
- // The current input offset should be <= 0, and fit in a W register.
- __ Check(le, kOffsetOutOfRange);
- }
- __ Bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerA64::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- __ And(w10, current_character(), mask);
- CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerA64::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- __ And(w10, current_character(), mask);
- CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerA64::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ Sub(w10, current_character(), minus);
- __ And(w10, w10, mask);
- CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ Sub(w10, current_character(), from);
- // Unsigned lower-or-same condition.
- CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
-}
-
-
-void RegExpMacroAssemblerA64::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ Sub(w10, current_character(), from);
- // Unsigned higher condition.
- CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerA64::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ Mov(x11, Operand(table));
- if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
- __ And(w10, current_character(), kTableMask);
- __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
- } else {
- __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
- }
- __ Ldrb(w11, MemOperand(x11, w10, UXTW));
- CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check: if c is below min, the subtraction wraps
- // to a large unsigned value and the comparison fails.
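- // For example (as in the 'd' case below), testing c in '0'..'9' takes one
- // subtraction and a single unsigned comparison:
- //
- //   __ Sub(w10, current_character(), '0');
- //   CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);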
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // One byte space characters are '\t'..'\r', ' ' and \u00a0.
- Label success;
- // Check for ' ' or 0x00a0.
- __ Cmp(current_character(), ' ');
- __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
- __ B(eq, &success);
- // Check range 0x09..0x0d.
- __ Sub(w10, current_character(), '\t');
- CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
- __ Bind(&success);
- return true;
- }
- return false;
- case 'S':
- // The emitted code for generic character classes is good enough.
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9').
- __ Sub(w10, current_character(), '0');
- CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
- return true;
- case 'D':
- // Match ASCII non-digits.
- __ Sub(w10, current_character(), '0');
- CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- // Here we emit the conditional branch only once at the end to make branch
- // prediction more efficient, even though we could branch out of here
- // as soon as a character matches.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
- if (mode_ == UC16) {
- __ Sub(w10, current_character(), 0x2028);
- // If the Z flag was set we clear the flags to force a branch.
- __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
- // ls -> !((C==1) && (Z==0))
- BranchOrBacktrack(ls, on_no_match);
- } else {
- BranchOrBacktrack(eq, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- // We have to check all 4 newline characters before emitting
- // the conditional branch.
- __ Cmp(current_character(), 0x0a);
- __ Ccmp(current_character(), 0x0d, ZFlag, ne);
- if (mode_ == UC16) {
- __ Sub(w10, current_character(), 0x2028);
- // If the Z flag was set we clear the flags to force a fall-through.
- __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
- // hi -> (C==1) && (Z==0)
- BranchOrBacktrack(hi, on_no_match);
- } else {
- BranchOrBacktrack(ne, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ Mov(x10, Operand(map));
- __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
- CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ Cmp(current_character(), 'z');
- __ B(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ Mov(x10, Operand(map));
- __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
- CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
- __ Bind(&done);
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerA64::Fail() {
- __ Mov(w0, FAILURE);
- __ B(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
- Label return_w0;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ Bind(&entry_label_);
-
- // Arguments on entry:
- // x0: String* input
- // x1: int start_offset
- // x2: byte* input_start
- // x3: byte* input_end
- // x4: int* output array
- // x5: int output array size
- // x6: Address stack_base
- // x7: int direct_call
-
- // The stack pointer should be csp on entry.
- // csp[8]: address of the current isolate
- // csp[0]: secondary link/return address used by native call
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Push registers on the stack, only push the argument registers that we need.
- CPURegList argument_registers(x0, x5, x6, x7);
-
- CPURegList registers_to_retain = kCalleeSaved;
- ASSERT(kCalleeSaved.Count() == 11);
- registers_to_retain.Combine(lr);
-
- ASSERT(csp.Is(__ StackPointer()));
- __ PushCPURegList(registers_to_retain);
- __ PushCPURegList(argument_registers);
-
- // Set frame pointer in place.
- __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
-
- // Initialize callee-saved registers.
- __ Mov(start_offset(), w1);
- __ Mov(input_start(), x2);
- __ Mov(input_end(), x3);
- __ Mov(output_array(), x4);
-
- // Set the number of registers we will need to allocate, that is:
- // - success_counter (X register)
- // - (num_registers_ - kNumCachedRegisters) (W registers)
- int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
- // Do not allocate registers on the stack if they can all be cached.
- if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
- // Make room for the success_counter.
- num_wreg_to_allocate += 2;
-
- // Make sure the stack alignment will be respected.
- int alignment = masm_->ActivationFrameAlignment();
- ASSERT_EQ(alignment % 16, 0);
- int align_mask = (alignment / kWRegSizeInBytes) - 1;
- num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
-
- // Check if we have space on the stack.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ Mov(x10, Operand(stack_limit));
- __ Ldr(x10, MemOperand(x10));
- __ Subs(x10, csp, x10);
-
- // Handle it if the stack pointer is already below the stack limit.
- __ B(ls, &stack_limit_hit);
-
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Cmp(x10, num_wreg_to_allocate * kWRegSizeInBytes);
- __ B(hs, &stack_ok);
-
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
-
- __ Bind(&stack_limit_hit);
- CallCheckStackGuardState(x10);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Cbnz(w0, &return_w0);
-
- __ Bind(&stack_ok);
-
- // Allocate space on stack.
- __ Claim(num_wreg_to_allocate, kWRegSizeInBytes);
-
- // Initialize success_counter with 0.
- __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
-
- // Find negative length (offset of start relative to end).
- __ Sub(x10, input_start(), input_end());
- if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
- __ Neg(x11, x10);
- __ Cmp(x11, (1<<30) - 1);
- __ Check(ls, kInputStringTooLong);
- }
- __ Mov(current_input_offset(), w10);
-
- // The non-position value is used as a clearing value for the
- // capture registers. It corresponds to the position of the first character
- // minus one.
- __ Sub(non_position_value(), current_input_offset(), char_size());
- __ Sub(non_position_value(), non_position_value(),
- Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
- // We can store this value twice in an X register for initializing
- // on-stack registers later.
- __ Orr(twice_non_position_value(),
- non_position_value().X(),
- Operand(non_position_value().X(), LSL, kWRegSize));
-
- // Initialize code pointer register.
- __ Mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Cbnz(start_offset(), &load_char_start_regexp);
- __ Mov(current_character(), '\n');
- __ B(&start_regexp);
-
- // Global regexp restarts matching here.
- __ Bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&start_regexp);
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) {
- ClearRegisters(0, num_saved_registers_ - 1);
- }
-
- // Initialize backtrack stack pointer.
- __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
-
- // Execute
- __ B(&start_label_);
-
- if (backtrack_label_.is_linked()) {
- __ Bind(&backtrack_label_);
- Backtrack();
- }
-
- if (success_label_.is_linked()) {
- Register first_capture_start = w15;
-
- // Save captures when successful.
- __ Bind(&success_label_);
-
- if (num_saved_registers_ > 0) {
- // V8 expects the output to be an int32_t array.
- Register capture_start = w12;
- Register capture_end = w13;
- Register input_length = w14;
-
- // Copy captures to output.
-
- // Get string length.
- __ Sub(x10, input_end(), input_start());
- if (masm_->emit_debug_code()) {
- // Check that the input string length is < 2^30.
- __ Cmp(x10, (1<<30) - 1);
- __ Check(ls, kInputStringTooLong);
- }
- // input_start is offset by start_offset on entry. We need to include
- // that offset when computing the length of the whole string.
- if (mode_ == UC16) {
- __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
- } else {
- __ Add(input_length, start_offset(), w10);
- }
-
- // Copy the results to the output array from the cached registers first.
- for (int i = 0;
- (i < num_saved_registers_) && (i < kNumCachedRegisters);
- i += 2) {
- __ Mov(capture_start.X(), GetCachedRegister(i));
- __ Lsr(capture_end.X(), capture_start.X(), kWRegSize);
- if ((i == 0) && global_with_zero_length_check()) {
- // Keep capture start for the zero-length check later.
- __ Mov(first_capture_start, capture_start);
- }
- // Offsets need to be relative to the start of the string.
- if (mode_ == UC16) {
- __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- }
-
- // Only carry on if there are more than kNumCachedRegisters capture
- // registers.
- int num_registers_left_on_stack =
- num_saved_registers_ - kNumCachedRegisters;
- if (num_registers_left_on_stack > 0) {
- Register base = x10;
- // There is always an even number of capture registers. Each pair of
- // registers describes one match with two offsets.
- ASSERT_EQ(0, num_registers_left_on_stack % 2);
- __ Add(base, frame_pointer(), kFirstCaptureOnStack);
-
- // We can unroll the loop here; we should not unroll for fewer than 2
- // registers.
- STATIC_ASSERT(kNumRegistersToUnroll > 2);
- if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
- for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- if ((i == 0) && global_with_zero_length_check()) {
- // Keep capture start for the zero-length check later.
- __ Mov(first_capture_start, capture_start);
- }
- // Offsets need to be relative to the start of the string.
- if (mode_ == UC16) {
- __ Add(capture_start,
- input_length,
- Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- }
- } else {
- Label loop, start;
- __ Mov(x11, num_registers_left_on_stack);
-
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- if (global_with_zero_length_check()) {
- __ Mov(first_capture_start, capture_start);
- }
- __ B(&start);
-
- __ Bind(&loop);
- __ Ldp(capture_end,
- capture_start,
- MemOperand(base, -kPointerSize, PostIndex));
- __ Bind(&start);
- if (mode_ == UC16) {
- __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
- __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
- } else {
- __ Add(capture_start, input_length, capture_start);
- __ Add(capture_end, input_length, capture_end);
- }
- // The output pointer advances for a possible global match.
- __ Stp(capture_start,
- capture_end,
- MemOperand(output_array(), kPointerSize, PostIndex));
- __ Sub(x11, x11, 2);
- __ Cbnz(x11, &loop);
- }
- }
- }
-
- if (global()) {
- Register success_counter = w0;
- Register output_size = x10;
- // Restart matching if the regular expression is flagged as global.
-
- // Increment success counter.
- __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
- __ Add(success_counter, success_counter, 1);
- __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
-
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
- __ Sub(output_size, output_size, num_saved_registers_);
- // Check whether we have enough room for another set of capture results.
- __ Cmp(output_size, num_saved_registers_);
- __ B(lt, &return_w0);
-
- // The output pointer is already set to the next field in the output
- // array.
- // Update output size on the frame before we restart matching.
- __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- __ Cmp(current_input_offset(), first_capture_start);
- // Not a zero-length match, restart.
- __ B(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ Cbz(current_input_offset(), &return_w0);
- // Advance current position after a zero-length match.
- __ Add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ B(&load_char_start_regexp);
- } else {
- __ Mov(w0, SUCCESS);
- }
- }
-
- if (exit_label_.is_linked()) {
- // Exit and return w0
- __ Bind(&exit_label_);
- if (global()) {
- __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
- }
- }
-
- __ Bind(&return_w0);
-
-  // Set the stack pointer back to the first register to retain, so that the
-  // pop below restores the retained (callee-saved) registers.
- ASSERT(csp.Is(__ StackPointer()));
- __ Mov(csp, fp);
-
- // Restore registers.
- __ PopCPURegList(registers_to_retain);
-
- __ Ret();
-
- Label exit_with_exception;
-  // Registers x0 to x7 are used to store the first captures; they need to be
-  // retained over calls to C++ code.
- CPURegList cached_registers(CPURegister::kRegister, kXRegSize, 0, 7);
- ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
-
- if (check_preempt_label_.is_linked()) {
- __ Bind(&check_preempt_label_);
- SaveLinkRegister();
- // The cached registers need to be retained.
- __ PushCPURegList(cached_registers);
- CallCheckStackGuardState(x10);
- // Returning from the regexp code restores the stack (csp <- fp)
- // so we don't need to drop the link register from it before exiting.
- __ Cbnz(w0, &return_w0);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
- RestoreLinkRegister();
- __ Ret();
- }
-
- if (stack_overflow_label_.is_linked()) {
- __ Bind(&stack_overflow_label_);
- SaveLinkRegister();
- // The cached registers need to be retained.
- __ PushCPURegList(cached_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- __ Mov(x2, Operand(ExternalReference::isolate_address(isolate())));
- __ Add(x1, frame_pointer(), kStackBase);
- __ Mov(x0, backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, 3);
-    // If NULL is returned, we have failed to grow the stack and
-    // must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (csp <- fp)
- // so we don't need to drop the link register from it before exiting.
- __ Cbz(w0, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ Mov(backtrack_stackpointer(), x0);
- // Reset the cached registers.
- __ PopCPURegList(cached_registers);
- RestoreLinkRegister();
- __ Ret();
- }
-
- if (exit_with_exception.is_linked()) {
- __ Bind(&exit_with_exception);
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerA64::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-void RegExpMacroAssemblerA64::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- Register to_compare = GetRegister(reg, w10);
- CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerA64::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- Register to_compare = GetRegister(reg, w10);
- CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerA64::IfRegisterEqPos(int reg,
- Label* if_eq) {
- Register to_compare = GetRegister(reg, w10);
- __ Cmp(to_compare, current_input_offset());
- BranchOrBacktrack(eq, if_eq);
-}
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerA64::Implementation() {
- return kA64Implementation;
-}
-
-
-void RegExpMacroAssemblerA64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- // TODO(pielan): Make sure long strings are caught before this, and not
- // just asserted in debug mode.
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- // Be sane! (And ensure that an int32_t can be used to index the string)
- ASSERT(cp_offset < (1<<30));
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerA64::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerA64::PopRegister(int register_index) {
- Pop(w10);
- StoreRegister(register_index, w10);
-}
-
-
-void RegExpMacroAssemblerA64::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
- } else {
- __ Adr(x10, label);
- __ Sub(x10, x10, code_pointer());
- if (masm_->emit_debug_code()) {
- __ Cmp(x10, kWRegMask);
- // The code offset has to fit in a W register.
- __ Check(ls, kOffsetOutOfRange);
- }
- }
- Push(w10);
- CheckStackLimit();
-}
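-
-// Note (illustrative, not from the original source): in both branches above,
-// the value pushed is the label's offset from code_pointer() (the tagged
-// code object pointer), so a later backtrack can rebuild an absolute target
-// address simply by adding code_pointer() back to the popped value.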
-
-
-void RegExpMacroAssemblerA64::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerA64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- Register to_push = GetRegister(register_index, w10);
- Push(to_push);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerA64::ReadCurrentPositionFromRegister(int reg) {
- Register cached_register;
- RegisterState register_state = GetRegisterState(reg);
- switch (register_state) {
- case STACKED:
- __ Ldr(current_input_offset(), register_location(reg));
- break;
- case CACHED_LSW:
- cached_register = GetCachedRegister(reg);
- __ Mov(current_input_offset(), cached_register.W());
- break;
- case CACHED_MSW:
- cached_register = GetCachedRegister(reg);
- __ Lsr(current_input_offset().X(), cached_register, kWRegSize);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void RegExpMacroAssemblerA64::ReadStackPointerFromRegister(int reg) {
- Register read_from = GetRegister(reg, w10);
- __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
- __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
-}
-
-
-void RegExpMacroAssemblerA64::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ Cmp(current_input_offset(), -by * char_size());
- __ B(ge, &after_position);
- __ Mov(current_input_offset(), -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerA64::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- Register set_to = wzr;
- if (to != 0) {
- set_to = w10;
- __ Mov(set_to, to);
- }
- StoreRegister(register_index, set_to);
-}
-
-
-bool RegExpMacroAssemblerA64::Succeed() {
- __ B(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerA64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- Register position = current_input_offset();
- if (cp_offset != 0) {
- position = w10;
- __ Add(position, current_input_offset(), cp_offset * char_size());
- }
- StoreRegister(reg, position);
-}
-
-
-void RegExpMacroAssemblerA64::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- int num_registers = reg_to - reg_from + 1;
-
- // If the first capture register is cached in a hardware register but not
- // aligned on a 64-bit one, we need to clear the first one specifically.
- if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
- StoreRegister(reg_from, non_position_value());
- num_registers--;
- reg_from++;
- }
-
- // Clear cached registers in pairs as far as possible.
- while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
- ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
- __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
- reg_from += 2;
- num_registers -= 2;
- }
-
- if ((num_registers % 2) == 1) {
- StoreRegister(reg_from, non_position_value());
- num_registers--;
- reg_from++;
- }
-
- if (num_registers > 0) {
- // If there are some remaining registers, they are stored on the stack.
- ASSERT(reg_from >= kNumCachedRegisters);
-
-    // Shift the register indices down by kNumCachedRegisters so that they
-    // give the correct offsets into the on-stack register area.
- reg_from -= kNumCachedRegisters;
- reg_to -= kNumCachedRegisters;
-    // We should not unroll the loop for fewer than 2 registers.
-    STATIC_ASSERT(kNumRegistersToUnroll > 2);
-    // Position the base pointer at register (reg_from + 1), so that each
-    // 64-bit store below clears a pair of adjacent registers.
- int base_offset = kFirstRegisterOnStack -
- kWRegSizeInBytes - (kWRegSizeInBytes * reg_from);
- if (num_registers > kNumRegistersToUnroll) {
- Register base = x10;
- __ Add(base, frame_pointer(), base_offset);
-
- Label loop;
- __ Mov(x11, num_registers);
- __ Bind(&loop);
- __ Str(twice_non_position_value(),
- MemOperand(base, -kPointerSize, PostIndex));
- __ Sub(x11, x11, 2);
- __ Cbnz(x11, &loop);
- } else {
- for (int i = reg_from; i <= reg_to; i += 2) {
- __ Str(twice_non_position_value(),
- MemOperand(frame_pointer(), base_offset));
- base_offset -= kWRegSizeInBytes * 2;
- }
- }
- }
-}
-
-
-void RegExpMacroAssemblerA64::WriteStackPointerToRegister(int reg) {
- __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
- __ Sub(x10, backtrack_stackpointer(), x10);
- if (masm_->emit_debug_code()) {
- __ Cmp(x10, Operand(w10, SXTW));
- // The stack offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
- }
- StoreRegister(reg, w10);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return *reinterpret_cast<T*>(re_frame + frame_offset);
-}
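-
-// For illustration (not part of the original source): frame_entry() yields a
-// typed lvalue into the raw frame, so the same helper both reads and writes
-// frame slots, as in CheckStackGuardState() below:
-//
-//   Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
-//   frame_entry<const String*>(re_frame, kInput) = *subject;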
-
-
-int RegExpMacroAssemblerA64::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame,
- int start_offset,
- const byte** input_start,
- const byte** input_end) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
-  // If this is not a real stack overflow, the stack guard was used to
-  // interrupt execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInput));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and a UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = *input_start;
-
- // Find the current start address of the same character at the current string
- // position.
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_offset + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = *input_end;
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInput) = *subject;
- *input_start = new_address;
- *input_end = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
- // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address, but it
-    // will change the pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInput) = *subject;
- }
-
- return 0;
-}
-
-
-void RegExpMacroAssemblerA64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- CompareAndBranchOrBacktrack(current_input_offset(),
- -cp_offset * char_size(),
- ge,
- on_outside_input);
-}
-
-
-bool RegExpMacroAssemblerA64::CanReadUnaligned() {
- // TODO(pielan): See whether or not we should disable unaligned accesses.
- return !slow_safe();
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
-  // Allocate space on the stack to store the return address. The
-  // CheckStackGuardState C++ function will overwrite it if the code
-  // moved. Allocate extra space for two arguments passed by pointer.
-  // AAPCS64 requires the stack to be 16-byte aligned.
- int alignment = masm_->ActivationFrameAlignment();
- ASSERT_EQ(alignment % 16, 0);
- int align_mask = (alignment / kXRegSizeInBytes) - 1;
- int xreg_to_claim = (3 + align_mask) & ~align_mask;
-
- ASSERT(csp.Is(__ StackPointer()));
- __ Claim(xreg_to_claim);
-
- // CheckStackGuardState needs the end and start addresses of the input string.
- __ Poke(input_end(), 2 * kPointerSize);
- __ Add(x5, csp, 2 * kPointerSize);
- __ Poke(input_start(), kPointerSize);
- __ Add(x4, csp, kPointerSize);
-
- __ Mov(w3, start_offset());
- // RegExp code frame pointer.
- __ Mov(x2, frame_pointer());
- // Code* of self.
- __ Mov(x1, Operand(masm_->CodeObject()));
-
- // We need to pass a pointer to the return address as first argument.
- // The DirectCEntry stub will place the return address on the stack before
- // calling so the stack pointer will point to it.
- __ Mov(x0, csp);
-
- ExternalReference check_stack_guard_state =
- ExternalReference::re_check_stack_guard_state(isolate());
- __ Mov(scratch, Operand(check_stack_guard_state));
- DirectCEntryStub stub;
- stub.GenerateCall(masm_, scratch);
-
- // The input string may have been moved in memory, we need to reload it.
- __ Peek(input_start(), kPointerSize);
- __ Peek(input_end(), 2 * kPointerSize);
-
- ASSERT(csp.Is(__ StackPointer()));
- __ Drop(xreg_to_claim);
-
- // Reload the Code pointer.
- __ Mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-void RegExpMacroAssemblerA64::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ B(to);
- return;
- }
- if (to == NULL) {
- to = &backtrack_label_;
- }
- // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
- Condition inverted_condition = InvertCondition(condition);
- Label no_branch;
- __ B(inverted_condition, &no_branch);
- __ B(to);
- __ Bind(&no_branch);
-}
-
-void RegExpMacroAssemblerA64::CompareAndBranchOrBacktrack(Register reg,
- int immediate,
- Condition condition,
- Label* to) {
- if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
- if (to == NULL) {
- to = &backtrack_label_;
- }
- // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
- Label no_branch;
- if (condition == eq) {
- __ Cbnz(reg, &no_branch);
- } else {
- __ Cbz(reg, &no_branch);
- }
- __ B(to);
- __ Bind(&no_branch);
- } else {
- __ Cmp(reg, immediate);
- BranchOrBacktrack(condition, to);
- }
-}
-
-
-void RegExpMacroAssemblerA64::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ Mov(x10, Operand(stack_limit));
- __ Ldr(x10, MemOperand(x10));
- ASSERT(csp.Is(__ StackPointer()));
- __ Cmp(csp, x10);
- CallIf(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerA64::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(isolate());
- __ Mov(x10, Operand(stack_limit));
- __ Ldr(x10, MemOperand(x10));
- __ Cmp(backtrack_stackpointer(), x10);
- CallIf(&stack_overflow_label_, ls);
-}
-
-
-void RegExpMacroAssemblerA64::Push(Register source) {
- ASSERT(source.Is32Bits());
- ASSERT(!source.is(backtrack_stackpointer()));
- __ Str(source,
- MemOperand(backtrack_stackpointer(),
- -static_cast<int>(kWRegSizeInBytes),
- PreIndex));
-}
-
-
-void RegExpMacroAssemblerA64::Pop(Register target) {
- ASSERT(target.Is32Bits());
- ASSERT(!target.is(backtrack_stackpointer()));
- __ Ldr(target,
- MemOperand(backtrack_stackpointer(), kWRegSizeInBytes, PostIndex));
-}
-
-
-Register RegExpMacroAssemblerA64::GetCachedRegister(int register_index) {
- ASSERT(register_index < kNumCachedRegisters);
- return Register::Create(register_index / 2, kXRegSize);
-}
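-
-// Illustrative sketch (not part of the original file) of the mapping used by
-// GetCachedRegister() above: capture register i lives in hardware register
-// x(i / 2); even indices occupy the low 32 bits (CACHED_LSW) and odd indices
-// the high 32 bits (CACHED_MSW). In code form:
-//
-//   int hardware_code = register_index / 2;         // x0-x7 for indices 0-15
-//   bool is_high_word = (register_index % 2) != 0;  // CACHED_MSW if true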
-
-
-Register RegExpMacroAssemblerA64::GetRegister(int register_index,
- Register maybe_result) {
- ASSERT(maybe_result.Is32Bits());
- ASSERT(register_index >= 0);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- Register result;
- RegisterState register_state = GetRegisterState(register_index);
- switch (register_state) {
- case STACKED:
- __ Ldr(maybe_result, register_location(register_index));
- result = maybe_result;
- break;
- case CACHED_LSW:
- result = GetCachedRegister(register_index).W();
- break;
- case CACHED_MSW:
- __ Lsr(maybe_result.X(), GetCachedRegister(register_index), kWRegSize);
- result = maybe_result;
- break;
- default:
- UNREACHABLE();
- break;
- }
- ASSERT(result.Is32Bits());
- return result;
-}
-
-
-void RegExpMacroAssemblerA64::StoreRegister(int register_index,
- Register source) {
- ASSERT(source.Is32Bits());
- ASSERT(register_index >= 0);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
-
- Register cached_register;
- RegisterState register_state = GetRegisterState(register_index);
- switch (register_state) {
- case STACKED:
- __ Str(source, register_location(register_index));
- break;
- case CACHED_LSW:
- cached_register = GetCachedRegister(register_index);
- if (!source.Is(cached_register.W())) {
- __ Bfi(cached_register, source.X(), 0, kWRegSize);
- }
- break;
- case CACHED_MSW:
- cached_register = GetCachedRegister(register_index);
- __ Bfi(cached_register, source.X(), kWRegSize, kWRegSize);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void RegExpMacroAssemblerA64::CallIf(Label* to, Condition condition) {
- Label skip_call;
- if (condition != al) __ B(&skip_call, InvertCondition(condition));
- __ Bl(to);
- __ Bind(&skip_call);
-}
-
-
-void RegExpMacroAssemblerA64::RestoreLinkRegister() {
- ASSERT(csp.Is(__ StackPointer()));
- __ Pop(lr, xzr);
- __ Add(lr, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerA64::SaveLinkRegister() {
- ASSERT(csp.Is(__ StackPointer()));
- __ Sub(lr, lr, Operand(masm_->CodeObject()));
- __ Push(xzr, lr);
-}
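-
-// Note (illustrative): SaveLinkRegister() stores lr as an offset from the
-// code object rather than as a raw address. If a GC moves the code object,
-// the saved offset stays valid, and RestoreLinkRegister() rebuilds a correct
-// absolute address by adding the (possibly relocated) CodeObject() back.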
-
-
-MemOperand RegExpMacroAssemblerA64::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- ASSERT(register_index >= kNumCachedRegisters);
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- register_index -= kNumCachedRegisters;
- int offset = kFirstRegisterOnStack - register_index * kWRegSizeInBytes;
- return MemOperand(frame_pointer(), offset);
-}
-
-MemOperand RegExpMacroAssemblerA64::capture_location(int register_index,
- Register scratch) {
- ASSERT(register_index < (1<<30));
- ASSERT(register_index < num_saved_registers_);
- ASSERT(register_index >= kNumCachedRegisters);
- ASSERT_EQ(register_index % 2, 0);
- register_index -= kNumCachedRegisters;
- int offset = kFirstCaptureOnStack - register_index * kWRegSizeInBytes;
- // capture_location is used with Stp instructions to load/store 2 registers.
- // The immediate field in the encoding is limited to 7 bits (signed).
- if (is_int7(offset)) {
- return MemOperand(frame_pointer(), offset);
- } else {
- __ Add(scratch, frame_pointer(), offset);
- return MemOperand(scratch);
- }
-}
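-
-// Note (illustrative): is_int7(offset) accepts byte offsets in [-64, 63],
-// which is the range this code folds into the Ldp/Stp addressing mode;
-// larger offsets are materialized into the scratch register instead.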
-
-void RegExpMacroAssemblerA64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
-
-  // The ldr, str, ldrh and strh instructions can do unaligned accesses if the
-  // CPU and the operating system running on the target allow it.
-  // If unaligned loads and stores are not supported, then this function must
-  // only be used to load a single character at a time.
-
- // ARMv8 supports unaligned accesses but V8 or the kernel can decide to
- // disable it.
- // TODO(pielan): See whether or not we should disable unaligned accesses.
- if (!CanReadUnaligned()) {
- ASSERT(characters == 1);
- }
-
- if (cp_offset != 0) {
- if (masm_->emit_debug_code()) {
- __ Mov(x10, cp_offset * char_size());
- __ Add(x10, x10, Operand(current_input_offset(), SXTW));
- __ Cmp(x10, Operand(w10, SXTW));
- // The offset needs to fit in a W register.
- __ Check(eq, kOffsetOutOfRange);
- } else {
- __ Add(w10, current_input_offset(), cp_offset * char_size());
- }
- offset = w10;
- }
-
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
- } else if (characters == 2) {
- __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
- } else {
- ASSERT(characters == 1);
- __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
- } else {
- ASSERT(characters == 1);
- __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
- }
- }
-}
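-
-// For example (illustrative): in UC16 mode with characters == 2, the single
-// 32-bit Ldr above fetches two consecutive 16-bit code units in one access;
-// this is why CanReadUnaligned() must be checked before loading more than
-// one character at a time.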
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/regexp-macro-assembler-a64.h b/deps/v8/src/a64/regexp-macro-assembler-a64.h
deleted file mode 100644
index 0f6b44b9fe..0000000000
--- a/deps/v8/src/a64/regexp-macro-assembler-a64.h
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
-#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
-
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerA64();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame,
- int start_offset,
- const byte** input_start,
- const byte** input_end);
-
- private:
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Callee-saved registers x19-x29, where x29 is the old frame pointer.
- static const int kCalleeSavedRegisters = 0;
- // Return address.
- // It is placed above the 11 callee-saved registers.
- static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameter placed by caller.
- static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
- static const int kStackBase = kDirectCall - kPointerSize;
- static const int kOutputSize = kStackBase - kPointerSize;
- static const int kInput = kOutputSize - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessCounter = kInput - kPointerSize;
- // First position register address on the stack. Following positions are
- // below it. A position is a 32 bit value.
- static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
-  // A capture is a 64 bit value holding two positions.
- static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
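-
-  // Summary of the layout implied by the offsets above, assuming
-  // kPointerSize == 8 (illustrative, not part of the original source):
-  //   fp + 104 : kIsolate
-  //   fp +  96 : kSecondaryReturnAddress
-  //   fp +  88 : kReturnAddress
-  //   fp +   0 : kCalleeSavedRegisters (old frame pointer)
-  //   fp -   8 : kDirectCall
-  //   fp -  16 : kStackBase
-  //   fp -  24 : kOutputSize
-  //   fp -  32 : kInput
-  //   fp -  40 : kSuccessCounter
-  //   fp -  44 : kFirstRegisterOnStack (a position is 32 bits)
-  //   fp -  48 : kFirstCaptureOnStack (a capture is 64 bits)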
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
-  // When initializing registers to a non-position value we can unroll the
-  // loop. This constant is the maximum number of registers for which the
-  // loop is unrolled.
- static const int kNumRegistersToUnroll = 16;
-
-  // We are using x0 to x7 as a register cache. Each hardware register must
-  // contain one capture, that is, two 32 bit registers. We can cache at most
-  // 16 registers.
- static const int kNumCachedRegisters = 16;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // Location of a 32 bit position register.
- MemOperand register_location(int register_index);
-
- // Location of a 64 bit capture, combining two position registers.
- MemOperand capture_location(int register_index, Register scratch);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- Register current_input_offset() { return w21; }
-
- // The register containing the current character after LoadCurrentCharacter.
- Register current_character() { return w22; }
-
- // Register holding address of the end of the input string.
- Register input_end() { return x25; }
-
- // Register holding address of the start of the input string.
- Register input_start() { return x26; }
-
- // Register holding the offset from the start of the string where we should
- // start matching.
- Register start_offset() { return w27; }
-
- // Pointer to the output array's first element.
- Register output_array() { return x28; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- Register backtrack_stackpointer() { return x23; }
-
- // Register holding pointer to the current code object.
- Register code_pointer() { return x20; }
-
- // Register holding the value used for clearing capture registers.
- Register non_position_value() { return w24; }
- // The top 32 bit of this register is used to store this value
- // twice. This is used for clearing more than one register at a time.
- Register twice_non_position_value() { return x24; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
-  // Compares reg against immediate before calling BranchOrBacktrack.
- // It makes use of the Cbz and Cbnz instructions.
- void CompareAndBranchOrBacktrack(Register reg,
- int immediate,
- Condition condition,
- Label* to);
-
- inline void CallIf(Label* to, Condition condition);
-
- // Save and restore the link register on the stack in a way that
- // is GC-safe.
- inline void SaveLinkRegister();
- inline void RestoreLinkRegister();
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // This state indicates where the register actually is.
- enum RegisterState {
- STACKED, // Resides in memory.
- CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
- CACHED_MSW // Most Significant Word of a 64 bit hardware register.
- };
-
- RegisterState GetRegisterState(int register_index) {
- ASSERT(register_index >= 0);
- if (register_index >= kNumCachedRegisters) {
- return STACKED;
- } else {
- if ((register_index % 2) == 0) {
- return CACHED_LSW;
- } else {
- return CACHED_MSW;
- }
- }
- }
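-
-  // For example (illustrative): with kNumCachedRegisters == 16, register 4
-  // is CACHED_LSW (the low word of x2), register 5 is CACHED_MSW (the high
-  // word of x2), and register 16 is the first STACKED register.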
-
- // Store helper that takes the state of the register into account.
- inline void StoreRegister(int register_index, Register source);
-
- // Returns a hardware W register that holds the value of the capture
- // register.
- //
- // This function will try to use an existing cache register (w0-w7) for the
- // result. Otherwise, it will load the value into maybe_result.
- //
- // If the returned register is anything other than maybe_result, calling code
- // must not write to it.
- inline Register GetRegister(int register_index, Register maybe_result);
-
-  // Returns the hardware register (x0-x7) holding the value of the capture
- // register.
- // This assumes that the state of the register is not STACKED.
- inline Register GetCachedRegister(int register_index);
-
- Isolate* isolate() const { return masm_->isolate(); }
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
diff --git a/deps/v8/src/a64/simulator-a64.cc b/deps/v8/src/a64/simulator-a64.cc
deleted file mode 100644
index 014b71477d..0000000000
--- a/deps/v8/src/a64/simulator-a64.cc
+++ /dev/null
@@ -1,3414 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <cmath>
-#include <cstdarg>
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "disasm.h"
-#include "assembler.h"
-#include "a64/simulator-a64.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-#if defined(USE_SIMULATOR)
-
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the
-// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-
-// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
-void PRINTF_CHECKING TraceSim(const char* format, ...) {
- if (FLAG_trace_sim) {
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
- }
-}
-
-
-const Instruction* Simulator::kEndOfSimAddress = NULL;
-
-
-void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
- int width = msb - lsb + 1;
- ASSERT(is_uintn(bits, width) || is_intn(bits, width));
-
- bits <<= lsb;
- uint32_t mask = ((1 << width) - 1) << lsb;
- ASSERT((mask & write_ignore_mask_) == 0);
-
- value_ = (value_ & ~mask) | (bits & mask);
-}
-
-
-SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
- switch (id) {
- case NZCV:
- return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
- case FPCR:
- return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
- default:
- UNREACHABLE();
- return SimSystemRegister();
- }
-}
-
-
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ExternalReference::set_redirector(isolate, &RedirectExternalReference);
-}
-
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
- Isolate::PerIsolateThreadData* isolate_data =
- isolate->FindOrAllocatePerThreadDataForThisThread();
- ASSERT(isolate_data != NULL);
-
- Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread/isolate goes away.
- sim = new Simulator(new Decoder(), isolate);
- isolate_data->set_simulator(sim);
- }
- return sim;
-}
-
-
-void Simulator::CallVoid(byte* entry, CallArgument* args) {
- int index_x = 0;
- int index_d = 0;
-
- std::vector<int64_t> stack_args(0);
- for (int i = 0; !args[i].IsEnd(); i++) {
- CallArgument arg = args[i];
- if (arg.IsX() && (index_x < 8)) {
- set_xreg(index_x++, arg.bits());
- } else if (arg.IsD() && (index_d < 8)) {
- set_dreg_bits(index_d++, arg.bits());
- } else {
- ASSERT(arg.IsD() || arg.IsX());
- stack_args.push_back(arg.bits());
- }
- }
-
- // Process stack arguments, and make sure the stack is suitably aligned.
- uintptr_t original_stack = sp();
- uintptr_t entry_stack = original_stack -
- stack_args.size() * sizeof(stack_args[0]);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- char * stack = reinterpret_cast<char*>(entry_stack);
- std::vector<int64_t>::const_iterator it;
- for (it = stack_args.begin(); it != stack_args.end(); it++) {
- memcpy(stack, &(*it), sizeof(*it));
- stack += sizeof(*it);
- }
-
- ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
- set_sp(entry_stack);
-
- // Call the generated code.
- set_pc(entry);
- set_lr(kEndOfSimAddress);
- CheckPCSComplianceAndRun();
-
- set_sp(original_stack);
-}
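-
-// Summary (illustrative, not from the original source): the loop above
-// follows AAPCS64 argument passing. The first eight X arguments go in x0-x7,
-// the first eight D arguments in d0-d7, and the rest are copied below the
-// current stack pointer, which is realigned to the platform's activation
-// frame alignment before the call.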
-
-
-int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return xreg(0);
-}
-
-
-double Simulator::CallDouble(byte* entry, CallArgument* args) {
- CallVoid(entry, args);
- return dreg(0);
-}
-
-
-int64_t Simulator::CallJS(byte* entry,
- byte* function_entry,
- JSFunction* func,
-                          Object* recv,
- int64_t argc,
- Object*** argv) {
- CallArgument args[] = {
- CallArgument(function_entry),
- CallArgument(func),
-    CallArgument(recv),
- CallArgument(argc),
- CallArgument(argv),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-int64_t Simulator::CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- void* return_address,
- Isolate* isolate) {
- CallArgument args[] = {
- CallArgument(input),
- CallArgument(start_offset),
- CallArgument(input_start),
- CallArgument(input_end),
- CallArgument(output),
- CallArgument(output_size),
- CallArgument(stack_base),
- CallArgument(direct_call),
- CallArgument(return_address),
- CallArgument(isolate),
- CallArgument::End()
- };
- return CallInt64(entry, args);
-}
-
-
-void Simulator::CheckPCSComplianceAndRun() {
-#ifdef DEBUG
- CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
- CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
-
- int64_t saved_registers[kNumberOfCalleeSavedRegisters];
- uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
-
- CPURegList register_list = kCalleeSaved;
- CPURegList fpregister_list = kCalleeSavedFP;
-
- for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
-    // x31 is not a callee-saved register, so there is no need to specify
-    // whether we want the stack pointer or the zero register.
- saved_registers[i] = xreg(register_list.PopLowestIndex().code());
- }
- for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
- saved_fpregisters[i] =
- dreg_bits(fpregister_list.PopLowestIndex().code());
- }
- int64_t original_stack = sp();
-#endif
- // Start the simulation!
- Run();
-#ifdef DEBUG
- CHECK_EQ(original_stack, sp());
- // Check that callee-saved registers have been preserved.
- register_list = kCalleeSaved;
- fpregister_list = kCalleeSavedFP;
- for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
- CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
- }
- for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
- ASSERT(saved_fpregisters[i] ==
- dreg_bits(fpregister_list.PopLowestIndex().code()));
- }
-
-  // Corrupt the caller-saved registers, except for the return registers.
-
-  // In theory x0 to x7 can be used for return values, but V8 only uses x0
-  // and x1 for now.
- register_list = kCallerSaved;
- register_list.Remove(x0);
- register_list.Remove(x1);
-
-  // In theory d0 to d7 can be used for return values, but V8 only uses d0
-  // for now.
- fpregister_list = kCallerSavedFP;
- fpregister_list.Remove(d0);
-
- CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
- CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
-#endif
-}
-
-
-#ifdef DEBUG
-// The least significant byte of the corruption value holds the corresponding
-// register's code.
-void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
- if (list->type() == CPURegister::kRegister) {
- while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
- set_xreg(code, value | code);
- }
- } else {
- ASSERT(list->type() == CPURegister::kFPRegister);
- while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
- set_dreg_bits(code, value | code);
- }
- }
-}
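-
-// For example (illustrative): after corruption, x13 holds
-// (kCallerSavedRegisterCorruptionValue | 13), so a stray read of a register
-// that should not survive a call is easy to attribute to a specific register
-// when the corrupted value shows up in a result.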
-
-
-void Simulator::CorruptAllCallerSavedCPURegisters() {
- // Corrupt alters its parameter so copy them first.
- CPURegList register_list = kCallerSaved;
- CPURegList fpregister_list = kCallerSavedFP;
-
- CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
- CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
-}
-#endif
-
-
-// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
-// TODO(all): Insert a marker in the extra space allocated on the stack.
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
- intptr_t new_sp = sp() - 2 * kXRegSizeInBytes;
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_sp(new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- intptr_t current_sp = sp();
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes);
- set_sp(current_sp + 2 * kXRegSizeInBytes);
- return address;
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin to prevent overrunning the stack when pushing
-  // values.
-  // TODO(all): Increase the stack limit protection.
-
-  // The margin was decreased from 1024 to 256 bytes because we are using the
-  // stack intensively; usage should decrease as our code improves, and then
-  // we can set the margin back to 1024.
- return reinterpret_cast<uintptr_t>(stack_limit_) + 256;
-}
-
-
-Simulator::Simulator(Decoder* decoder, Isolate* isolate, FILE* stream)
- : decoder_(decoder), last_debugger_input_(NULL), log_parameters_(NO_PARAM),
- isolate_(isolate) {
-  // Set up the decoder.
- decoder_->AppendVisitor(this);
-
- ResetState();
-
-  // Allocate and set up the simulator stack.
- stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
- stack_ = new byte[stack_size_];
- stack_limit_ = stack_ + stack_protection_size_;
- byte* tos = stack_ + stack_size_ - stack_protection_size_;
-  // The stack pointer must be 16-byte aligned.
- set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
-
- stream_ = stream;
- print_disasm_ = new PrintDisassembler(stream_);
-
- if (FLAG_trace_sim) {
- decoder_->InsertVisitorBefore(print_disasm_, this);
- log_parameters_ = LOG_ALL;
- }
-
- // The debugger needs to disassemble code without the simulator executing an
- // instruction, so we create a dedicated decoder.
- disassembler_decoder_ = new Decoder();
- disassembler_decoder_->AppendVisitor(print_disasm_);
-
- if (FLAG_log_instruction_stats) {
- instrument_ = new Instrument(FLAG_log_instruction_file,
- FLAG_log_instruction_period);
- decoder_->AppendVisitor(instrument_);
- }
-}
-
-
-void Simulator::ResetState() {
- // Reset the system registers.
- nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
- fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
-
- // Reset registers to 0.
- pc_ = NULL;
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- set_xreg(i, 0xbadbeef);
- }
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
- set_dreg_bits(i, 0x7ff000007f800001UL);
- }
- // Returning to address 0 exits the Simulator.
- set_lr(kEndOfSimAddress);
-
- // Reset debug helpers.
-  breakpoints_.clear();
-  break_on_next_ = false;
-}
-
-
-Simulator::~Simulator() {
- delete[] stack_;
- if (FLAG_log_instruction_stats) {
- delete instrument_;
- }
- delete disassembler_decoder_;
- delete print_disasm_;
- DeleteArray(last_debugger_input_);
-}
-
-
-void Simulator::Run() {
- pc_modified_ = false;
- while (pc_ != kEndOfSimAddress) {
- ExecuteInstruction();
- }
-}
-
-
-void Simulator::RunFrom(Instruction* start) {
- set_pc(start);
- Run();
-}
-
-
-void Simulator::CheckStackAlignment() {
- // TODO(aleram): The sp alignment check to perform depends on the processor
- // state. Check the specifications for more details.
-}
-
-
-// When the generated code calls an external reference we need to catch that
-// in the simulator. The external reference will be a function compiled for
-// the host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a HLT instruction that is handled by the simulator. We write
-// the original destination of the jump just at a known offset from the HLT
-// instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- type_(type),
- next_(NULL) {
- redirect_call_.SetInstructionBits(
- HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
- Isolate* isolate = Isolate::Current();
- next_ = isolate->simulator_redirection();
- // TODO(all): Simulator flush I cache
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_redirect_call() {
- return reinterpret_cast<void*>(&redirect_call_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(void* external_function,
- ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
- Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) {
- ASSERT_EQ(current->type(), type);
- return current;
- }
- }
- return new Redirection(external_function, type);
- }
-
- static Redirection* FromHltInstruction(Instruction* redirect_call) {
- char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
- char* addr_of_redirection =
- addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- static void* ReverseRedirection(int64_t reg) {
- Redirection* redirection =
- FromHltInstruction(reinterpret_cast<Instruction*>(reg));
- return redirection->external_function();
- }
-
- private:
- void* external_function_;
- Instruction redirect_call_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
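-
-// The mechanism above, end to end (illustrative summary, not from the
-// original source): generated code jumps to address_of_redirect_call(), the
-// simulator decodes the HLT placed there, uses FromHltInstruction() and the
-// fixed offset of redirect_call_ within the object to recover the
-// Redirection, and then calls external_function() on the host, with type()
-// indicating how arguments and return values should be marshalled.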
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_redirect_call();
-}
-
-
-const char* Simulator::xreg_names[] = {
-"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
-"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
-
-const char* Simulator::wreg_names[] = {
-"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
-"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
-"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
-
-const char* Simulator::sreg_names[] = {
-"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
-"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
-"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
-
-const char* Simulator::dreg_names[] = {
-"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
-"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
-
-const char* Simulator::vreg_names[] = {
-"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
-"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
-"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
-"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
-
-
-const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
- ASSERT(code < kNumberOfRegisters);
- // If the code represents the stack pointer, index the name after zr.
- if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
- code = kZeroRegCode + 1;
- }
- return wreg_names[code];
-}
-
-
-const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
- ASSERT(code < kNumberOfRegisters);
- // If the code represents the stack pointer, index the name after zr.
- if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
- code = kZeroRegCode + 1;
- }
- return xreg_names[code];
-}
-
-
-const char* Simulator::SRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
- return sreg_names[code];
-}
-
-
-const char* Simulator::DRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
- return dreg_names[code];
-}
-
-
-const char* Simulator::VRegNameForCode(unsigned code) {
- ASSERT(code < kNumberOfFPRegisters);
- return vreg_names[code];
-}
-
-
-int Simulator::CodeFromName(const char* name) {
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if ((strcmp(xreg_names[i], name) == 0) ||
- (strcmp(wreg_names[i], name) == 0)) {
- return i;
- }
- }
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- if ((strcmp(vreg_names[i], name) == 0) ||
- (strcmp(dreg_names[i], name) == 0) ||
- (strcmp(sreg_names[i], name) == 0)) {
- return i;
- }
- }
- if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
- return kSPRegInternalCode;
- }
- return -1;
-}
-
-
-// Helpers ---------------------------------------------------------------------
-int64_t Simulator::AddWithCarry(unsigned reg_size,
- bool set_flags,
- int64_t src1,
- int64_t src2,
- int64_t carry_in) {
- ASSERT((carry_in == 0) || (carry_in == 1));
- ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
-
- uint64_t u1, u2;
- int64_t result;
- int64_t signed_sum = src1 + src2 + carry_in;
-
- uint32_t N, Z, C, V;
-
- if (reg_size == kWRegSize) {
- u1 = static_cast<uint64_t>(src1) & kWRegMask;
- u2 = static_cast<uint64_t>(src2) & kWRegMask;
-
- result = signed_sum & kWRegMask;
- // Compute the C flag by comparing the sum to the max unsigned integer.
- C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
- ((kWMaxUInt - u1 - carry_in) < u2);
- // Overflow iff the sign bit is the same for the two inputs and different
- // for the result.
- int64_t s_src1 = src1 << (kXRegSize - kWRegSize);
- int64_t s_src2 = src2 << (kXRegSize - kWRegSize);
- int64_t s_result = result << (kXRegSize - kWRegSize);
- V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
-
- } else {
- u1 = static_cast<uint64_t>(src1);
- u2 = static_cast<uint64_t>(src2);
-
- result = signed_sum;
- // Compute the C flag by comparing the sum to the max unsigned integer.
- C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
- ((kXMaxUInt - u1 - carry_in) < u2);
- // Overflow iff the sign bit is the same for the two inputs and different
- // for the result.
- V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
- }
-
- N = CalcNFlag(result, reg_size);
- Z = CalcZFlag(result);
-
- if (set_flags) {
- nzcv().SetN(N);
- nzcv().SetZ(Z);
- nzcv().SetC(C);
- nzcv().SetV(V);
- }
- return result;
-}
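-
-// Illustrative cross-check, not part of the original file: for W-sized
-// operands the C flag computed above is set exactly when u1 + u2 + carry_in
-// does not fit in 32 bits, which a 64-bit addition can test directly.
-static inline bool CarryOutW(uint64_t u1, uint64_t u2, uint64_t carry_in) {
-  // Assumes u1 and u2 are already masked to 32 bits, as in AddWithCarry().
-  return (u1 + u2 + carry_in) > kWMaxUInt;
-}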
-
-
-int64_t Simulator::ShiftOperand(unsigned reg_size,
- int64_t value,
- Shift shift_type,
- unsigned amount) {
- if (amount == 0) {
- return value;
- }
- int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
- switch (shift_type) {
- case LSL:
- return (value << amount) & mask;
- case LSR:
- return static_cast<uint64_t>(value) >> amount;
- case ASR: {
- // Shift used to restore the sign.
- unsigned s_shift = kXRegSize - reg_size;
- // Value with its sign restored.
- int64_t s_value = (value << s_shift) >> s_shift;
- return (s_value >> amount) & mask;
- }
- case ROR: {
- if (reg_size == kWRegSize) {
- value &= kWRegMask;
- }
- return (static_cast<uint64_t>(value) >> amount) |
- ((value & ((1L << amount) - 1L)) << (reg_size - amount));
- }
- default:
- UNIMPLEMENTED();
- return 0;
- }
-}
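-
-// Worked example (illustrative): for a W register,
-// ShiftOperand(kWRegSize, 0x80000001, ROR, 1) == 0xc0000000:
-// bit 0 rotates around into bit 31 while the rest shifts right by one.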
-
-
-int64_t Simulator::ExtendValue(unsigned reg_size,
- int64_t value,
- Extend extend_type,
- unsigned left_shift) {
- switch (extend_type) {
- case UXTB:
- value &= kByteMask;
- break;
- case UXTH:
- value &= kHalfWordMask;
- break;
- case UXTW:
- value &= kWordMask;
- break;
- case SXTB:
- value = (value << 56) >> 56;
- break;
- case SXTH:
- value = (value << 48) >> 48;
- break;
- case SXTW:
- value = (value << 32) >> 32;
- break;
- case UXTX:
- case SXTX:
- break;
- default:
- UNREACHABLE();
- }
- int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
- return (value << left_shift) & mask;
-}
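-
-// Worked example (illustrative): SXTB replicates bit 7, so
-// ExtendValue(kXRegSize, 0x80, SXTB, 0) yields -128 (0xffffffffffffff80),
-// whereas UXTB would yield 0x80.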
-
-
-void Simulator::FPCompare(double val0, double val1) {
- AssertSupportedFPCR();
-
- // TODO(jbramley): This assumes that the C++ implementation handles
- // comparisons in the way that we expect (as per AssertSupportedFPCR()).
-  if (std::isnan(val0) || std::isnan(val1)) {
- nzcv().SetRawValue(FPUnorderedFlag);
- } else if (val0 < val1) {
- nzcv().SetRawValue(FPLessThanFlag);
- } else if (val0 > val1) {
- nzcv().SetRawValue(FPGreaterThanFlag);
- } else if (val0 == val1) {
- nzcv().SetRawValue(FPEqualFlag);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void Simulator::SetBreakpoint(Instruction* location) {
- for (unsigned i = 0; i < breakpoints_.size(); i++) {
- if (breakpoints_.at(i).location == location) {
- PrintF("Existing breakpoint at %p was %s\n",
- reinterpret_cast<void*>(location),
- breakpoints_.at(i).enabled ? "disabled" : "enabled");
- breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
- return;
- }
- }
- Breakpoint new_breakpoint = {location, true};
- breakpoints_.push_back(new_breakpoint);
- PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
-}
-
-
-void Simulator::ListBreakpoints() {
- PrintF("Breakpoints:\n");
- for (unsigned i = 0; i < breakpoints_.size(); i++) {
- PrintF("%p : %s\n",
- reinterpret_cast<void*>(breakpoints_.at(i).location),
- breakpoints_.at(i).enabled ? "enabled" : "disabled");
- }
-}
-
-
-void Simulator::CheckBreakpoints() {
- bool hit_a_breakpoint = false;
- for (unsigned i = 0; i < breakpoints_.size(); i++) {
- if ((breakpoints_.at(i).location == pc_) &&
- breakpoints_.at(i).enabled) {
- hit_a_breakpoint = true;
- // Disable this breakpoint.
- breakpoints_.at(i).enabled = false;
- }
- }
- if (hit_a_breakpoint) {
- PrintF("Hit and disabled a breakpoint at %p.\n",
- reinterpret_cast<void*>(pc_));
- Debug();
- }
-}
-
-
-void Simulator::CheckBreakNext() {
-  // If the current instruction is a branch-and-link to a register (BLR),
-  // insert a breakpoint just after it.
- if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
- SetBreakpoint(pc_->NextInstruction());
- break_on_next_ = false;
- }
-}
-
-
-void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
- Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
- for (Instruction* pc = start; pc < end; pc = pc->NextInstruction()) {
- disassembler_decoder_->Decode(pc);
- }
-}
-
-
-void Simulator::PrintSystemRegisters(bool print_all) {
- static bool first_run = true;
-
- // Define some colour codes to use for the register dump.
- // TODO(jbramley): Find a more elegant way of defining these.
- char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
- char const * const clr_flag_name = (FLAG_log_colour) ? ("\033[1;30m") : ("");
- char const * const clr_flag_value = (FLAG_log_colour) ? ("\033[1;37m") : ("");
-
- static SimSystemRegister last_nzcv;
- if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
- fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
- clr_flag_name,
- clr_flag_value,
- N(), Z(), C(), V(),
- clr_normal);
- }
- last_nzcv = nzcv();
-
- static SimSystemRegister last_fpcr;
- if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
- static const char * rmode[] = {
- "0b00 (Round to Nearest)",
- "0b01 (Round towards Plus Infinity)",
- "0b10 (Round towards Minus Infinity)",
- "0b11 (Round towards Zero)"
- };
-    ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
- fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
- clr_flag_name,
- clr_flag_value,
- fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
- clr_normal);
- }
- last_fpcr = fpcr();
-
- first_run = false;
-}
-
-
-void Simulator::PrintRegisters(bool print_all_regs) {
- static bool first_run = true;
- static int64_t last_regs[kNumberOfRegisters];
-
- // Define some colour codes to use for the register dump.
- // TODO(jbramley): Find a more elegant way of defining these.
- char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
- char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;34m") : ("");
- char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;36m") : ("");
-
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
- if (print_all_regs || first_run ||
- (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
- fprintf(stream_,
- "# %s%4s:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name,
- XRegNameForCode(i, Reg31IsStackPointer),
- clr_reg_value,
- xreg(i, Reg31IsStackPointer),
- clr_normal);
- }
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = xreg(i, Reg31IsStackPointer);
- }
- first_run = false;
-}
-
-
-void Simulator::PrintFPRegisters(bool print_all_regs) {
- static bool first_run = true;
- static uint64_t last_regs[kNumberOfFPRegisters];
-
- // Define some colour codes to use for the register dump.
- // TODO(jbramley): Find a more elegant way of defining these.
- char const * const clr_normal = (FLAG_log_colour) ? ("\033[m") : ("");
- char const * const clr_reg_name = (FLAG_log_colour) ? ("\033[1;33m") : ("");
- char const * const clr_reg_value = (FLAG_log_colour) ? ("\033[1;35m") : ("");
-
- // Print as many rows of registers as necessary, keeping each individual
- // register in the same column each time (to make it easy to visually scan
- // for changes).
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
- fprintf(stream_,
- "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
- clr_reg_name,
- VRegNameForCode(i),
- clr_reg_value,
- dreg_bits(i),
- clr_normal,
- clr_reg_name,
- DRegNameForCode(i),
- clr_reg_value,
- dreg(i),
- clr_reg_name,
- SRegNameForCode(i),
- clr_reg_value,
- sreg(i),
- clr_normal);
- }
- // Cache the new register value so the next run can detect any changes.
- last_regs[i] = dreg_bits(i);
- }
- first_run = false;
-}
-
-
-void Simulator::PrintProcessorState() {
- PrintSystemRegisters();
- PrintRegisters();
- PrintFPRegisters();
-}
-
-
-void Simulator::PrintWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
-  // Define some colour codes to use for memory logging.
- const char* const clr_normal = (FLAG_log_colour) ? ("\033[m")
- : ("");
- const char* const clr_memory_value = (FLAG_log_colour) ? ("\033[1;32m")
- : ("");
- const char* const clr_memory_address = (FLAG_log_colour) ? ("\033[32m")
- : ("");
-
-  // The template is "# value -> address". It is not passed inline to the
-  // printf call because some compilers struggle with the parametrized width
-  // specifier (%0*).
- const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
- fprintf(stream_,
- format,
- clr_memory_value,
-          num_bytes * 2, // The width in hex characters.
- value,
- clr_normal,
- clr_memory_address,
-          reinterpret_cast<uint64_t>(address),
- clr_normal);
-}
-
-
-// Visitors --------------------------------------------------------------------
-
-void Simulator::VisitUnimplemented(Instruction* instr) {
- fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
- reinterpret_cast<void*>(instr), instr->InstructionBits());
- UNIMPLEMENTED();
-}
-
-
-void Simulator::VisitUnallocated(Instruction* instr) {
- fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
- reinterpret_cast<void*>(instr), instr->InstructionBits());
- UNIMPLEMENTED();
-}
-
-
-void Simulator::VisitPCRelAddressing(Instruction* instr) {
- switch (instr->Mask(PCRelAddressingMask)) {
- case ADR:
- set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
- break;
- case ADRP: // Not implemented in the assembler.
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void Simulator::VisitUnconditionalBranch(Instruction* instr) {
- switch (instr->Mask(UnconditionalBranchMask)) {
- case BL:
- set_lr(instr->NextInstruction());
- // Fall through.
- case B:
- set_pc(instr->ImmPCOffsetTarget());
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void Simulator::VisitConditionalBranch(Instruction* instr) {
- ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
- if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
- set_pc(instr->ImmPCOffsetTarget());
- }
-}
-
-
-void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
- Instruction* target = reg<Instruction*>(instr->Rn());
- switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
- case BLR: {
- set_lr(instr->NextInstruction());
- if (instr->Rn() == 31) {
-        // BLR XZR is used as a guard for the constant pool. We should never
-        // hit this, but if we do, trap to allow debugging.
- Debug();
- }
- // Fall through.
- }
- case BR:
- case RET: set_pc(target); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::VisitTestBranch(Instruction* instr) {
- unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
- instr->ImmTestBranchBit40();
- bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
- switch (instr->Mask(TestBranchMask)) {
- case TBZ: break;
- case TBNZ: take_branch = !take_branch; break;
- default: UNIMPLEMENTED();
- }
- if (take_branch) {
- set_pc(instr->ImmPCOffsetTarget());
- }
-}
-
-
-void Simulator::VisitCompareBranch(Instruction* instr) {
- unsigned rt = instr->Rt();
- bool take_branch = false;
- switch (instr->Mask(CompareBranchMask)) {
- case CBZ_w: take_branch = (wreg(rt) == 0); break;
- case CBZ_x: take_branch = (xreg(rt) == 0); break;
- case CBNZ_w: take_branch = (wreg(rt) != 0); break;
- case CBNZ_x: take_branch = (xreg(rt) != 0); break;
- default: UNIMPLEMENTED();
- }
- if (take_branch) {
- set_pc(instr->ImmPCOffsetTarget());
- }
-}
-
-
-void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- bool set_flags = instr->FlagsUpdate();
- int64_t new_val = 0;
- Instr operation = instr->Mask(AddSubOpMask);
-
- switch (operation) {
- case ADD:
- case ADDS: {
- new_val = AddWithCarry(reg_size,
- set_flags,
- reg(reg_size, instr->Rn(), instr->RnMode()),
- op2);
- break;
- }
- case SUB:
- case SUBS: {
- new_val = AddWithCarry(reg_size,
- set_flags,
- reg(reg_size, instr->Rn(), instr->RnMode()),
- ~op2,
- 1);
- break;
- }
- default: UNREACHABLE();
- }
-
- set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
-}
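-
-
-// Note (illustrative): subtraction is implemented as two's-complement
-// addition, so SUB Rd, Rn, op2 computes AddWithCarry(Rn, ~op2, 1). For
-// example, 5 - 3 becomes 5 + 0xfffffffffffffffc + 1 == 2 with the carry out
-// set, which on A64 means "no borrow".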
-
-
-void Simulator::VisitAddSubShifted(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t op2 = ShiftOperand(reg_size,
- reg(reg_size, instr->Rm()),
- static_cast<Shift>(instr->ShiftDP()),
- instr->ImmDPShift());
- AddSubHelper(instr, op2);
-}
-
-
-void Simulator::VisitAddSubImmediate(Instruction* instr) {
- int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
- AddSubHelper(instr, op2);
-}
-
-
-void Simulator::VisitAddSubExtended(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t op2 = ExtendValue(reg_size,
- reg(reg_size, instr->Rm()),
- static_cast<Extend>(instr->ExtendMode()),
- instr->ImmExtendShift());
- AddSubHelper(instr, op2);
-}
-
-
-void Simulator::VisitAddSubWithCarry(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t op2 = reg(reg_size, instr->Rm());
- int64_t new_val;
-
-  if ((instr->Mask(AddSubOpMask) == SUB) ||
-      (instr->Mask(AddSubOpMask) == SUBS)) {
- op2 = ~op2;
- }
-
- new_val = AddWithCarry(reg_size,
- instr->FlagsUpdate(),
- reg(reg_size, instr->Rn()),
- op2,
- C());
-
- set_reg(reg_size, instr->Rd(), new_val);
-}
-
-
-void Simulator::VisitLogicalShifted(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- Shift shift_type = static_cast<Shift>(instr->ShiftDP());
- unsigned shift_amount = instr->ImmDPShift();
- int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
- shift_amount);
- if (instr->Mask(NOT) == NOT) {
- op2 = ~op2;
- }
- LogicalHelper(instr, op2);
-}
-
-
-void Simulator::VisitLogicalImmediate(Instruction* instr) {
- LogicalHelper(instr, instr->ImmLogical());
-}
-
-
-void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t op1 = reg(reg_size, instr->Rn());
- int64_t result = 0;
- bool update_flags = false;
-
- // Switch on the logical operation, stripping out the NOT bit, as it has a
- // different meaning for logical immediate instructions.
- switch (instr->Mask(LogicalOpMask & ~NOT)) {
- case ANDS: update_flags = true; // Fall through.
- case AND: result = op1 & op2; break;
- case ORR: result = op1 | op2; break;
- case EOR: result = op1 ^ op2; break;
- default:
- UNIMPLEMENTED();
- }
-
- if (update_flags) {
- nzcv().SetN(CalcNFlag(result, reg_size));
- nzcv().SetZ(CalcZFlag(result));
- nzcv().SetC(0);
- nzcv().SetV(0);
- }
-
- set_reg(reg_size, instr->Rd(), result, instr->RdMode());
-}
-
-
-void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
-}
-
-
-void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
- ConditionalCompareHelper(instr, instr->ImmCondCmp());
-}
-
-
-void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t op1 = reg(reg_size, instr->Rn());
-
- if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
- // If the condition passes, set the status flags to the result of comparing
- // the operands.
- if (instr->Mask(ConditionalCompareMask) == CCMP) {
- AddWithCarry(reg_size, true, op1, ~op2, 1);
- } else {
- ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
- AddWithCarry(reg_size, true, op1, op2, 0);
- }
- } else {
- // If the condition fails, set the status flags to the nzcv immediate.
- nzcv().SetFlags(instr->Nzcv());
- }
-}
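-
-
-// Worked example for ConditionalCompareHelper (illustrative): for
-// CCMP x0, x1, #4, eq, if the EQ condition passes the flags become those of
-// CMP x0, x1 (computed as x0 + ~x1 + 1); otherwise NZCV is loaded directly
-// from the immediate 0b0100, i.e. only Z is set.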
-
-
-void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
- int offset = instr->ImmLSUnsigned() << instr->SizeLS();
- LoadStoreHelper(instr, offset, Offset);
-}
-
-
-void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
- LoadStoreHelper(instr, instr->ImmLS(), Offset);
-}
-
-
-void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
- LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
-}
-
-
-void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
- LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
-}
-
-
-void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
- Extend ext = static_cast<Extend>(instr->ExtendMode());
- ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
- unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
-
- int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
- shift_amount);
- LoadStoreHelper(instr, offset, Offset);
-}
-
-
-void Simulator::LoadStoreHelper(Instruction* instr,
- int64_t offset,
- AddrMode addrmode) {
- unsigned srcdst = instr->Rt();
- unsigned addr_reg = instr->Rn();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- int num_bytes = 1 << instr->SizeLS();
- uint8_t* stack = NULL;
-
-  // Handle the writeback for stores before the store. On a CPU the writeback
-  // and the store are atomic, but when running on the simulator it is
-  // possible to be interrupted in between. The simulator is not thread safe,
-  // and V8 does not require it to be to run JavaScript; the profiler may
-  // therefore sample the "simulated" CPU in the middle of a load/store with
-  // writeback. The code below ensures that push operations are safe even
-  // when interrupted: the stack pointer will be decremented before adding an
-  // element to the stack.
- if (instr->IsStore()) {
- LoadStoreWriteBack(addr_reg, offset, addrmode);
-
- // For store the address post writeback is used to check access below the
- // stack.
- stack = reinterpret_cast<uint8_t*>(sp());
- }
-
- LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
- switch (op) {
- case LDRB_w:
- case LDRH_w:
- case LDR_w:
- case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
- case STRB_w:
- case STRH_w:
- case STR_w:
- case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
- case LDRSB_w: {
- set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead8(address), SXTB));
- break;
- }
- case LDRSB_x: {
- set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead8(address), SXTB));
- break;
- }
- case LDRSH_w: {
- set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead16(address), SXTH));
- break;
- }
- case LDRSH_x: {
- set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead16(address), SXTH));
- break;
- }
- case LDRSW_x: {
- set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
- break;
- }
- case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
- case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
- case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
- case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
- default: UNIMPLEMENTED();
- }
-
- // Handle the writeback for loads after the load to ensure safe pop
- // operation even when interrupted in the middle of it. The stack pointer
- // is only updated after the load so pop(fp) will never break the invariant
- // sp <= fp expected while walking the stack in the sampler.
- if (instr->IsLoad()) {
- // For loads the address pre writeback is used to check access below the
- // stack.
- stack = reinterpret_cast<uint8_t*>(sp());
-
- LoadStoreWriteBack(addr_reg, offset, addrmode);
- }
-
- // Accesses below the stack pointer (but above the platform stack limit) are
- // not allowed in the ABI.
- CheckMemoryAccess(address, stack);
-}
-
-
-void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
- LoadStorePairHelper(instr, Offset);
-}
-
-
-void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
- LoadStorePairHelper(instr, PreIndex);
-}
-
-
-void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
- LoadStorePairHelper(instr, PostIndex);
-}
-
-
-void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
- LoadStorePairHelper(instr, Offset);
-}
-
-
-void Simulator::LoadStorePairHelper(Instruction* instr,
- AddrMode addrmode) {
- unsigned rt = instr->Rt();
- unsigned rt2 = instr->Rt2();
- unsigned addr_reg = instr->Rn();
- int offset = instr->ImmLSPair() << instr->SizeLSPair();
- uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
- uint8_t* stack = NULL;
-
-  // Handle the writeback for stores before the store. On a CPU the writeback
-  // and the store are atomic, but when running on the simulator it is
-  // possible to be interrupted in between. The simulator is not thread safe,
-  // and V8 does not require it to be to run JavaScript; the profiler may
-  // therefore sample the "simulated" CPU in the middle of a load/store with
-  // writeback. The code below ensures that push operations are safe even
-  // when interrupted: the stack pointer will be decremented before adding an
-  // element to the stack.
- if (instr->IsStore()) {
- LoadStoreWriteBack(addr_reg, offset, addrmode);
-
- // For store the address post writeback is used to check access below the
- // stack.
- stack = reinterpret_cast<uint8_t*>(sp());
- }
-
- LoadStorePairOp op =
- static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
-
- // 'rt' and 'rt2' can only be aliased for stores.
- ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
-
- switch (op) {
- case LDP_w: {
- set_wreg(rt, MemoryRead32(address));
- set_wreg(rt2, MemoryRead32(address + kWRegSizeInBytes));
- break;
- }
- case LDP_s: {
- set_sreg(rt, MemoryReadFP32(address));
- set_sreg(rt2, MemoryReadFP32(address + kSRegSizeInBytes));
- break;
- }
- case LDP_x: {
- set_xreg(rt, MemoryRead64(address));
- set_xreg(rt2, MemoryRead64(address + kXRegSizeInBytes));
- break;
- }
- case LDP_d: {
- set_dreg(rt, MemoryReadFP64(address));
- set_dreg(rt2, MemoryReadFP64(address + kDRegSizeInBytes));
- break;
- }
- case LDPSW_x: {
- set_xreg(rt, ExtendValue(kXRegSize, MemoryRead32(address), SXTW));
- set_xreg(rt2, ExtendValue(kXRegSize,
- MemoryRead32(address + kWRegSizeInBytes), SXTW));
- break;
- }
- case STP_w: {
- MemoryWrite32(address, wreg(rt));
- MemoryWrite32(address + kWRegSizeInBytes, wreg(rt2));
- break;
- }
- case STP_s: {
- MemoryWriteFP32(address, sreg(rt));
- MemoryWriteFP32(address + kSRegSizeInBytes, sreg(rt2));
- break;
- }
- case STP_x: {
- MemoryWrite64(address, xreg(rt));
- MemoryWrite64(address + kXRegSizeInBytes, xreg(rt2));
- break;
- }
- case STP_d: {
- MemoryWriteFP64(address, dreg(rt));
- MemoryWriteFP64(address + kDRegSizeInBytes, dreg(rt2));
- break;
- }
- default: UNREACHABLE();
- }
-
- // Handle the writeback for loads after the load to ensure safe pop
- // operation even when interrupted in the middle of it. The stack pointer
- // is only updated after the load so pop(fp) will never break the invariant
- // sp <= fp expected while walking the stack in the sampler.
- if (instr->IsLoad()) {
- // For loads the address pre writeback is used to check access below the
- // stack.
- stack = reinterpret_cast<uint8_t*>(sp());
-
- LoadStoreWriteBack(addr_reg, offset, addrmode);
- }
-
- // Accesses below the stack pointer (but above the platform stack limit) are
- // not allowed in the ABI.
- CheckMemoryAccess(address, stack);
-}
-
-
-void Simulator::VisitLoadLiteral(Instruction* instr) {
- uint8_t* address = instr->LiteralAddress();
- unsigned rt = instr->Rt();
-
- switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
- case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
- case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
- case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
- default: UNREACHABLE();
- }
-}
-
-
-uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode) {
- const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
- int64_t address = xreg(addr_reg, Reg31IsStackPointer);
- if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
- // When the base register is SP the stack pointer is required to be
- // quadword aligned prior to the address calculation and write-backs.
- // Misalignment will cause a stack alignment fault.
- FATAL("ALIGNMENT EXCEPTION");
- }
-
- if ((addrmode == Offset) || (addrmode == PreIndex)) {
- address += offset;
- }
-
- return reinterpret_cast<uint8_t*>(address);
-}
-
-
-void Simulator::LoadStoreWriteBack(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode) {
- if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
- ASSERT(offset != 0);
- uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
- set_reg(addr_reg, address + offset, Reg31IsStackPointer);
- }
-}
-
-
-void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
- if ((address >= stack_limit_) && (address < stack)) {
- fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
- fprintf(stream_, " sp is here: 0x%16p\n", stack);
- fprintf(stream_, " access was here: 0x%16p\n", address);
- fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
- fprintf(stream_, "\n");
- FATAL("ACCESS BELOW STACK POINTER");
- }
-}
-
-
-uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
- ASSERT(address != NULL);
- ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
- uint64_t read = 0;
- memcpy(&read, address, num_bytes);
- return read;
-}
-
-
-uint8_t Simulator::MemoryRead8(uint8_t* address) {
- return MemoryRead(address, sizeof(uint8_t));
-}
-
-
-uint16_t Simulator::MemoryRead16(uint8_t* address) {
- return MemoryRead(address, sizeof(uint16_t));
-}
-
-
-uint32_t Simulator::MemoryRead32(uint8_t* address) {
- return MemoryRead(address, sizeof(uint32_t));
-}
-
-
-float Simulator::MemoryReadFP32(uint8_t* address) {
- return rawbits_to_float(MemoryRead32(address));
-}
-
-
-uint64_t Simulator::MemoryRead64(uint8_t* address) {
- return MemoryRead(address, sizeof(uint64_t));
-}
-
-
-double Simulator::MemoryReadFP64(uint8_t* address) {
- return rawbits_to_double(MemoryRead64(address));
-}
-
-
-void Simulator::MemoryWrite(uint8_t* address,
- uint64_t value,
- unsigned num_bytes) {
- ASSERT(address != NULL);
- ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
-
- LogWrite(address, value, num_bytes);
- memcpy(address, &value, num_bytes);
-}
-
-
-void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
- MemoryWrite(address, value, sizeof(uint32_t));
-}
-
-
-void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
- MemoryWrite32(address, float_to_rawbits(value));
-}
-
-
-void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
- MemoryWrite(address, value, sizeof(uint64_t));
-}
-
-
-void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
- MemoryWrite64(address, double_to_rawbits(value));
-}
-
-
-void Simulator::VisitMoveWideImmediate(Instruction* instr) {
- MoveWideImmediateOp mov_op =
- static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
- int64_t new_xn_val = 0;
-
- bool is_64_bits = instr->SixtyFourBits() == 1;
- // Shift is limited for W operations.
- ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
-
- // Get the shifted immediate.
- int64_t shift = instr->ShiftMoveWide() * 16;
- int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
-
- // Compute the new value.
- switch (mov_op) {
- case MOVN_w:
- case MOVN_x: {
- new_xn_val = ~shifted_imm16;
- if (!is_64_bits) new_xn_val &= kWRegMask;
- break;
- }
- case MOVK_w:
- case MOVK_x: {
- unsigned reg_code = instr->Rd();
- int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
- : wreg(reg_code);
- new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
- break;
- }
- case MOVZ_w:
- case MOVZ_x: {
- new_xn_val = shifted_imm16;
- break;
- }
- default:
- UNREACHABLE();
- }
-
- // Update the destination register.
- set_xreg(instr->Rd(), new_xn_val);
-}
-
-
-void Simulator::VisitConditionalSelect(Instruction* instr) {
- uint64_t new_val = xreg(instr->Rn());
-
- if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
- new_val = xreg(instr->Rm());
- switch (instr->Mask(ConditionalSelectMask)) {
- case CSEL_w:
- case CSEL_x: break;
- case CSINC_w:
- case CSINC_x: new_val++; break;
- case CSINV_w:
- case CSINV_x: new_val = ~new_val; break;
- case CSNEG_w:
- case CSNEG_x: new_val = -new_val; break;
- default: UNIMPLEMENTED();
- }
- }
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- set_reg(reg_size, instr->Rd(), new_val);
-}
-
-
-void Simulator::VisitDataProcessing1Source(Instruction* instr) {
- unsigned dst = instr->Rd();
- unsigned src = instr->Rn();
-
- switch (instr->Mask(DataProcessing1SourceMask)) {
- case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSize)); break;
- case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSize)); break;
- case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
- case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
- case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
- case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
- case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
- case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSize)); break;
- case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSize)); break;
- case CLS_w: {
- set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSize));
- break;
- }
- case CLS_x: {
- set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSize));
- break;
- }
- default: UNIMPLEMENTED();
- }
-}
-
-
-uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
- ASSERT((num_bits == kWRegSize) || (num_bits == kXRegSize));
- uint64_t result = 0;
- for (unsigned i = 0; i < num_bits; i++) {
- result = (result << 1) | (value & 1);
- value >>= 1;
- }
- return result;
-}
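-
-
-// Worked example for ReverseBits (illustrative):
-// ReverseBits(0x00000001, kWRegSize) returns 0x80000000. Each iteration
-// shifts the accumulated result left by one and appends the next
-// least-significant bit of the input.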
-
-
-uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
- // Split the 64-bit value into an 8-bit array, where b[0] is the least
- // significant byte, and b[7] is the most significant.
- uint8_t bytes[8];
- uint64_t mask = 0xff00000000000000UL;
- for (int i = 7; i >= 0; i--) {
- bytes[i] = (value & mask) >> (i * 8);
- mask >>= 8;
- }
-
- // Permutation tables for REV instructions.
- // permute_table[Reverse16] is used by REV16_x, REV16_w
- // permute_table[Reverse32] is used by REV32_x, REV_w
- // permute_table[Reverse64] is used by REV_x
- ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
- static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
- {4, 5, 6, 7, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 5, 6, 7} };
- uint64_t result = 0;
- for (int i = 0; i < 8; i++) {
- result <<= 8;
- result |= bytes[permute_table[mode][i]];
- }
- return result;
-}
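-
-
-// Worked example for ReverseBytes (illustrative): in Reverse16 mode the
-// input 0x0123456789abcdef becomes 0x23016745ab89efcd; the two bytes of
-// each halfword are swapped, matching the semantics of REV16.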
-
-
-void Simulator::VisitDataProcessing2Source(Instruction* instr) {
- // TODO(mcapewel) move these to a higher level file, as they are global
- // assumptions.
- ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
- ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
-
- Shift shift_op = NO_SHIFT;
- int64_t result = 0;
- switch (instr->Mask(DataProcessing2SourceMask)) {
- case SDIV_w: {
- int32_t rn = wreg(instr->Rn());
- int32_t rm = wreg(instr->Rm());
- if ((rn == kWMinInt) && (rm == -1)) {
- result = kWMinInt;
- } else if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
- case SDIV_x: {
- int64_t rn = xreg(instr->Rn());
- int64_t rm = xreg(instr->Rm());
- if ((rn == kXMinInt) && (rm == -1)) {
- result = kXMinInt;
- } else if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
- case UDIV_w: {
- uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
- uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
- if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
- case UDIV_x: {
- uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
- uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
- if (rm == 0) {
- // Division by zero can be trapped, but not on A-class processors.
- result = 0;
- } else {
- result = rn / rm;
- }
- break;
- }
- case LSLV_w:
- case LSLV_x: shift_op = LSL; break;
- case LSRV_w:
- case LSRV_x: shift_op = LSR; break;
- case ASRV_w:
- case ASRV_x: shift_op = ASR; break;
- case RORV_w:
- case RORV_x: shift_op = ROR; break;
- default: UNIMPLEMENTED();
- }
-
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- if (shift_op != NO_SHIFT) {
-    // The shift distance is encoded in the least-significant five (W) or
-    // six (X) bits of the Rm register.
- int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
- unsigned shift = wreg(instr->Rm()) & mask;
- result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
- shift);
- }
- set_reg(reg_size, instr->Rd(), result);
-}
-
-
-// The algorithm used is described in section 8.2 of
-// Hacker's Delight, by Henry S. Warren, Jr.
-// It assumes that a right shift on a signed integer is an arithmetic shift.
-static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
- uint64_t u0, v0, w0;
- int64_t u1, v1, w1, w2, t;
-
- u0 = u & 0xffffffffL;
- u1 = u >> 32;
- v0 = v & 0xffffffffL;
- v1 = v >> 32;
-
- w0 = u0 * v0;
- t = u1 * v0 + (w0 >> 32);
- w1 = t & 0xffffffffL;
- w2 = t >> 32;
- w1 = u0 * v1 + w1;
-
- return u1 * v1 + w2 + (w1 >> 32);
-}
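-
-
-// Worked examples for MultiplyHighSigned (illustrative):
-// MultiplyHighSigned(1L << 62, 4) returns 1, the high 64 bits of the
-// 128-bit product 2^64, and MultiplyHighSigned(-1, -1) returns 0 because
-// the full 128-bit product is 1.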
-
-
-void Simulator::VisitDataProcessing3Source(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
-
- int64_t result = 0;
- // Extract and sign- or zero-extend 32-bit arguments for widening operations.
- uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
- uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
- int64_t rn_s32 = reg<int32_t>(instr->Rn());
- int64_t rm_s32 = reg<int32_t>(instr->Rm());
- switch (instr->Mask(DataProcessing3SourceMask)) {
- case MADD_w:
- case MADD_x:
- result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
- break;
- case MSUB_w:
- case MSUB_x:
- result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
- break;
- case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
- case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
- case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
- case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
- case SMULH_x:
- ASSERT(instr->Ra() == kZeroRegCode);
- result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
- break;
- default: UNIMPLEMENTED();
- }
- set_reg(reg_size, instr->Rd(), result);
-}
-
-
-void Simulator::VisitBitfield(Instruction* instr) {
- unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
- int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
- int64_t R = instr->ImmR();
- int64_t S = instr->ImmS();
- int64_t diff = S - R;
- int64_t mask;
- if (diff >= 0) {
- mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
- : reg_mask;
- } else {
- mask = ((1L << (S + 1)) - 1);
- mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
- diff += reg_size;
- }
-
-  // inzero indicates whether the extracted bitfield is inserted into the
-  // existing destination register value or into zero.
-  // If extend is true, the sign of the extracted bitfield is extended.
- bool inzero = false;
- bool extend = false;
- switch (instr->Mask(BitfieldMask)) {
- case BFM_x:
- case BFM_w:
- break;
- case SBFM_x:
- case SBFM_w:
- inzero = true;
- extend = true;
- break;
- case UBFM_x:
- case UBFM_w:
- inzero = true;
- break;
- default:
- UNIMPLEMENTED();
- }
-
- int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
- int64_t src = reg(reg_size, instr->Rn());
- // Rotate source bitfield into place.
- int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
- // Determine the sign extension.
- int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
- int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
-
- // Merge sign extension, dest/zero and bitfield.
- result = signbits | (result & mask) | (dst & ~mask);
-
- set_reg(reg_size, instr->Rd(), result);
-}
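-
-
-// Worked example for VisitBitfield (illustrative): UBFX x0, x1, #8, #4
-// decodes as UBFM with R = 8 and S = 11, so diff = 3 and mask = 0xf. The
-// source is rotated right by R and masked, leaving bits 11:8 of x1 in bits
-// 3:0 of x0; inzero is set, so all other destination bits are cleared.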
-
-
-void Simulator::VisitExtract(Instruction* instr) {
- unsigned lsb = instr->ImmS();
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
- : kWRegSize;
- set_reg(reg_size,
- instr->Rd(),
- (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
- (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
-}
-
-
-void Simulator::VisitFPImmediate(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned dest = instr->Rd();
- switch (instr->Mask(FPImmediateMask)) {
- case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
- case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
- default: UNREACHABLE();
- }
-}
-
-
-void Simulator::VisitFPIntegerConvert(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned dst = instr->Rd();
- unsigned src = instr->Rn();
-
- FPRounding round = RMode();
-
- switch (instr->Mask(FPIntegerConvertMask)) {
- case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
- case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
- case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
- case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
- case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
- case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
- case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
- case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
- case FCVTMS_ws:
- set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
- break;
- case FCVTMS_xs:
- set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
- break;
- case FCVTMS_wd:
- set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
- break;
- case FCVTMS_xd:
- set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
- break;
- case FCVTMU_ws:
- set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
- break;
- case FCVTMU_xs:
- set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
- break;
- case FCVTMU_wd:
- set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
- break;
- case FCVTMU_xd:
- set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
- break;
- case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
- case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
- case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
- case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
- case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
- case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
- case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
- case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
- case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
- case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
- case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
- case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
- case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
- case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
- case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
- case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
- case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
- case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
- case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
- case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
-
- // A 32-bit input can be handled in the same way as a 64-bit input, since
- // the sign- or zero-extension will not affect the conversion.
- case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
- case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
- case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
- case UCVTF_dw: {
- set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
- break;
- }
- case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
- case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
- case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
- case UCVTF_sw: {
- set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
- break;
- }
-
- default: UNREACHABLE();
- }
-}
-
-
-void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned dst = instr->Rd();
- unsigned src = instr->Rn();
- int fbits = 64 - instr->FPScale();
-
- FPRounding round = RMode();
-
- switch (instr->Mask(FPFixedPointConvertMask)) {
- // A 32-bit input can be handled in the same way as a 64-bit input, since
- // the sign- or zero-extension will not affect the conversion.
- case SCVTF_dx_fixed:
- set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
- break;
- case SCVTF_dw_fixed:
- set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
- break;
- case UCVTF_dx_fixed:
- set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
- break;
- case UCVTF_dw_fixed: {
- set_dreg(dst,
- UFixedToDouble(reg<uint32_t>(src), fbits, round));
- break;
- }
- case SCVTF_sx_fixed:
- set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
- break;
- case SCVTF_sw_fixed:
- set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
- break;
- case UCVTF_sx_fixed:
- set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
- break;
- case UCVTF_sw_fixed: {
- set_sreg(dst,
- UFixedToFloat(reg<uint32_t>(src), fbits, round));
- break;
- }
- default: UNREACHABLE();
- }
-}
-
-
-int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kWMaxInt) {
- return kWMaxInt;
- } else if (value < kWMinInt) {
- return kWMinInt;
- }
- return std::isnan(value) ? 0 : static_cast<int32_t>(value);
-}
-
-
-int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kXMaxInt) {
- return kXMaxInt;
- } else if (value < kXMinInt) {
- return kXMinInt;
- }
- return std::isnan(value) ? 0 : static_cast<int64_t>(value);
-}
-
-
-uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kWMaxUInt) {
- return kWMaxUInt;
- } else if (value < 0.0) {
- return 0;
- }
- return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
-}
-
-
-uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kXMaxUInt) {
- return kXMaxUInt;
- } else if (value < 0.0) {
- return 0;
- }
- return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
-}
-
-
-void Simulator::VisitFPCompare(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
- double fn_val = fpreg(reg_size, instr->Rn());
-
- switch (instr->Mask(FPCompareMask)) {
- case FCMP_s:
- case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
- case FCMP_s_zero:
- case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::VisitFPConditionalCompare(Instruction* instr) {
- AssertSupportedFPCR();
-
- switch (instr->Mask(FPConditionalCompareMask)) {
- case FCCMP_s:
- case FCCMP_d: {
- if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
- // If the condition passes, set the status flags to the result of
- // comparing the operands.
- unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize;
- FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
- } else {
- // If the condition fails, set the status flags to the nzcv immediate.
- nzcv().SetFlags(instr->Nzcv());
- }
- break;
- }
- default: UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::VisitFPConditionalSelect(Instruction* instr) {
- AssertSupportedFPCR();
-
- Instr selected;
- if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
- selected = instr->Rn();
- } else {
- selected = instr->Rm();
- }
-
- switch (instr->Mask(FPConditionalSelectMask)) {
- case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
- case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned fd = instr->Rd();
- unsigned fn = instr->Rn();
-
- switch (instr->Mask(FPDataProcessing1SourceMask)) {
- case FMOV_s: set_sreg(fd, sreg(fn)); break;
- case FMOV_d: set_dreg(fd, dreg(fn)); break;
- case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
- case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
- case FNEG_s: set_sreg(fd, -sreg(fn)); break;
- case FNEG_d: set_dreg(fd, -dreg(fn)); break;
- case FSQRT_s: set_sreg(fd, std::sqrt(sreg(fn))); break;
- case FSQRT_d: set_dreg(fd, std::sqrt(dreg(fn))); break;
- case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
- case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
- case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
- case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
- case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
- case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
- case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
- case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-// Assemble the specified IEEE-754 components into the target type and apply
-// appropriate rounding.
-// sign: 0 = positive, 1 = negative
-// exponent: Unbiased IEEE-754 exponent.
-// mantissa: The mantissa of the input. The top bit (which is not encoded for
-// normal IEEE-754 values) must not be omitted. This bit has the
-// value 'pow(2, exponent)'.
-//
-// The input value is assumed to be a normalized value. That is, the input may
-// not be infinity or NaN. If the source value is subnormal, it must be
-// normalized before calling this function such that the highest set bit in the
-// mantissa has the value 'pow(2, exponent)'.
-//
-// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
-// calling a templated FPRound.
-template <class T, int ebits, int mbits>
-static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
- FPRounding round_mode) {
- ASSERT((sign == 0) || (sign == 1));
-
- // Only the FPTieEven rounding mode is implemented.
- ASSERT(round_mode == FPTieEven);
- USE(round_mode);
-
- // Rounding can promote subnormals to normals, and normals to infinities. For
- // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
- // encodable as a float, but rounding based on the low-order mantissa bits
- // could make it overflow. With ties-to-even rounding, this value would become
- // an infinity.
-
- // ---- Rounding Method ----
- //
- // The exponent is irrelevant in the rounding operation, so we treat the
- // lowest-order bit that will fit into the result ('onebit') as having
- // the value '1'. Similarly, the highest-order bit that won't fit into
- // the result ('halfbit') has the value '0.5'. The 'point' sits between
- // 'onebit' and 'halfbit':
- //
- // These bits fit into the result.
- // |---------------------|
- // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- // ||
- // / |
- // / halfbit
- // onebit
- //
- // For subnormal outputs, the range of representable bits is smaller and
- // the position of onebit and halfbit depends on the exponent of the
- // input, but the method is otherwise similar.
- //
- // onebit(frac)
- // |
- // | halfbit(frac) halfbit(adjusted)
- // | / /
- // | | |
- // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
- // 0b00.0... -> 0b00.0... -> 0b00
- // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
- // 0b00.1... -> 0b00.1... -> 0b01
- // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
- // 0b01.0... -> 0b01.0... -> 0b01
- // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
- // 0b01.1... -> 0b01.1... -> 0b10
- // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
- // 0b10.0... -> 0b10.0... -> 0b10
- // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
- // 0b10.1... -> 0b10.1... -> 0b11
- // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
-  //   ...                  /               |        /               |
-  //                       /                |       /                |
-  //                      /                 |      /                 |
-  //   adjusted = frac - (halfbit(mantissa) & ~onebit(frac));        |
-  //                                                                 |
-  //           mantissa = (mantissa >> shift) + halfbit(adjusted);
-
- static const int mantissa_offset = 0;
- static const int exponent_offset = mantissa_offset + mbits;
- static const int sign_offset = exponent_offset + ebits;
- STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
-
- // Bail out early for zero inputs.
- if (mantissa == 0) {
- return sign << sign_offset;
- }
-
- // If all bits in the exponent are set, the value is infinite or NaN.
- // This is true for all binary IEEE-754 formats.
- static const int infinite_exponent = (1 << ebits) - 1;
- static const int max_normal_exponent = infinite_exponent - 1;
-
- // Apply the exponent bias to encode it for the result. Doing this early makes
- // it easy to detect values that will be infinite or subnormal.
- exponent += max_normal_exponent >> 1;
-
- if (exponent > max_normal_exponent) {
- // Overflow: The input is too large for the result type to represent. The
- // FPTieEven rounding mode handles overflows using infinities.
- exponent = infinite_exponent;
- mantissa = 0;
- return (sign << sign_offset) |
- (exponent << exponent_offset) |
- (mantissa << mantissa_offset);
- }
-
- // Calculate the shift required to move the top mantissa bit to the proper
- // place in the destination type.
- const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
- int shift = highest_significant_bit - mbits;
-
- if (exponent <= 0) {
- // The output will be subnormal (before rounding).
-
- // For subnormal outputs, the shift must be adjusted by the exponent. The +1
- // is necessary because the exponent of a subnormal value (encoded as 0) is
- // the same as the exponent of the smallest normal value (encoded as 1).
- shift += -exponent + 1;
-
- // Handle inputs that would produce a zero output.
- //
- // Shifts higher than highest_significant_bit+1 will always produce a zero
- // result. A shift of exactly highest_significant_bit+1 might produce a
- // non-zero result after rounding.
- if (shift > (highest_significant_bit + 1)) {
- // The result will always be +/-0.0.
- return sign << sign_offset;
- }
-
- // Properly encode the exponent for a subnormal output.
- exponent = 0;
- } else {
- // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
- // normal values.
- mantissa &= ~(1UL << highest_significant_bit);
- }
-
- if (shift > 0) {
- // We have to shift the mantissa to the right. Some precision is lost, so we
- // need to apply rounding.
- uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
- uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
- uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
- T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
-
- T result = (sign << sign_offset) |
- (exponent << exponent_offset) |
- ((mantissa >> shift) << mantissa_offset);
-
- // A very large mantissa can overflow during rounding. If this happens, the
- // exponent should be incremented and the mantissa set to 1.0 (encoded as
- // 0). Applying halfbit_adjusted after assembling the float has the nice
- // side-effect that this case is handled for free.
- //
- // This also handles cases where a very large finite value overflows to
- // infinity, or where a very large subnormal value overflows to become
- // normal.
- return result + halfbit_adjusted;
- } else {
- // We have to shift the mantissa to the left (or not at all). The input
- // mantissa is exactly representable in the output mantissa, so apply no
- // rounding correction.
- return (sign << sign_offset) |
- (exponent << exponent_offset) |
- ((mantissa << -shift) << mantissa_offset);
- }
-}
-
-
-// See FPRound for a description of this function.
-static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
- uint64_t mantissa, FPRounding round_mode) {
- int64_t bits =
- FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
- exponent,
- mantissa,
- round_mode);
- return rawbits_to_double(bits);
-}
-
-
-// See FPRound for a description of this function.
-static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
- uint64_t mantissa, FPRounding round_mode) {
- int32_t bits =
- FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
- exponent,
- mantissa,
- round_mode);
- return rawbits_to_float(bits);
-}
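-
-
-// Worked example for FPRound (illustrative):
-// FPRoundToFloat(0, 0, 3, FPTieEven) encodes 1.5f. The top mantissa bit
-// carries the value 2^exponent == 1, the bias turns exponent 0 into a
-// stored exponent of 127, and the shift is negative, so the remaining
-// fraction bit lands at bit 22, giving raw bits 0x3fc00000.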
-
-
-double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
- if (src >= 0) {
- return UFixedToDouble(src, fbits, round);
- } else {
- // This works for all negative values, including INT64_MIN.
- return -UFixedToDouble(-src, fbits, round);
- }
-}
-
-
-double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
- // An input of 0 is a special case because the result is effectively
- // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
- if (src == 0) {
- return 0.0;
- }
-
- // Calculate the exponent. The highest significant bit will have the value
- // 2^exponent.
- const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
- const int64_t exponent = highest_significant_bit - fbits;
-
- return FPRoundToDouble(0, exponent, src, round);
-}
-
-
-float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
- if (src >= 0) {
- return UFixedToFloat(src, fbits, round);
- } else {
- // This works for all negative values, including INT64_MIN.
- return -UFixedToFloat(-src, fbits, round);
- }
-}
-
-
-float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
- // An input of 0 is a special case because the result is effectively
- // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
- if (src == 0) {
- return 0.0f;
- }
-
- // Calculate the exponent. The highest significant bit will have the value
- // 2^exponent.
- const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
- const int32_t exponent = highest_significant_bit - fbits;
-
- return FPRoundToFloat(0, exponent, src, round);
-}
-
-
-double Simulator::FPRoundInt(double value, FPRounding round_mode) {
- if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
- (value == kFP64NegativeInfinity) || std::isnan(value)) {
- return value;
- }
-
- double int_result = floor(value);
- double error = value - int_result;
- switch (round_mode) {
- case FPTieAway: {
-      // If the error is greater than 0.5, or is equal to 0.5 and the integer
-      // result is non-negative, round up (away from zero).
- if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
- int_result++;
- }
- break;
- }
- case FPTieEven: {
- // If the error is greater than 0.5, or is equal to 0.5 and the integer
- // result is odd, round up.
- if ((error > 0.5) ||
- ((error == 0.5) && (fmod(int_result, 2) != 0))) {
- int_result++;
- }
- break;
- }
- case FPZero: {
-      // Round towards zero: use floor(value) for non-negative values and
-      // ceil(value) for negative values.
- if (value < 0) {
- int_result = ceil(value);
- }
- break;
- }
- case FPNegativeInfinity: {
- // We always use floor(value).
- break;
- }
- default: UNIMPLEMENTED();
- }
- return int_result;
-}
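-
-
-// Worked examples for FPRoundInt (illustrative):
-// FPRoundInt(2.5, FPTieEven) == 2.0 and FPRoundInt(3.5, FPTieEven) == 4.0
-// (ties go to the even neighbour), while FPRoundInt(-2.5, FPTieAway) == -3.0
-// (ties go away from zero).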
-
-
-double Simulator::FPToDouble(float value) {
- switch (std::fpclassify(value)) {
- case FP_NAN: {
- // Convert NaNs as the processor would, assuming that FPCR.DN (default
- // NaN) is not set:
- // - The sign is propagated.
- // - The payload (mantissa) is transferred entirely, except that the top
- // bit is forced to '1', making the result a quiet NaN. The unused
- // (low-order) payload bits are set to 0.
- uint32_t raw = float_to_rawbits(value);
-
- uint64_t sign = raw >> 31;
- uint64_t exponent = (1 << 11) - 1;
- uint64_t payload = unsigned_bitextract_64(21, 0, raw);
- payload <<= (52 - 23); // The unused low-order bits should be 0.
- payload |= (1L << 51); // Force a quiet NaN.
-
- return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
- }
-
- case FP_ZERO:
- case FP_NORMAL:
- case FP_SUBNORMAL:
- case FP_INFINITE: {
- // All other inputs are preserved in a standard cast, because every value
- // representable using an IEEE-754 float is also representable using an
- // IEEE-754 double.
- return static_cast<double>(value);
- }
- }
-
- UNREACHABLE();
- return static_cast<double>(value);
-}
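-
-
-// Worked example for FPToDouble (illustrative): the float signalling NaN
-// with raw bits 0x7f800001 converts to 0x7ff8000020000000. The payload bit
-// moves up by (52 - 23) positions and bit 51 is forced to one, making the
-// result a quiet NaN with the same sign.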
-
-
-float Simulator::FPToFloat(double value, FPRounding round_mode) {
- // Only the FPTieEven rounding mode is implemented.
- ASSERT(round_mode == FPTieEven);
- USE(round_mode);
-
- switch (std::fpclassify(value)) {
- case FP_NAN: {
- // Convert NaNs as the processor would, assuming that FPCR.DN (default
- // NaN) is not set:
- // - The sign is propagated.
- // - The payload (mantissa) is transferred as much as possible, except
- // that the top bit is forced to '1', making the result a quiet NaN.
- uint64_t raw = double_to_rawbits(value);
-
- uint32_t sign = raw >> 63;
- uint32_t exponent = (1 << 8) - 1;
- uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
- payload |= (1 << 22); // Force a quiet NaN.
-
- return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
- }
-
- case FP_ZERO:
- case FP_INFINITE: {
- // In a C++ cast, any value representable in the target type will be
- // unchanged. This is always the case for +/-0.0 and infinities.
- return static_cast<float>(value);
- }
-
- case FP_NORMAL:
- case FP_SUBNORMAL: {
- // Convert double-to-float as the processor would, assuming that FPCR.FZ
- // (flush-to-zero) is not set.
- uint64_t raw = double_to_rawbits(value);
- // Extract the IEEE-754 double components.
- uint32_t sign = raw >> 63;
- // Extract the exponent and remove the IEEE-754 encoding bias.
- int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
- // Extract the mantissa and add the implicit '1' bit.
- uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
- if (std::fpclassify(value) == FP_NORMAL) {
- mantissa |= (1UL << 52);
- }
- return FPRoundToFloat(sign, exponent, mantissa, round_mode);
- }
- }
-
- UNREACHABLE();
- return value;
-}
-
-
-void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned fd = instr->Rd();
- unsigned fn = instr->Rn();
- unsigned fm = instr->Rm();
-
- switch (instr->Mask(FPDataProcessing2SourceMask)) {
- case FADD_s: set_sreg(fd, sreg(fn) + sreg(fm)); break;
- case FADD_d: set_dreg(fd, dreg(fn) + dreg(fm)); break;
- case FSUB_s: set_sreg(fd, sreg(fn) - sreg(fm)); break;
- case FSUB_d: set_dreg(fd, dreg(fn) - dreg(fm)); break;
- case FMUL_s: set_sreg(fd, sreg(fn) * sreg(fm)); break;
- case FMUL_d: set_dreg(fd, dreg(fn) * dreg(fm)); break;
- case FDIV_s: set_sreg(fd, sreg(fn) / sreg(fm)); break;
- case FDIV_d: set_dreg(fd, dreg(fn) / dreg(fm)); break;
- case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
- case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
- case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
- case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
- case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); break;
- case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); break;
- case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); break;
- case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
- AssertSupportedFPCR();
-
- unsigned fd = instr->Rd();
- unsigned fn = instr->Rn();
- unsigned fm = instr->Rm();
- unsigned fa = instr->Ra();
-
- // The C99 (and C++11) fma function performs a fused multiply-accumulate.
- switch (instr->Mask(FPDataProcessing3SourceMask)) {
- // fd = fa +/- (fn * fm)
- case FMADD_s: set_sreg(fd, fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
- case FMSUB_s: set_sreg(fd, fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
- case FMADD_d: set_dreg(fd, fma(dreg(fn), dreg(fm), dreg(fa))); break;
- case FMSUB_d: set_dreg(fd, fma(-dreg(fn), dreg(fm), dreg(fa))); break;
- // Variants of the above where the result is negated.
- case FNMADD_s: set_sreg(fd, -fmaf(sreg(fn), sreg(fm), sreg(fa))); break;
- case FNMSUB_s: set_sreg(fd, -fmaf(-sreg(fn), sreg(fm), sreg(fa))); break;
- case FNMADD_d: set_dreg(fd, -fma(dreg(fn), dreg(fm), dreg(fa))); break;
- case FNMSUB_d: set_dreg(fd, -fma(-dreg(fn), dreg(fm), dreg(fa))); break;
- default: UNIMPLEMENTED();
- }
-}
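-
-
-// Illustration (not part of the original simulator) of why the fused forms
-// above matter: fma rounds once, while (a * b) + c rounds twice, so the
-// results can differ. This assumes round-to-nearest and a correct libm fma,
-// as the calls above do.
-static inline void FmaVsUnfusedExample() {
-  double a = 1.0 + ldexp(1.0, -52);  // 1 + 2^-52.
-  double b = 1.0 - ldexp(1.0, -52);  // 1 - 2^-52.
-  double c = -1.0;
-  double fused = fma(a, b, c);    // Exactly -2^-104.
-  double unfused = (a * b) + c;   // 0.0, because a * b first rounds to 1.0.
-  ASSERT(fused != unfused);
-}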
-
-
-template <typename T>
-T Simulator::FPMax(T a, T b) {
- if (IsSignallingNaN(a)) {
- return a;
- } else if (IsSignallingNaN(b)) {
- return b;
- } else if (std::isnan(a)) {
- ASSERT(IsQuietNaN(a));
- return a;
- } else if (std::isnan(b)) {
- ASSERT(IsQuietNaN(b));
- return b;
- }
-
- if ((a == 0.0) && (b == 0.0) &&
- (copysign(1.0, a) != copysign(1.0, b))) {
- // a and b are zero, and the sign differs: return +0.0.
- return 0.0;
- } else {
- return (a > b) ? a : b;
- }
-}
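-
-
-// The zero handling above matters because C++ comparisons cannot tell the
-// signed zeros apart. A standalone sketch of the rule (illustrative only):
-static inline double MaxOfSignedZerosExample() {
-  double a = +0.0;
-  double b = -0.0;
-  // (a > b) is false, so a naive (a > b) ? a : b would return -0.0 here.
-  if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) {
-    return 0.0;  // FPMax's rule: zeros of differing sign give +0.0.
-  }
-  return (a > b) ? a : b;
-}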
-
-
-template <typename T>
-T Simulator::FPMaxNM(T a, T b) {
- if (IsQuietNaN(a) && !IsQuietNaN(b)) {
- a = kFP64NegativeInfinity;
- } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
- b = kFP64NegativeInfinity;
- }
- return FPMax(a, b);
-}
-
-template <typename T>
-T Simulator::FPMin(T a, T b) {
- if (IsSignallingNaN(a)) {
- return a;
- } else if (IsSignallingNaN(b)) {
- return b;
- } else if (std::isnan(a)) {
- ASSERT(IsQuietNaN(a));
- return a;
- } else if (std::isnan(b)) {
- ASSERT(IsQuietNaN(b));
- return b;
- }
-
- if ((a == 0.0) && (b == 0.0) &&
- (copysign(1.0, a) != copysign(1.0, b))) {
- // a and b are zero, and the sign differs: return -0.0.
- return -0.0;
- } else {
- return (a < b) ? a : b;
- }
-}
-
-
-template <typename T>
-T Simulator::FPMinNM(T a, T b) {
- if (IsQuietNaN(a) && !IsQuietNaN(b)) {
- a = kFP64PositiveInfinity;
- } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
- b = kFP64PositiveInfinity;
- }
- return FPMin(a, b);
-}
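-
-
-// The *NM variants above implement IEEE-754-2008 minNum/maxNum: a quiet NaN
-// on one side loses to a number on the other. Substituting the opposite
-// infinity makes the plain FPMin/FPMax comparison pick the numeric operand.
-// Sketch of the effect for FPMaxNM (illustrative only):
-static inline double FPMaxNMEffectExample() {
-  double num = 1.0;
-  // FPMaxNM(qnan, 1.0): the quiet NaN is replaced by -infinity, ...
-  double a = kFP64NegativeInfinity;
-  // ... so an ordinary maximum now picks the numeric operand.
-  return (a > num) ? a : num;  // 1.0.
-}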
-
-
-void Simulator::VisitSystem(Instruction* instr) {
- // Some system instructions hijack their Op and Cp fields to represent a
- // range of immediates instead of indicating a different instruction. This
- // makes the decoding tricky.
- if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
- switch (instr->Mask(SystemSysRegMask)) {
- case MRS: {
- switch (instr->ImmSystemRegister()) {
- case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
- case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
- default: UNIMPLEMENTED();
- }
- break;
- }
- case MSR: {
- switch (instr->ImmSystemRegister()) {
- case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
- case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
- default: UNIMPLEMENTED();
- }
- break;
- }
- }
- } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
- ASSERT(instr->Mask(SystemHintMask) == HINT);
- switch (instr->ImmHint()) {
- case NOP: break;
- default: UNIMPLEMENTED();
- }
- } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
- __sync_synchronize();
- } else {
- UNIMPLEMENTED();
- }
-}
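-
-
-// For example (illustrative), executing "MRS x2, NZCV" copies the packed
-// condition flags into x2, and "MSR NZCV, x2" writes them back, subject to
-// the register's write-ignore mask.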
-
-
-bool Simulator::GetValue(const char* desc, int64_t* value) {
- int regnum = CodeFromName(desc);
- if (regnum >= 0) {
- unsigned code = regnum;
- if (code == kZeroRegCode) {
- // Catch the zero register and return 0.
- *value = 0;
- return true;
- } else if (code == kSPRegInternalCode) {
- // Translate the stack pointer code to 31, for Reg31IsStackPointer.
- code = 31;
- }
- if (desc[0] == 'w') {
- *value = wreg(code, Reg31IsStackPointer);
- } else {
- *value = xreg(code, Reg31IsStackPointer);
- }
- return true;
- } else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" SCNx64,
- reinterpret_cast<uint64_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%" SCNu64,
- reinterpret_cast<uint64_t*>(value)) == 1;
- }
-}
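-
-
-// Illustrative uses of GetValue (not part of the original simulator):
-// register names, the stack pointer alias, and hexadecimal or decimal
-// literals are all accepted.
-static inline void GetValueUsageExample(Simulator* sim) {
-  int64_t value = 0;
-  bool ok = sim->GetValue("x0", &value);       // Register by name.
-  ok = ok && sim->GetValue("csp", &value);     // Stack pointer alias.
-  ok = ok && sim->GetValue("0x1234", &value);  // Hex literal: 0x1234.
-  ok = ok && sim->GetValue("4660", &value);    // Decimal literal: 4660.
-  USE(ok);
-}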
-
-
-bool Simulator::PrintValue(const char* desc) {
- // Define some colour codes to use for the register dump.
- // TODO(jbramley): Find a more elegant way of defining these.
- char const * const clr_normal = FLAG_log_colour ? "\033[m" : "";
- char const * const clr_reg_name = FLAG_log_colour ? "\033[1;34m" : "";
- char const * const clr_reg_value = FLAG_log_colour ? "\033[1;36m" : "";
- char const * const clr_fpreg_name = FLAG_log_colour ? "\033[1;33m" : "";
- char const * const clr_fpreg_value = FLAG_log_colour ? "\033[1;35m" : "";
-
- if (strcmp(desc, "csp") == 0) {
- ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
- return true;
- } else if (strcmp(desc, "wcsp") == 0) {
- ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
- return true;
- }
-
- int i = CodeFromName(desc);
- STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
- if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
-
- if (desc[0] == 'v') {
- PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
- clr_fpreg_name, VRegNameForCode(i),
- clr_fpreg_value, double_to_rawbits(dreg(i)),
- clr_normal,
- clr_fpreg_name, DRegNameForCode(i),
- clr_fpreg_value, dreg(i),
- clr_fpreg_name, SRegNameForCode(i),
- clr_fpreg_value, sreg(i),
- clr_normal);
- return true;
- } else if (desc[0] == 'd') {
- PrintF("%s %s:%s %g%s\n",
- clr_fpreg_name, DRegNameForCode(i),
- clr_fpreg_value, dreg(i),
- clr_normal);
- return true;
- } else if (desc[0] == 's') {
- PrintF("%s %s:%s %g%s\n",
- clr_fpreg_name, SRegNameForCode(i),
- clr_fpreg_value, sreg(i),
- clr_normal);
- return true;
- } else if (desc[0] == 'w') {
- PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
- return true;
- } else {
- // X registers and their aliases have a variety of starting characters, so
- // anything not handled above is printed as an X register.
- PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
- return true;
- }
-}
-
-
-void Simulator::Debug() {
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // Make sure the buffers are null-terminated if input reaches the size limit.
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- bool done = false;
- bool cleared_log_disasm_bit = false;
-
- while (!done) {
- // Disassemble the next instruction to execute before doing anything else.
- PrintInstructionsAt(pc_, 1);
- // Read the command line.
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- // Repeat last command by default.
- char* last_input = last_debugger_input();
- if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
- DeleteArray(line);
- line = last_input;
- } else {
- // Remember the last command that was run.
- set_last_debugger_input(line);
- }
-
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
-
- // stepi / si ------------------------------------------------------------
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- // We are about to execute instructions, after which by default we
- // should increment pc_. If pc_modified_ was set when we reached this
- // debug instruction, it has not been cleared yet because the instruction
- // has not completed, so clear it manually.
- pc_modified_ = false;
-
- if (argc == 1) {
- ExecuteInstruction();
- } else {
- int64_t number_of_instructions_to_execute = 1;
- GetValue(arg1, &number_of_instructions_to_execute);
-
- set_log_parameters(log_parameters() | LOG_DISASM);
- while (number_of_instructions_to_execute-- > 0) {
- ExecuteInstruction();
- }
- set_log_parameters(log_parameters() & ~LOG_DISASM);
- PrintF("\n");
- }
-
- // If it was necessary, the pc has already been updated or incremented
- // when executing the instruction. So we do not want it to be updated
- // again. It will be cleared when exiting.
- pc_modified_ = true;
-
- // next / n --------------------------------------------------------------
- } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
- // Tell the simulator to break after the next executed BL.
- break_on_next_ = true;
- // Continue.
- done = true;
-
- // continue / cont / c ---------------------------------------------------
- } else if ((strcmp(cmd, "continue") == 0) ||
- (strcmp(cmd, "cont") == 0) ||
- (strcmp(cmd, "c") == 0)) {
- // Leave the debugger shell.
- done = true;
-
- // disassemble / disasm / di ---------------------------------------------
- } else if (strcmp(cmd, "disassemble") == 0 ||
- strcmp(cmd, "disasm") == 0 ||
- strcmp(cmd, "di") == 0) {
- int64_t n_of_instrs_to_disasm = 10; // default value.
- int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
- if (argc >= 2) { // disasm <n of instrs>
- GetValue(arg1, &n_of_instrs_to_disasm);
- }
- if (argc >= 3) { // disasm <n of instrs> <address>
- GetValue(arg2, &address);
- }
-
- // Disassemble.
- PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
- n_of_instrs_to_disasm);
- PrintF("\n");
-
- // print / p -------------------------------------------------------------
- } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
- if (argc == 2) {
- if (strcmp(arg1, "all") == 0) {
- // TODO(all): better support for printing in the debugger.
- PrintRegisters(true);
- PrintFPRegisters(true);
- } else {
- if (!PrintValue(arg1)) {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- PrintF(
- "print <register>\n"
- " Print the content of a register. (alias 'p')\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n");
- }
-
- // printobject / po ------------------------------------------------------
- } else if ((strcmp(cmd, "printobject") == 0) ||
- (strcmp(cmd, "po") == 0)) {
- if (argc == 2) {
- int64_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n"
- "printobject <register>\n"
- " Print details about the value. (alias 'po')\n");
- }
-
- // stack / mem ----------------------------------------------------------
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int64_t* cur = NULL;
- int64_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int64_t*>(jssp());
-
- } else { // "mem"
- int64_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int64_t*>(value);
- next_arg++;
- }
-
- int64_t words = 0;
- if (argc == next_arg) {
- words = 10;
- } else if (argc == next_arg + 1) {
- if (!GetValue(argv[next_arg], &words)) {
- PrintF("%s unrecognized\n", argv[next_arg]);
- PrintF("Printing 10 double words by default");
- words = 10;
- }
- } else {
- UNREACHABLE();
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
- reinterpret_cast<uint64_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int64_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
- PrintF(" (");
- if ((value & kSmiTagMask) == 0) {
- STATIC_ASSERT(kSmiValueSize == 32);
- int32_t untagged = (value >> kSmiShift) & 0xffffffff;
- PrintF("smi %" PRId32, untagged);
- } else {
- obj->ShortPrint();
- }
- PrintF(")");
- }
- PrintF("\n");
- cur++;
- }
-
- // trace / t -------------------------------------------------------------
- } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
- if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
- (LOG_DISASM | LOG_REGS)) {
- PrintF("Enabling disassembly and registers tracing\n");
- set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
- } else {
- PrintF("Disabling disassembly and registers tracing\n");
- set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
- }
-
- // break / b -------------------------------------------------------------
- } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
- if (argc == 2) {
- int64_t value;
- if (GetValue(arg1, &value)) {
- SetBreakpoint(reinterpret_cast<Instruction*>(value));
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- ListBreakpoints();
- PrintF("Use `break <address>` to set or disable a breakpoint\n");
- }
-
- // gdb -------------------------------------------------------------------
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("Relinquishing control to gdb.\n");
- OS::DebugBreak();
- PrintF("Regaining control from gdb.\n");
-
- // sysregs ---------------------------------------------------------------
- } else if (strcmp(cmd, "sysregs") == 0) {
- PrintSystemRegisters();
-
- // help / h --------------------------------------------------------------
- } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
- PrintF(
- "stepi / si\n"
- " stepi <n>\n"
- " Step <n> instructions.\n"
- "next / n\n"
- " Continue execution until a BL instruction is reached.\n"
- " At this point a breakpoint is set just after this BL.\n"
- " Then execution is resumed. It will probably later hit the\n"
- " breakpoint just set.\n"
- "continue / cont / c\n"
- " Continue execution from here.\n"
- "disassemble / disasm / di\n"
- " disassemble <n> <address>\n"
- " Disassemble <n> instructions from current <address>.\n"
- " By default <n> is 20 and <address> is the current pc.\n"
- "print / p\n"
- " print <register>\n"
- " Print the content of a register.\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n"
- "printobject / po\n"
- " printobject <value>\n"
- " printobject <register>\n"
- " Print details about the value.\n"
- "stack\n"
- " stack [<words>]\n"
- " Dump stack content, default dump 10 words\n"
- "mem\n"
- " mem <address> [<words>]\n"
- " Dump memory content, default dump 10 words\n"
- "trace / t\n"
- " Toggle disassembly and register tracing\n"
- "break / b\n"
- " break : list all breakpoints\n"
- " break <address> : set / enable / disable a breakpoint.\n"
- "gdb\n"
- " Enter gdb.\n"
- "sysregs\n"
- " Print all system registers (including NZCV).\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- PrintF("Use 'help' for more information.\n");
- }
- }
- if (cleared_log_disasm_bit == true) {
- set_log_parameters(log_parameters_ | LOG_DISASM);
- }
- }
-}
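-
-
-// Example debugger session (illustrative; addresses elided):
-//   sim> stepi 3     // Execute three instructions, tracing disassembly.
-//   sim> print x0    // Show the value of x0.
-//   sim> mem 0x... 4 // Dump four double words of memory.
-//   sim> trace       // Toggle disassembly and register tracing.
-//   sim> continue    // Leave the shell and resume execution.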
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair structure.
- // The simulator assumes all runtime calls return two 64-bit values. If they
-// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
-struct ObjectPair {
- int64_t res0;
- int64_t res1;
-};
-
-
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
- int64_t arg1,
- int64_t arg2,
- int64_t arg3,
- int64_t arg4,
- int64_t arg5,
- int64_t arg6,
- int64_t arg7);
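-
-// Sketch (illustrative, not part of the original code) of how a redirected
-// BUILTIN_CALL uses this interface: arguments come from x0-x7, and under
-// AAPCS64 the two-field struct return lands in x0 and x1.
-static inline ObjectPair ExampleRuntimeCall(SimulatorRuntimeCall target,
-                                            Simulator* sim) {
-  return target(sim->xreg(0), sim->xreg(1), sim->xreg(2), sim->xreg(3),
-                sim->xreg(4), sim->xreg(5), sim->xreg(6), sim->xreg(7));
-}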
-
-typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
-typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
-typedef double (*SimulatorRuntimeFPCall)(double arg1);
-typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
-
- // This signature supports direct calls to an API function's native callback
-// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
-
-// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
- void* arg2);
-
-void Simulator::VisitException(Instruction* instr) {
- // Define some colour codes to use for log messages.
- // TODO(jbramley): Find a more elegant way of defining these.
- char const* const clr_normal = (FLAG_log_colour) ? ("\033[m")
- : ("");
- char const* const clr_debug_number = (FLAG_log_colour) ? ("\033[1;33m")
- : ("");
- char const* const clr_debug_message = (FLAG_log_colour) ? ("\033[0;33m")
- : ("");
- char const* const clr_printf = (FLAG_log_colour) ? ("\033[0;32m")
- : ("");
-
- switch (instr->Mask(ExceptionMask)) {
- case HLT: {
- if (instr->ImmException() == kImmExceptionIsDebug) {
- // Read the arguments encoded inline in the instruction stream.
- uint32_t code;
- uint32_t parameters;
- char const * message;
-
- ASSERT(sizeof(*pc_) == 1);
- memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
- memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
- message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
-
- // Always print something when we hit a debug point that breaks.
- // We are going to break, so printing something is not an issue in
- // terms of speed.
- if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
- if (message != NULL) {
- PrintF("%sDebugger hit %d: %s%s%s\n",
- clr_debug_number,
- code,
- clr_debug_message,
- message,
- clr_normal);
- } else {
- PrintF("%sDebugger hit %d.%s\n",
- clr_debug_number,
- code,
- clr_normal);
- }
- }
-
- // Other options.
- switch (parameters & kDebuggerTracingDirectivesMask) {
- case TRACE_ENABLE:
- set_log_parameters(log_parameters() | parameters);
- if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
- if (parameters & LOG_REGS) { PrintRegisters(); }
- if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
- break;
- case TRACE_DISABLE:
- set_log_parameters(log_parameters() & ~parameters);
- break;
- case TRACE_OVERRIDE:
- set_log_parameters(parameters);
- break;
- default:
- // We don't support a one-shot LOG_DISASM.
- ASSERT((parameters & LOG_DISASM) == 0);
- // Don't print information that is already being traced.
- parameters &= ~log_parameters();
- // Print the requested information.
- if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
- if (parameters & LOG_REGS) PrintRegisters(true);
- if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
- }
-
- // The stop parameters are inlined in the code. Skip them:
- // - Skip to the end of the message string.
- pc_ += kDebugMessageOffset + strlen(message) + 1;
- // - Advance to the next aligned location.
- pc_ = AlignUp(pc_, kInstructionSize);
- // - Verify that the unreachable marker is present.
- ASSERT(pc_->Mask(ExceptionMask) == HLT);
- ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
- // - Skip past the unreachable marker.
- set_pc(pc_->NextInstruction());
-
- // Check if the debugger should break.
- if (parameters & BREAK) Debug();
-
- } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
- // TODO(all): Extract the call redirection code into a separate
- // function.
-
- Redirection* redirection = Redirection::FromHltInstruction(instr);
-
- // The called C code might itself call simulated code, so any
- // caller-saved registers (including lr) could still be clobbered by a
- // redirected call.
- Instruction* return_address = lr();
-
- // TODO(jbramley): Make external_function() a template so that we don't
- // have to explicitly cast the result for each redirection type.
- int64_t external =
- reinterpret_cast<int64_t>(redirection->external_function());
-
- TraceSim("Call to host function at %p\n",
- reinterpret_cast<void*>(redirection->external_function()));
-
- // SP must be 16 bytes aligned at the call interface.
- bool stack_alignment_exception = ((sp() & 0xf) != 0);
- if (stack_alignment_exception) {
- TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
- FATAL("ALIGNMENT EXCEPTION");
- }
-
- switch (redirection->type()) {
- default:
- TraceSim("Type: Unknown.\n");
- UNREACHABLE();
- break;
-
- case ExternalReference::BUILTIN_CALL: {
- // MaybeObject* f(v8::internal::Arguments).
- TraceSim("Type: BUILTIN_CALL\n");
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
-
- // We don't know how many arguments are being passed, but we can
- // pass 8 without touching the stack. They will be ignored by the
- // host function if they aren't used.
- TraceSim("Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64,
- xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
- ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
- TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64"}\n",
- result.res0, result.res1);
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- set_xreg(0, result.res0);
- set_xreg(1, result.res1);
- break;
- }
-
- case ExternalReference::DIRECT_API_CALL: {
- // void f(v8::FunctionCallbackInfo&)
- TraceSim("Type: DIRECT_API_CALL\n");
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
- target(xreg(0));
- TraceSim("No return value.");
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- break;
- }
-
- case ExternalReference::BUILTIN_COMPARE_CALL: {
- // int f(double, double)
- TraceSim("Type: BUILTIN_COMPARE_CALL\n");
- SimulatorRuntimeCompareCall target =
- reinterpret_cast<SimulatorRuntimeCompareCall>(external);
- TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
- int64_t result = target(dreg(0), dreg(1));
- TraceSim("Returned: %" PRId64 "\n", result);
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- set_xreg(0, result);
- break;
- }
-
- case ExternalReference::BUILTIN_FP_CALL: {
- // double f(double)
- TraceSim("Type: BUILTIN_FP_CALL\n");
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- TraceSim("Argument: %f\n", dreg(0));
- double result = target(dreg(0));
- TraceSim("Returned: %f\n", result);
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- set_dreg(0, result);
- break;
- }
-
- case ExternalReference::BUILTIN_FP_FP_CALL: {
- // double f(double, double)
- TraceSim("Type: BUILTIN_FP_FP_CALL\n");
- SimulatorRuntimeFPFPCall target =
- reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
- TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
- double result = target(dreg(0), dreg(1));
- TraceSim("Returned: %f\n", result);
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- set_dreg(0, result);
- break;
- }
-
- case ExternalReference::BUILTIN_FP_INT_CALL: {
- // double f(double, int)
- TraceSim("Type: BUILTIN_FP_INT_CALL\n");
- SimulatorRuntimeFPIntCall target =
- reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
- TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
- double result = target(dreg(0), wreg(0));
- TraceSim("Returned: %f\n", result);
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- set_dreg(0, result);
- break;
- }
-
- case ExternalReference::DIRECT_GETTER_CALL: {
- // void f(Local<String> property, PropertyCallbackInfo& info)
- TraceSim("Type: DIRECT_GETTER_CALL\n");
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
- xreg(0), xreg(1));
- target(xreg(0), xreg(1));
- TraceSim("No return value.");
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- break;
- }
-
- case ExternalReference::PROFILING_API_CALL: {
- // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
- TraceSim("Type: PROFILING_API_CALL\n");
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- void* arg1 = Redirection::ReverseRedirection(xreg(1));
- TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
- target(xreg(0), arg1);
- TraceSim("No return value.");
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- break;
- }
-
- case ExternalReference::PROFILING_GETTER_CALL: {
- // void f(Local<String> property, PropertyCallbackInfo& info,
- // AccessorGetterCallback callback)
- TraceSim("Type: PROFILING_GETTER_CALL\n");
- SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
- external);
- void* arg2 = Redirection::ReverseRedirection(xreg(2));
- TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
- xreg(0), xreg(1), arg2);
- target(xreg(0), xreg(1), arg2);
- TraceSim("No return value.");
-#ifdef DEBUG
- CorruptAllCallerSavedCPURegisters();
-#endif
- break;
- }
- }
-
- set_lr(return_address);
- set_pc(return_address);
- } else if (instr->ImmException() == kImmExceptionIsPrintf) {
- // Read the argument encoded inline in the instruction stream.
- uint32_t type;
- ASSERT(sizeof(*pc_) == 1);
- memcpy(&type, pc_ + kPrintfTypeOffset, sizeof(type));
-
- const char* format = reg<const char*>(0);
-
- // Pass all of the relevant PCS registers onto printf. It doesn't
- // matter if we pass too many as the extra ones won't be read.
- int result;
- fputs(clr_printf, stream_);
- if (type == CPURegister::kRegister) {
- result = fprintf(stream_, format,
- xreg(1), xreg(2), xreg(3), xreg(4),
- xreg(5), xreg(6), xreg(7));
- } else if (type == CPURegister::kFPRegister) {
- result = fprintf(stream_, format,
- dreg(0), dreg(1), dreg(2), dreg(3),
- dreg(4), dreg(5), dreg(6), dreg(7));
- } else {
- ASSERT(type == CPURegister::kNoRegister);
- result = fprintf(stream_, "%s", format);
- }
- fputs(clr_normal, stream_);
- set_xreg(0, result);
-
- // TODO(jbramley): Consider clobbering all caller-saved registers here.
-
- // The printf parameters are inlined in the code, so skip them.
- set_pc(pc_->InstructionAtOffset(kPrintfLength));
-
- // Set LR as if we'd just called a native printf function.
- set_lr(pc());
-
- } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
- fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
- reinterpret_cast<void*>(pc_));
- abort();
-
- } else {
- OS::DebugBreak();
- }
- break;
- }
-
- default:
- UNIMPLEMENTED();
- }
-}
-
-#endif // USE_SIMULATOR
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/simulator-a64.h b/deps/v8/src/a64/simulator-a64.h
deleted file mode 100644
index 535f287096..0000000000
--- a/deps/v8/src/a64/simulator-a64.h
+++ /dev/null
@@ -1,868 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_SIMULATOR_A64_H_
-#define V8_A64_SIMULATOR_A64_H_
-
-#include <stdarg.h>
-#include <vector>
-
-#include "v8.h"
-
-#include "globals.h"
-#include "utils.h"
-#include "allocation.h"
-#include "assembler.h"
-#include "a64/assembler-a64.h"
-#include "a64/decoder-a64.h"
-#include "a64/disasm-a64.h"
-#include "a64/instrument-a64.h"
-
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
-namespace v8 {
-namespace internal {
-
-#if !defined(USE_SIMULATOR)
-
-// Running without a simulator on a native A64 platform.
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*a64_regexp_matcher)(String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- void* return_address,
- Isolate* isolate);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type a64_regexp_matcher.
-// The ninth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<a64_regexp_matcher>(entry)( \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// Running without a simulator there is nothing to do.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static void UnregisterCTryCatch() { }
-};
-
-#else // !defined(USE_SIMULATOR)
-
-enum ReverseByteMode {
- Reverse16 = 0,
- Reverse32 = 1,
- Reverse64 = 2
-};
-
-
-// The proper way to initialize a simulated system register (such as NZCV) is as
-// follows:
-// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
-class SimSystemRegister {
- public:
- // The default constructor represents a register which has no writable bits.
- // It is not possible to set its value to anything other than 0.
- SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
-
- uint32_t RawValue() const {
- return value_;
- }
-
- void SetRawValue(uint32_t new_value) {
- value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
- }
-
- uint32_t Bits(int msb, int lsb) const {
- return unsigned_bitextract_32(msb, lsb, value_);
- }
-
- int32_t SignedBits(int msb, int lsb) const {
- return signed_bitextract_32(msb, lsb, value_);
- }
-
- void SetBits(int msb, int lsb, uint32_t bits);
-
- // Default system register values.
- static SimSystemRegister DefaultValueFor(SystemRegister id);
-
-#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
- uint32_t Name() const { return Func(HighBit, LowBit); } \
- void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
-#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
- static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
-
- SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
-
-#undef DEFINE_WRITE_IGNORE_MASK
-#undef DEFINE_GETTER
-
- protected:
- // Most system registers only implement a few of the bits in the word. Other
- // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
- // describes the bits which are not modifiable.
- SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
- : value_(value), write_ignore_mask_(write_ignore_mask) { }
-
- uint32_t value_;
- uint32_t write_ignore_mask_;
-};
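-
-
-// Illustration of the write-ignore behaviour above (assuming, per the field
-// list, that only the condition flag bits 31:28 of NZCV are writable):
-static inline void SimSystemRegisterMaskExample() {
-  SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
-  nzcv.SetRawValue(0xffffffff);  // Writes to read-as-zero bits are dropped.
-  ASSERT(nzcv.RawValue() == 0xf0000000);
-}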
-
-
-// Represent a register (r0-r31, v0-v31).
-template<int kSizeInBytes>
-class SimRegisterBase {
- public:
- template<typename T>
- void Set(T new_value, unsigned size = sizeof(T)) {
- ASSERT(size <= kSizeInBytes);
- ASSERT(size <= sizeof(new_value));
- // All AArch64 registers are zero-extending: writing a W register clears the
- // top bits of the corresponding X register.
- memset(value_, 0, kSizeInBytes);
- memcpy(value_, &new_value, size);
- }
-
- // Copy 'size' bytes of the register to the result, and zero-extend to fill
- // the result.
- template<typename T>
- T Get(unsigned size = sizeof(T)) const {
- ASSERT(size <= kSizeInBytes);
- T result;
- memset(&result, 0, sizeof(result));
- memcpy(&result, value_, size);
- return result;
- }
-
- protected:
- uint8_t value_[kSizeInBytes];
-};
-typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
-typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
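-
-// Quick demonstration of the zero-extension behaviour above (illustrative
-// only, not part of the original simulator):
-static inline void WRegisterZeroExtendExample() {
-  SimRegister x;
-  x.Set<uint64_t>(static_cast<uint64_t>(-1));  // All 64 bits set.
-  x.Set<uint32_t>(0x12345678);  // A W-sized write clears the top 32 bits.
-  ASSERT(x.Get<uint64_t>() == static_cast<uint64_t>(0x12345678));
-}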
-
-
-class Simulator : public DecoderVisitor {
- public:
- explicit Simulator(Decoder* decoder,
- Isolate* isolate = NULL,
- FILE* stream = stderr);
- ~Simulator();
-
- // System functions.
-
- static void Initialize(Isolate* isolate);
-
- static Simulator* current(v8::internal::Isolate* isolate);
-
- class CallArgument;
-
- // Call an arbitrary function taking an arbitrary number of arguments. The
- // argument list must be an array of CallArgument values, terminated by
- // CallArgument::End().
- void CallVoid(byte* entry, CallArgument* args);
-
- // Like CallVoid, but expect a return value.
- int64_t CallInt64(byte* entry, CallArgument* args);
- double CallDouble(byte* entry, CallArgument* args);
-
- // V8 calls into generated JS code with 5 parameters and into
- // generated RegExp code with 10 parameters. These are convenience functions,
- // which set up the simulator state and grab the result on return.
- int64_t CallJS(byte* entry,
- byte* function_entry,
- JSFunction* func,
- Object* recv,
- int64_t argc,
- Object*** argv);
- int64_t CallRegExp(byte* entry,
- String* input,
- int64_t start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- int64_t output_size,
- Address stack_base,
- int64_t direct_call,
- void* return_address,
- Isolate* isolate);
-
- // A wrapper class that stores an argument for one of the above Call
- // functions.
- //
- // Only arguments up to 64 bits in size are supported.
- class CallArgument {
- public:
- template<typename T>
- explicit CallArgument(T argument) {
- ASSERT(sizeof(argument) <= sizeof(bits_));
- memcpy(&bits_, &argument, sizeof(argument));
- type_ = X_ARG;
- }
-
- explicit CallArgument(double argument) {
- ASSERT(sizeof(argument) == sizeof(bits_));
- memcpy(&bits_, &argument, sizeof(argument));
- type_ = D_ARG;
- }
-
- explicit CallArgument(float argument) {
- // TODO(all): CallArgument(float) is untested, remove this check once
- // tested.
- UNIMPLEMENTED();
- // Make the D register a NaN to try to trap errors if the callee expects a
- // double. If it expects a float, the callee should ignore the top word.
- ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
- memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
- // Write the float payload to the S register.
- ASSERT(sizeof(argument) <= sizeof(bits_));
- memcpy(&bits_, &argument, sizeof(argument));
- type_ = D_ARG;
- }
-
- // This indicates the end of the arguments list, so that CallArgument
- // objects can be passed into varargs functions.
- static CallArgument End() { return CallArgument(); }
-
- int64_t bits() const { return bits_; }
- bool IsEnd() const { return type_ == NO_ARG; }
- bool IsX() const { return type_ == X_ARG; }
- bool IsD() const { return type_ == D_ARG; }
-
- private:
- enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
-
- // All arguments are aligned to at least 64 bits and we don't support
- // passing bigger arguments, so the payload size can be fixed at 64 bits.
- int64_t bits_;
- CallArgumentType type_;
-
- CallArgument() { type_ = NO_ARG; }
- };
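-
-  // Illustrative use of the call interface above (the entry point is
-  // hypothetical): call entry(1.0, 2.0) and read back the double from d0.
-  //   double CallDoubleExample(Simulator* sim, byte* entry) {
-  //     CallArgument args[] = { CallArgument(1.0),
-  //                             CallArgument(2.0),
-  //                             CallArgument::End() };
-  //     return sim->CallDouble(entry, args);
-  //   }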
-
-
- // Start the debugging command line.
- void Debug();
-
- bool GetValue(const char* desc, int64_t* value);
-
- bool PrintValue(const char* desc);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- void ResetState();
-
- // Runtime call support.
- static void* RedirectExternalReference(void* external_function,
- ExternalReference::Type type);
-
- // Run the simulator.
- static const Instruction* kEndOfSimAddress;
- void DecodeInstruction();
- void Run();
- void RunFrom(Instruction* start);
-
- // Simulation helpers.
- template <typename T>
- void set_pc(T new_pc) {
- ASSERT(sizeof(T) == sizeof(pc_));
- memcpy(&pc_, &new_pc, sizeof(T));
- pc_modified_ = true;
- }
- Instruction* pc() { return pc_; }
-
- void increment_pc() {
- if (!pc_modified_) {
- pc_ = pc_->NextInstruction();
- }
-
- pc_modified_ = false;
- }
-
- void ExecuteInstruction() {
- ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
- CheckBreakNext();
- decoder_->Decode(pc_);
- LogProcessorState();
- increment_pc();
- CheckBreakpoints();
- }
-
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
- VISITOR_LIST(DECLARE)
- #undef DECLARE
-
- // Register accessors.
-
- // Return 'size' bits of the value of an integer register, as the specified
- // type. The value is zero-extended to fill the result.
- //
- // The only supported values of 'size' are kXRegSize and kWRegSize.
- template<typename T>
- T reg(unsigned size, unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kXRegSize) || (size == kWRegSize));
- ASSERT(code < kNumberOfRegisters);
-
- if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
- T result;
- memset(&result, 0, sizeof(result));
- return result;
- }
- return registers_[code].Get<T>(size_in_bytes);
- }
-
- // Like reg(), but infer the access size from the template type.
- template<typename T>
- T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<T>(sizeof(T) * 8, code, r31mode);
- }
-
- // Common specialized accessors for the reg() template.
- int32_t wreg(unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<int32_t>(code, r31mode);
- }
-
- int64_t xreg(unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<int64_t>(code, r31mode);
- }
-
- int64_t reg(unsigned size, unsigned code,
- Reg31Mode r31mode = Reg31IsZeroRegister) const {
- return reg<int64_t>(size, code, r31mode);
- }
-
- // Write 'size' bits of 'value' into an integer register. The value is
- // zero-extended. This behaviour matches AArch64 register writes.
- //
- // The only supported values of 'size' are kXRegSize and kWRegSize.
- template<typename T>
- void set_reg(unsigned size, unsigned code, T value,
- Reg31Mode r31mode = Reg31IsZeroRegister) {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kXRegSize) || (size == kWRegSize));
- ASSERT(code < kNumberOfRegisters);
-
- if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
- return;
- }
- return registers_[code].Set(value, size_in_bytes);
- }
-
- // Like set_reg(), but infer the access size from the template type.
- template<typename T>
- void set_reg(unsigned code, T value,
- Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(sizeof(value) * 8, code, value, r31mode);
- }
-
- // Common specialized accessors for the set_reg() template.
- void set_wreg(unsigned code, int32_t value,
- Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(kWRegSize, code, value, r31mode);
- }
-
- void set_xreg(unsigned code, int64_t value,
- Reg31Mode r31mode = Reg31IsZeroRegister) {
- set_reg(kXRegSize, code, value, r31mode);
- }
-
- // Commonly-used special cases.
- template<typename T>
- void set_lr(T value) {
- ASSERT(sizeof(T) == kPointerSize);
- set_reg(kLinkRegCode, value);
- }
-
- template<typename T>
- void set_sp(T value) {
- ASSERT(sizeof(T) == kPointerSize);
- set_reg(31, value, Reg31IsStackPointer);
- }
-
- int64_t sp() { return xreg(31, Reg31IsStackPointer); }
- int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
- int64_t fp() {
- return xreg(kFramePointerRegCode, Reg31IsStackPointer);
- }
- Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
-
- Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
-
- // Return 'size' bits of the value of a floating-point register, as the
- // specified type. The value is zero-extended to fill the result.
- //
- // The only supported values of 'size' are kDRegSize and kSRegSize.
- template<typename T>
- T fpreg(unsigned size, unsigned code) const {
- unsigned size_in_bytes = size / 8;
- ASSERT(size_in_bytes <= sizeof(T));
- ASSERT((size == kDRegSize) || (size == kSRegSize));
- ASSERT(code < kNumberOfFPRegisters);
- return fpregisters_[code].Get<T>(size_in_bytes);
- }
-
- // Like fpreg(), but infer the access size from the template type.
- template<typename T>
- T fpreg(unsigned code) const {
- return fpreg<T>(sizeof(T) * 8, code);
- }
-
- // Common specialized accessors for the fpreg() template.
- float sreg(unsigned code) const {
- return fpreg<float>(code);
- }
-
- uint32_t sreg_bits(unsigned code) const {
- return fpreg<uint32_t>(code);
- }
-
- double dreg(unsigned code) const {
- return fpreg<double>(code);
- }
-
- uint64_t dreg_bits(unsigned code) const {
- return fpreg<uint64_t>(code);
- }
-
- double fpreg(unsigned size, unsigned code) const {
- switch (size) {
- case kSRegSize: return sreg(code);
- case kDRegSize: return dreg(code);
- default:
- UNREACHABLE();
- return 0.0;
- }
- }
-
- // Write 'value' into a floating-point register. The value is zero-extended.
- // This behaviour matches AArch64 register writes.
- template<typename T>
- void set_fpreg(unsigned code, T value) {
- ASSERT((sizeof(value) == kDRegSizeInBytes) ||
- (sizeof(value) == kSRegSizeInBytes));
- ASSERT(code < kNumberOfFPRegisters);
- fpregisters_[code].Set(value, sizeof(value));
- }
-
- // Common specialized accessors for the set_fpreg() template.
- void set_sreg(unsigned code, float value) {
- set_fpreg(code, value);
- }
-
- void set_sreg_bits(unsigned code, uint32_t value) {
- set_fpreg(code, value);
- }
-
- void set_dreg(unsigned code, double value) {
- set_fpreg(code, value);
- }
-
- void set_dreg_bits(unsigned code, uint64_t value) {
- set_fpreg(code, value);
- }
-
- bool N() { return nzcv_.N() != 0; }
- bool Z() { return nzcv_.Z() != 0; }
- bool C() { return nzcv_.C() != 0; }
- bool V() { return nzcv_.V() != 0; }
- SimSystemRegister& nzcv() { return nzcv_; }
-
- // TODO(jbramley): Find a way to make the fpcr_ members return the proper
- // types, so this accessor is not necessary.
- FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
- SimSystemRegister& fpcr() { return fpcr_; }
-
- // Debug helpers
-
- // Simulator breakpoints.
- struct Breakpoint {
- Instruction* location;
- bool enabled;
- };
- std::vector<Breakpoint> breakpoints_;
- void SetBreakpoint(Instruction* breakpoint);
- void ListBreakpoints();
- void CheckBreakpoints();
-
- // Helpers for the 'next' command.
- // When this is set, the Simulator will insert a breakpoint after the next BL
- // instruction it meets.
- bool break_on_next_;
- // Check if the Simulator should insert a break after the current instruction
- // for the 'next' command.
- void CheckBreakNext();
-
- // Disassemble 'count' instructions starting at the given address.
- void PrintInstructionsAt(Instruction* pc, uint64_t count);
-
- void PrintSystemRegisters(bool print_all = false);
- void PrintRegisters(bool print_all_regs = false);
- void PrintFPRegisters(bool print_all_regs = false);
- void PrintProcessorState();
- void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
- void LogSystemRegisters() {
- if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
- }
- void LogRegisters() {
- if (log_parameters_ & LOG_REGS) PrintRegisters();
- }
- void LogFPRegisters() {
- if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
- }
- void LogProcessorState() {
- LogSystemRegisters();
- LogRegisters();
- LogFPRegisters();
- }
- void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
- if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
- }
-
- int log_parameters() { return log_parameters_; }
- void set_log_parameters(int new_parameters) {
- if (new_parameters & LOG_DISASM) {
- decoder_->InsertVisitorBefore(print_disasm_, this);
- } else {
- decoder_->RemoveVisitor(print_disasm_);
- }
- log_parameters_ = new_parameters;
- }
-
- static inline const char* WRegNameForCode(unsigned code,
- Reg31Mode mode = Reg31IsZeroRegister);
- static inline const char* XRegNameForCode(unsigned code,
- Reg31Mode mode = Reg31IsZeroRegister);
- static inline const char* SRegNameForCode(unsigned code);
- static inline const char* DRegNameForCode(unsigned code);
- static inline const char* VRegNameForCode(unsigned code);
- static inline int CodeFromName(const char* name);
-
- protected:
- // Simulation helpers ------------------------------------
- bool ConditionPassed(Condition cond) {
- switch (cond) {
- case eq:
- return Z();
- case ne:
- return !Z();
- case hs:
- return C();
- case lo:
- return !C();
- case mi:
- return N();
- case pl:
- return !N();
- case vs:
- return V();
- case vc:
- return !V();
- case hi:
- return C() && !Z();
- case ls:
- return !(C() && !Z());
- case ge:
- return N() == V();
- case lt:
- return N() != V();
- case gt:
- return !Z() && (N() == V());
- case le:
- return !(!Z() && (N() == V()));
- case nv: // Fall through.
- case al:
- return true;
- default:
- UNREACHABLE();
- return false;
- }
- }
-
- bool ConditionFailed(Condition cond) {
- return !ConditionPassed(cond);
- }
-
- void AddSubHelper(Instruction* instr, int64_t op2);
- int64_t AddWithCarry(unsigned reg_size,
- bool set_flags,
- int64_t src1,
- int64_t src2,
- int64_t carry_in = 0);
- void LogicalHelper(Instruction* instr, int64_t op2);
- void ConditionalCompareHelper(Instruction* instr, int64_t op2);
- void LoadStoreHelper(Instruction* instr,
- int64_t offset,
- AddrMode addrmode);
- void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
- uint8_t* LoadStoreAddress(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode);
- void LoadStoreWriteBack(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode);
- void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
-
- uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
- uint8_t MemoryRead8(uint8_t* address);
- uint16_t MemoryRead16(uint8_t* address);
- uint32_t MemoryRead32(uint8_t* address);
- float MemoryReadFP32(uint8_t* address);
- uint64_t MemoryRead64(uint8_t* address);
- double MemoryReadFP64(uint8_t* address);
-
- void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
- void MemoryWrite32(uint8_t* address, uint32_t value);
- void MemoryWriteFP32(uint8_t* address, float value);
- void MemoryWrite64(uint8_t* address, uint64_t value);
- void MemoryWriteFP64(uint8_t* address, double value);
-
- int64_t ShiftOperand(unsigned reg_size,
- int64_t value,
- Shift shift_type,
- unsigned amount);
- int64_t Rotate(unsigned reg_width,
- int64_t value,
- Shift shift_type,
- unsigned amount);
- int64_t ExtendValue(unsigned reg_width,
- int64_t value,
- Extend extend_type,
- unsigned left_shift = 0);
-
- uint64_t ReverseBits(uint64_t value, unsigned num_bits);
- uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
-
- void FPCompare(double val0, double val1);
- double FPRoundInt(double value, FPRounding round_mode);
- double FPToDouble(float value);
- float FPToFloat(double value, FPRounding round_mode);
- double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
- double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
- float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
- float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
- int32_t FPToInt32(double value, FPRounding rmode);
- int64_t FPToInt64(double value, FPRounding rmode);
- uint32_t FPToUInt32(double value, FPRounding rmode);
- uint64_t FPToUInt64(double value, FPRounding rmode);
-
- template <typename T>
- T FPMax(T a, T b);
-
- template <typename T>
- T FPMin(T a, T b);
-
- template <typename T>
- T FPMaxNM(T a, T b);
-
- template <typename T>
- T FPMinNM(T a, T b);
-
- void CheckStackAlignment();
-
- inline void CheckPCSComplianceAndRun();
-
-#ifdef DEBUG
- // Corruption values should have their least significant byte cleared to
- // allow the code of the register being corrupted to be inserted.
- static const uint64_t kCallerSavedRegisterCorruptionValue =
- 0xca11edc0de000000UL;
- // This value is a NaN in both 32-bit and 64-bit FP.
- static const uint64_t kCallerSavedFPRegisterCorruptionValue =
- 0x7ff000007f801000UL;
- // This value is a NaN for both 32-bit and 64-bit FP, with a "verbose"
- // (recognisable) payload.
- static const uint64_t kDefaultCPURegisterCorruptionValue =
- 0x7ffbad007f8bad00UL;
-
- void CorruptRegisters(CPURegList* list,
- uint64_t value = kDefaultCPURegisterCorruptionValue);
- void CorruptAllCallerSavedCPURegisters();
-#endif
-
- // Processor state ---------------------------------------
-
- // Output stream.
- FILE* stream_;
- PrintDisassembler* print_disasm_;
-
- // Instrumentation.
- Instrument* instrument_;
-
- // General purpose registers. Register 31 is the stack pointer.
- SimRegister registers_[kNumberOfRegisters];
-
- // Floating point registers
- SimFPRegister fpregisters_[kNumberOfFPRegisters];
-
- // Processor state
- // bits[31:28]: Condition flags N, Z, C, and V.
- // (Negative, Zero, Carry, Overflow)
- SimSystemRegister nzcv_;
-
- // Floating-Point Control Register
- SimSystemRegister fpcr_;
-
- // Only a subset of FPCR features are supported by the simulator. This helper
- // checks that the FPCR settings are supported.
- //
- // This is checked when floating-point instructions are executed, not when
- // FPCR is set. This allows generated code to modify FPCR for external
- // functions, or to save and restore it when entering and leaving generated
- // code.
- void AssertSupportedFPCR() {
- ASSERT(fpcr().DN() == 0); // No default-NaN support.
- ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
- ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
-
- // The simulator does not support half-precision operations so fpcr().AHP()
- // is irrelevant, and is not checked here.
- }
-
- static int CalcNFlag(uint64_t result, unsigned reg_size) {
- return (result >> (reg_size - 1)) & 1;
- }
-
- static int CalcZFlag(uint64_t result) {
- return result == 0;
- }
-
- static const uint32_t kConditionFlagsMask = 0xf0000000;
-
- // Stack
- byte* stack_;
- static const intptr_t stack_protection_size_ = KB;
- intptr_t stack_size_;
- byte* stack_limit_;
- // TODO(aleram): protect the stack.
-
- Decoder* decoder_;
- Decoder* disassembler_decoder_;
-
- // Indicates if the pc has been modified by the instruction and should not be
- // automatically incremented.
- bool pc_modified_;
- Instruction* pc_;
-
- static const char* xreg_names[];
- static const char* wreg_names[];
- static const char* sreg_names[];
- static const char* dreg_names[];
- static const char* vreg_names[];
-
- // Debugger input.
- void set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
- }
- char* last_debugger_input() { return last_debugger_input_; }
- char* last_debugger_input_;
-
- private:
- int log_parameters_;
- Isolate* isolate_;
-};
-
-
- // When running with the simulator, transition into simulated execution at
- // this point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
- FUNCTION_ADDR(entry), \
- p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->CallRegExp( \
- entry, \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-// See also 'class SimulatorStack' in arm/simulator-arm.h.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
- }
-
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-#endif // !defined(USE_SIMULATOR)
-
-} } // namespace v8::internal
-
-#endif // V8_A64_SIMULATOR_A64_H_
diff --git a/deps/v8/src/a64/stub-cache-a64.cc b/deps/v8/src/a64/stub-cache-a64.cc
deleted file mode 100644
index 57c03e8b96..0000000000
--- a/deps/v8/src/a64/stub-cache-a64.cc
+++ /dev/null
@@ -1,1548 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if V8_TARGET_ARCH_A64
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(!AreAliased(receiver, scratch0, scratch1));
- ASSERT(name->IsUniqueName());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
- __ B(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
-
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ Bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
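
// [sketch] The combined mask above lets a single Tst/B(ne) reject both map
// conditions at once. A minimal hedged illustration; the bit positions here
// are made up for the example and are not V8's actual Map layout:

#include <cstdint>

const int kHasNamedInterceptorBitSketch = 2;   // illustrative position
const int kIsAccessCheckNeededBitSketch = 3;   // illustrative position

inline bool NeedsSlowPath(uint8_t map_bit_field) {
  const uint8_t mask = (1 << kHasNamedInterceptorBitSketch) |
                       (1 << kIsAccessCheckNeededBitSketch);
  // Tst sets flags from (bit_field & mask); B(ne) is taken when nonzero.
  return (map_bit_field & mask) != 0;
}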
-
-
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- Register offset,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- // Some code below relies on the fact that the Entry struct contains
- // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- Label miss;
-
- ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
- // Multiply by 3 because there are 3 fields per entry.
- __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ Mov(scratch, Operand(key_offset));
- __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ Ldr(scratch2, MemOperand(scratch));
- __ Cmp(name, scratch2);
- __ B(ne, &miss);
-
- // Check the map matches.
- __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
- __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Cmp(scratch2, scratch3);
- __ B(ne, &miss);
-
- // Get the code entry from the cache.
- __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
- __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
- __ Cmp(scratch2.W(), flags);
- __ B(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ B(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ B(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
- __ Br(scratch);
-
- // Miss: fall through.
- __ Bind(&miss);
-}
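
// [sketch] Plain-C++ restatement of the entry addressing built by the two
// Add instructions above, assuming the three-pointer Entry layout asserted
// at the top of the function and 64-bit pointers (kPointerSizeLog2 == 3):

#include <cassert>
#include <cstdint>

struct EntrySketch { void* key; void* value; void* map; };  // three pointers

inline uintptr_t EntryKeyAddress(uintptr_t key_base, uint32_t offset) {
  uintptr_t index = offset + (static_cast<uintptr_t>(offset) << 1);  // * 3
  return key_base + (index << 3);  // scale by 8-byte pointers
}

int main() {
  EntrySketch table[4] = {};
  uintptr_t base = reinterpret_cast<uintptr_t>(table);
  assert(EntryKeyAddress(base, 2) == reinterpret_cast<uintptr_t>(&table[2]));
  return 0;
}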
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure the flags value does not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Make sure extra and extra2 registers are valid.
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Compute the hash for primary table.
- __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, flags);
- // We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kHeapObjectTagSize,
- CountTrailingZeros(kPrimaryTableSize, 64));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
- __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
- __ And(scratch, scratch, kSecondaryTableSize - 1);
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ Bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
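
// [sketch] The probe index computations above, restated in C++. The table
// sizes and the two-bit tag shift are assumptions for illustration; the
// real constants live in stub-cache.h:

#include <cstdint>

const uint32_t kPrimaryTableSizeSketch = 2048;    // assumed power of two
const uint32_t kSecondaryTableSizeSketch = 512;   // assumed power of two
const int kTagSizeSketch = 2;                     // the "last two bits" above

// Add/Eor/Ubfx sequence: (hash + map) ^ flags, drop tag bits, mask to table.
uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map_word, uint32_t flags) {
  uint32_t key = (name_hash + map_word) ^ flags;
  return (key >> kTagSizeSketch) & (kPrimaryTableSizeSketch - 1);
}

// Sub/Add/And sequence: primary - (name >> tag) + (flags >> tag), masked.
uint32_t SecondaryIndex(uint32_t primary, uint32_t name_word, uint32_t flags) {
  uint32_t key =
      primary - (name_word >> kTagSizeSketch) + (flags >> kTagSizeSketch);
  return key & (kSecondaryTableSizeSketch - 1);
}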
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ Ldr(prototype, GlobalObjectMemOperand());
- // Load the native context from the global or builtins object.
- __ Ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ Ldr(prototype, ContextMemOperand(prototype, index));
- // Load the initial map. The global functions all have initial maps.
- __ Ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- __ Ldr(scratch, GlobalObjectMemOperand());
- __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Ldr(scratch, ContextMemOperand(scratch, index));
- __ Cmp(scratch, Operand(function));
- __ B(ne, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index,
- Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
- USE(representation);
- if (inobject) {
- int offset = index * kPointerSize;
- __ Ldr(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ Ldr(dst, FieldMemOperand(dst, offset));
- }
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- ASSERT(!AreAliased(receiver, scratch));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
- miss_label);
-
- // Load length directly from the JS array.
- __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Get the object's instance type field.
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- // Check if the "not string" bit is set.
- __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object);
-}
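
// [sketch] Equivalent predicate for the Tbnz test above: string instance
// types keep the "not string" bit clear. The mask value is assumed for
// illustration:

#include <cstdint>

const uint8_t kNotStringTagSketch = 0x80;  // assumed single-bit mask (bit 7)

inline bool IsStringInstanceType(uint8_t instance_type) {
  // Tbnz branches to non_string_object when the bit is set.
  return (instance_type & kNotStringTagSketch) == 0;
}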
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object,
-// execution continues at the miss label. The register containing the
-// receiver is not clobbered if the receiver is not a string.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- // Input registers can't alias because we don't want to clobber the
- // receiver register if the object is not a string.
- ASSERT(!AreAliased(receiver, scratch1, scratch2));
-
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- // Check if the object is a JSValue wrapper.
- __ Bind(&check_wrapper);
- __ Cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ B(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, miss, miss);
- __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- // TryGetFunctionPrototype can't put the result directly in x0 because the
- // three input registers can't alias, and we call this function from
- // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
- // explicitly move the result into x0.
- __ Mov(x0, scratch1);
- __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ Mov(scratch, Operand(cell));
- __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
- MacroAssembler* masm,
- Handle<JSObject> holder,
- Register holder_reg,
- Handle<Name> name,
- Label* miss) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss, holder_reg, name, scratch1(), scratch2());
- }
-}
-
-
-// Generate StoreTransition code; the value is passed in the x0 register.
-// When leaving generated code after success, the receiver_reg and storage_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name registers
-// have their original values.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
- Label exit;
-
- ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
- scratch1, scratch2, scratch3));
-
- // We don't need scratch3.
- scratch3 = NoReg;
-
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- ASSERT(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ LoadObject(scratch1, constant);
- __ Cmp(value_reg, scratch1);
- __ B(ne, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
-
- // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
- // It's only used in Fcmp, but it's not really safe to use it like this.
- __ JumpIfNotSmi(value_reg, &heap_number);
- __ SmiUntagToDouble(fp_scratch, value_reg);
- __ B(&do_store);
-
- __ Bind(&heap_number);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ Bind(&do_store);
- __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
- }
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((details.type() == FIELD) &&
- (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ Mov(scratch1, Operand(transition));
- __ Push(receiver_reg, scratch1, value_reg);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- // Update the map of the object.
- __ Mov(scratch1, Operand(transition));
- __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- ASSERT(value_reg.is(x0));
- __ Ret();
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= object->map()->inobject_properties();
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- // TODO(jbramley): This construct appears in several places in this
- // function. Try to clean it up, perhaps using a result_reg.
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ Str(storage_reg, FieldMemOperand(receiver_reg, offset));
- } else {
- __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
- }
-
- if (!FLAG_track_fields || !representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array.
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
- __ Str(storage_reg, FieldMemOperand(scratch1, offset));
- } else {
- __ Str(value_reg, FieldMemOperand(scratch1, offset));
- }
-
- if (!FLAG_track_fields || !representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- __ Bind(&exit);
- // Return the value (register x0).
- ASSERT(value_reg.is(x0));
- __ Ret();
-}
-
-
-// Generate StoreField code; the value is passed in the x0 register.
-// When leaving generated code after success, the receiver_reg and name_reg may
-// be clobbered. Upon branch to miss_label, the receiver and name registers have
-// their original values.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // x0 : value
- Label exit;
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- int index = lookup->GetFieldIndex().field_index();
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= object->map()->inobject_properties();
-
- Representation representation = lookup->representation();
- ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- // Load the double storage.
- if (index < 0) {
- int offset = (index * kPointerSize) + object->map()->instance_size();
- __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
- } else {
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
- }
-
- // Store the value into the storage.
- Label do_store, heap_number;
- // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
- // It's only used in Fcmp, but it's not really safe to use it like this.
- __ JumpIfNotSmi(value_reg, &heap_number);
- __ SmiUntagToDouble(fp_scratch, value_reg);
- __ B(&do_store);
-
- __ Bind(&heap_number);
- __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ Bind(&do_store);
- __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
-
- // Return the value (register x0).
- ASSERT(value_reg.is(x0));
- __ Ret();
- return;
- }
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
-
- if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ Mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array.
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Str(value_reg, FieldMemOperand(scratch1, offset));
-
- if (!FLAG_track_fields || !representation.IsSmi()) {
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ Mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- __ Bind(&exit);
- // Return the value (register x0).
- ASSERT(value_reg.is(x0));
- __ Ret();
-}
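
// [sketch] Both store generators above share this field-offset arithmetic:
// after rebasing against the in-object property count, negative indices land
// inside the object and non-negative ones in the out-of-line properties
// array. Constants here are illustrative assumptions:

struct FieldLocation {
  bool inobject;
  int byte_offset;  // from the object start, or the properties array start
};

FieldLocation LocateField(int field_index, int inobject_properties,
                          int instance_size, int fixed_array_header) {
  const int kPointerSizeSketch = 8;  // assumed 64-bit tagged pointers
  int index = field_index - inobject_properties;
  if (index < 0) {
    // Stored directly inside the object, counting back from its end.
    FieldLocation loc = { true, instance_size + index * kPointerSizeSketch };
    return loc;
  }
  // Stored in the properties backing store, after its header.
  FieldLocation loc = { false,
                        index * kPointerSizeSketch + fixed_array_header };
  return loc;
}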
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ Bind(label);
- __ Mov(this->name(), Operand(name));
- }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ Mov(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(id), masm->isolate()),
- StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch,
- bool is_store,
- int argc,
- Register* values) {
- ASSERT(!AreAliased(receiver, scratch));
- __ Push(receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- // TODO(jbramley): Push these in as few Push() calls as possible.
- Register arg = values[argc-1-i];
- ASSERT(!AreAliased(receiver, scratch, arg));
- __ Push(arg);
- }
-
- ASSERT(optimization.is_simple_api_call());
-
- // Abi for CallApiFunctionStub.
- Register callee = x0;
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Mov(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadObject(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadObject(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ LoadObject(call_data, api_call_info);
- __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ LoadObject(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- __ Mov(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<Name> name,
- Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Mov(scratch1, Operand(receiver_map));
-
- // object_reg and holder_reg registers can alias.
- ASSERT(!AreAliased(object_reg, scratch1, scratch2));
- ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type->IsConstant()) {
- current = Handle<JSObject>::cast(type->AsConstant());
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap() &&
- !current_map->IsJSGlobalProxyMap()) {
- if (!name->IsUniqueName()) {
- ASSERT(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- ASSERT(current.is_null() ||
- (current->property_dictionary()->FindEntry(*name) ==
- NameDictionary::kNotFound));
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
- heap()->InNewSpace(*prototype);
- Register map_reg = NoReg;
- if (need_map) {
- map_reg = scratch1;
- __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ Mov(reg, Operand(prototype));
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- ASSERT(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- __ Bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- GenerateRestoreName(masm(), miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<Object> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
- // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so
- // we can't use it below, but that isn't very obvious. Is there a better way
- // of handling this?
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ Bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3();
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
- __ Cmp(scratch2(), Operand(callback));
- __ B(ne, &miss);
- }
-
- HandlerFrontendFooter(name, &miss);
- return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- __ Mov(receiver(), reg);
- if (kind() == Code::LOAD_IC) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
- } else {
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- GenerateTailCall(masm(), stub.GetCode(isolate()));
- }
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(x0, value);
- __ Ret();
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build the ExecutableAccessorInfo::args_ list on the stack and push the
- // property name below the exit frame, to make the GC aware of them and to
- // store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-
- __ Push(receiver());
-
- if (heap()->InNewSpace(callback->data())) {
- __ Mov(scratch3(), Operand(callback));
- __ Ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
- }
- // TODO(jbramley): Find another scratch register and combine the pushes
- // together. Can we use scratch1() here?
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
- __ Push(scratch3(), scratch4());
- __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), scratch3(), reg, name());
-
- Register args_addr = scratch2();
- __ Add(args_addr, __ StackPointer(), kPointerSize);
-
- // Stack at this point:
- // sp[40] callback data
- // sp[32] undefined
- // sp[24] undefined
- // sp[16] isolate
- // args_addr -> sp[8] reg
- // sp[0] name
-
- // Abi for CallApiGetter.
- Register getter_address_reg = x2;
-
- // Set up the call.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ Mov(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub;
- __ TailCallStub(&stub);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<Object> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<Name> name) {
- ASSERT(!AreAliased(receiver(), this->name(),
- scratch1(), scratch2(), scratch3()));
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow-ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only those; other cases may be added later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++
- // code; the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke the interceptor. Note: map checks from the receiver to the
- // interceptor's holder have been compiled before (see a caller of
- // this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), interceptor_holder,
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if the interceptor provided a value for the property. If so,
- // return immediately.
- Label interceptor_failed;
- __ JumpIfRoot(x0,
- Heap::kNoInterceptorResultSentinelRootIndex,
- &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ Bind(&interceptor_failed);
- if (must_preserve_receiver_reg) {
- __ Pop(this->name(), holder_reg, receiver());
- } else {
- __ Pop(this->name(), holder_reg);
- }
- // Leave the internal frame.
- }
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(
- masm(), receiver(), holder_reg, this->name(), interceptor_holder);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- isolate());
- __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
- }
-}
-
-
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
- Label success;
- // Check that the object is a boolean.
- // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI.
- __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success);
- __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss);
- __ Bind(&success);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
- Register holder_reg = HandlerFrontend(
- IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- // TODO(jbramley): Make Push take more than four arguments and combine these
- // two calls.
- __ Push(receiver(), holder_reg);
- __ Mov(scratch1(), Operand(callback));
- __ Mov(scratch2(), Operand(name));
- __ Push(scratch1(), scratch2(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<HeapType> type,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
- Register value = x0;
- Register receiver = x1;
- Label miss;
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(
- receiver, JSGlobalObject::kGlobalReceiverOffset));
- }
- __ Push(receiver, value);
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(value);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> object,
- Handle<Name> name) {
- Label miss;
-
- ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
-
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
- Handle<JSObject> last,
- Handle<Name> name) {
- NonexistentHandlerFrontend(type, last, name);
-
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases. For
-// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
-// KeyedStoreCompiler::transition_map(). We should verify which registers are
-// actually scratch registers, and which are important. For now, we use the same
-// assignments as ARM to remain on the safe side.
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { x0, x2, x3, x1, x4, x5 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { x1, x0, x2, x3, x4, x5 };
- return registers;
-}
-
-
-Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { x1, x2, x0, x3, x4, x5 };
- return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { x2, x1, x0, x3, x4, x5 };
- return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(
- receiver, JSGlobalObject::kGlobalReceiverOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<HeapType> type,
- Handle<GlobalObject> global,
- Handle<PropertyCell> cell,
- Handle<Name> name,
- bool is_dont_delete) {
- Label miss;
- HandlerFrontendHeader(type, receiver(), global, name, &miss);
-
- // Get the value from the cell.
- __ Mov(x3, Operand(cell));
- __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
- __ Mov(x0, x4);
- __ Ret();
-
- HandlerFrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- Register map_reg = scratch1();
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Label try_next;
- __ Cmp(map_reg, Operand(map));
- __ B(ne, &try_next);
- if (type->Is(HeapType::Number())) {
- ASSERT(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- ASSERT(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetICCode(kind(), type, name, state);
-}
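
// [sketch] Conceptually the emitted polymorphic IC is a linear map dispatch,
// mirroring the Cmp/B(ne) chain above; the types here are illustrative
// stand-ins, not V8's:

#include <cstddef>

typedef void (*HandlerSketch)();

HandlerSketch Dispatch(const void* receiver_map, const void* const* maps,
                       const HandlerSketch* handlers, size_t count,
                       HandlerSketch miss) {
  for (size_t i = 0; i < count; ++i) {
    if (receiver_map == maps[i]) return handlers[i];  // jump to handler
  }
  return miss;  // fall through to the miss builtin
}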
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; i++) {
- __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetICCode(
- kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- x0 : key
- // -- x1 : receiver
- // -----------------------------------
- Label slow, miss;
-
- Register result = x0;
- Register key = x0;
- Register receiver = x1;
-
- __ JumpIfNotSmi(key, &miss);
- __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
- __ Ret();
-
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ Bind(&miss);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/utils-a64.cc b/deps/v8/src/a64/utils-a64.cc
deleted file mode 100644
index 7e710d770e..0000000000
--- a/deps/v8/src/a64/utils-a64.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#if V8_TARGET_ARCH_A64
-
-#include "a64/utils-a64.h"
-
-
-namespace v8 {
-namespace internal {
-
-#define __ assm->
-
-
-int CountLeadingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
- ASSERT((width == 32) || (width == 64));
- int count = 0;
- uint64_t bit_test = 1UL << (width - 1);
- while ((count < width) && ((bit_test & value) == 0)) {
- count++;
- bit_test >>= 1;
- }
- return count;
-}
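
// [sketch] One possible answer to the TODO above for GCC/Clang hosts; the
// builtin is undefined for a zero argument, so that case is handled
// explicitly (width is assumed to be 32 or 64, as the loop version asserts):

inline int FastCountLeadingZeros(uint64_t value, int width) {
  // Mask to the requested width so stray high bits match the loop version.
  if (width < 64) value &= (static_cast<uint64_t>(1) << width) - 1;
  if (value == 0) return width;
  return __builtin_clzll(value) - (64 - width);
}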
-
-
-int CountLeadingSignBits(int64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
- ASSERT((width == 32) || (width == 64));
- if (value >= 0) {
- return CountLeadingZeros(value, width) - 1;
- } else {
- return CountLeadingZeros(~value, width) - 1;
- }
-}
-
-
-int CountTrailingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for A64 hosts.
- ASSERT((width == 32) || (width == 64));
- int count = 0;
- while ((count < width) && (((value >> count) & 1) == 0)) {
- count++;
- }
- return count;
-}
-
-
-int CountSetBits(uint64_t value, int width) {
- // TODO(jbramley): Would it be useful to allow other widths? The
- // implementation already supports them.
- ASSERT((width == 32) || (width == 64));
-
- // Mask out unused bits to ensure that they are not counted.
- value &= (0xffffffffffffffffUL >> (64-width));
-
- // Add up the set bits.
- // The algorithm works by adding pairs of bit fields together iteratively,
- // where the size of each bit field doubles each time.
- // An example for an 8-bit value:
- // Bits: h g f e d c b a
- // \ | \ | \ | \ |
- // value = h+g f+e d+c b+a
- // \ | \ |
- // value = h+g+f+e d+c+b+a
- // \ |
- // value = h+g+f+e+d+c+b+a
- value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
- value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
- value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
- value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
- value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
-
- return value;
-}
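
// [sketch] Quick sanity checks for the parallel-sum popcount above; on
// GCC/Clang hosts, __builtin_popcountll after the same width masking would
// be an equivalent shortcut:

#include <cassert>

inline void CountSetBitsSelfTest() {
  assert(CountSetBits(0, 64) == 0);
  assert(CountSetBits(0xf0f0, 32) == 8);  // two nibbles of four set bits
  assert(CountSetBits(~static_cast<uint64_t>(0), 64) == 64);
  assert(CountSetBits(~static_cast<uint64_t>(0), 32) == 32);  // high half masked
}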
-
-
-int MaskToBit(uint64_t mask) {
- ASSERT(CountSetBits(mask, 64) == 1);
- return CountTrailingZeros(mask, 64);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_A64
diff --git a/deps/v8/src/a64/utils-a64.h b/deps/v8/src/a64/utils-a64.h
deleted file mode 100644
index 16c51a9c8b..0000000000
--- a/deps/v8/src/a64/utils-a64.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_A64_UTILS_A64_H_
-#define V8_A64_UTILS_A64_H_
-
-#include <cmath>
-#include "v8.h"
-#include "a64/constants-a64.h"
-
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
-namespace v8 {
-namespace internal {
-
-// Floating point representation.
-static inline uint32_t float_to_rawbits(float value) {
- uint32_t bits = 0;
- memcpy(&bits, &value, 4);
- return bits;
-}
-
-
-static inline uint64_t double_to_rawbits(double value) {
- uint64_t bits = 0;
- memcpy(&bits, &value, 8);
- return bits;
-}
-
-
-static inline float rawbits_to_float(uint32_t bits) {
- float value = 0.0;
- memcpy(&value, &bits, 4);
- return value;
-}
-
-
-static inline double rawbits_to_double(uint64_t bits) {
- double value = 0.0;
- memcpy(&value, &bits, 8);
- return value;
-}
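
// [sketch] The memcpy-based punning above is strict-aliasing safe and
// round-trips exactly; two well-known IEEE-754 encodings as sanity checks:

#include <cassert>

inline void RawBitsSelfTest() {
  assert(double_to_rawbits(1.0) == 0x3ff0000000000000UL);  // IEEE-754 1.0
  assert(float_to_rawbits(1.0f) == 0x3f800000);            // IEEE-754 1.0f
  assert(rawbits_to_double(double_to_rawbits(-2.5)) == -2.5);
}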
-
-
-// Bits counting.
-int CountLeadingZeros(uint64_t value, int width);
-int CountLeadingSignBits(int64_t value, int width);
-int CountTrailingZeros(uint64_t value, int width);
-int CountSetBits(uint64_t value, int width);
-int MaskToBit(uint64_t mask);
-
-
-// NaN tests.
-inline bool IsSignallingNaN(double num) {
- const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
- uint64_t raw = double_to_rawbits(num);
- if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
- return true;
- }
- return false;
-}
-
-
-inline bool IsSignallingNaN(float num) {
- const uint64_t kFP32QuietNaNMask = 0x00400000UL;
- uint32_t raw = float_to_rawbits(num);
- if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
- return true;
- }
- return false;
-}
-
-
-template <typename T>
-inline bool IsQuietNaN(T num) {
- return std::isnan(num) && !IsSignallingNaN(num);
-}
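
// [sketch] The quiet bit tested above is the top mantissa bit (bit 51 for
// doubles, bit 22 for floats). For example, assuming the host preserves
// signalling NaN payloads through a by-value double return:

#include <cassert>

inline void NaNKindSelfTest() {
  // Quiet NaN: exponent all ones, quiet bit set.
  assert(IsQuietNaN(rawbits_to_double(0x7ff8000000000000UL)));
  // Signalling NaN: exponent all ones, quiet bit clear, payload nonzero.
  assert(IsSignallingNaN(rawbits_to_double(0x7ff0000000000001UL)));
}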
-
-} } // namespace v8::internal
-
-#endif // V8_A64_UTILS_A64_H_
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 83e1bb4b39..5ec6484601 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -267,7 +267,6 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
- HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 2c7db3be16..7a412df28d 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -6293,25 +6293,6 @@ void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
}
-void V8::RunMicrotasks(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::HandleScope scope(i_isolate);
- i::V8::RunMicrotasks(i_isolate);
-}
-
-
-void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
- i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask));
-}
-
-
-void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) {
- reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun);
-}
-
-
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
i::V8::RemoveCallCompletedCallback(callback);
}
@@ -6971,13 +6952,6 @@ SnapshotObjectId HeapGraphNode::GetId() const {
int HeapGraphNode::GetSelfSize() const {
- size_t size = ToInternal(this)->self_size();
- CHECK(size <= static_cast<size_t>(internal::kMaxInt));
- return static_cast<int>(size);
-}
-
-
-size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
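
The deleted GetSelfSize() narrowed a size_t to int behind a CHECK; the pattern in isolation (CheckedSizeToInt and kMaxInt here are illustrative stand-ins, not V8 API):

#include <cassert>
#include <cstddef>

static const int kMaxInt = 0x7FFFFFFF;  // stand-in for internal::kMaxInt

static int CheckedSizeToInt(size_t size) {
  // Fail loudly instead of silently truncating sizes above INT_MAX.
  assert(size <= static_cast<size_t>(kMaxInt));
  return static_cast<int>(size);
}
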
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 501b5c7dff..44de7aabc3 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -105,8 +105,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2, r3 };
- descriptor->register_param_count_ = 2;
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -602,7 +602,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
- ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
@@ -3005,102 +3004,82 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
+ // Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : Feedback vector
- // r3 : slot in feedback vector (Smi)
- Label check_array, initialize_array, initialize_non_array, megamorphic, done;
+ // r2 : cache cell for call target
+ Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
- Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->null_value());
- Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
- // Load the cache state into r4.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ // Load the cache state into r3.
+ __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r4, r1);
- __ b(eq, &done);
- __ CompareRoot(r4, kMegamorphicRootIndex);
+ __ cmp(r3, r1);
__ b(eq, &done);
- // Check if we're dealing with the Array function or not.
- __ LoadArrayFunction(r5);
- __ cmp(r1, r5);
- __ b(eq, &check_array);
+ // If we came here, we need to check whether we are the Array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in r3.
+ __ ldr(r5, FieldMemOperand(r3, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Non-array cache: Check the cache state.
- __ CompareRoot(r4, kPremonomorphicRootIndex);
- __ b(eq, &initialize_non_array);
- __ CompareRoot(r4, kUninitializedRootIndex);
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
__ b(ne, &megamorphic);
-
- // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
- // immortal immovable object (null) so no write-barrier is needed.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ LoadRoot(ip, kPremonomorphicRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // Array cache: Check the cache state to see if we're in a monomorphic
- // state where the state object is an AllocationSite object.
- __ bind(&check_array);
- __ ldr(r5, FieldMemOperand(r4, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(eq, &done);
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ CompareRoot(r4, kUninitializedRootIndex);
- __ b(eq, &initialize_array);
- __ CompareRoot(r4, kPremonomorphicRootIndex);
- __ b(eq, &initialize_array);
+ __ bind(&miss);
- // Both caches: Monomorphic -> megamorphic. The sentinel is an
- // immortal immovable object (undefined) so no write-barrier is needed.
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
__ bind(&megamorphic);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ LoadRoot(ip, kMegamorphicRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
__ jmp(&done);
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ bind(&initialize_array);
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // indicating the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have one, and store it in the cell.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
- __ Push(r3, r2, r1, r0);
+ __ Push(r2, r1, r0);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
- __ Pop(r3, r2, r1, r0);
+ __ Pop(r2, r1, r0);
__ SmiUntag(r0);
}
__ b(&done);
- // Non-array cache: Premonomorphic -> monomorphic.
- __ bind(&initialize_non_array);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r1, MemOperand(r4, 0));
-
- __ Push(r4, r2, r1);
- __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Pop(r4, r2, r1);
+ __ bind(&not_array_function);
+ __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
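
Stripped of assembly, the transitions this stub encodes amount to a three-state cache; a schematic C++ sketch under invented names (Cell and both sentinels are hypothetical stand-ins), omitting the Array/AllocationSite special case:

#include <cstddef>

struct Cell { const void* value; };
static const void* const kUninitialized = NULL;         // the-hole sentinel
static const void* const kMegamorphic = &kMegamorphic;  // undefined sentinel

static void RecordCallTarget(Cell* cell, const void* function) {
  if (cell->value == function || cell->value == kMegamorphic)
    return;                        // monomorphic hit, or already megamorphic
  if (cell->value == kUninitialized)
    cell->value = function;        // uninitialized -> monomorphic
  else
    cell->value = kMegamorphic;    // monomorphic miss -> megamorphic
}
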
@@ -3108,8 +3087,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
- // r2 : feedback vector
- // r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
+ // r2 : cache cell for call target
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3118,7 +3096,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_function);
// Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
@@ -3166,14 +3144,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
}
// Check for function proxy.
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
@@ -3213,14 +3190,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : feedback vector
- // r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
+ // r2 : cache cell for call target
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
@@ -3228,7 +3204,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r4;
+ Register jmp_reg = r3;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3236,10 +3212,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r4: object type
+ // r3: object type
Label do_call;
__ bind(&slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -5199,7 +5175,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1));
if (FLAG_debug_code) {
@@ -5307,8 +5283,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : feedback vector (fixed array or undefined)
- // -- r3 : slot index (if r2 is fixed array)
+ // -- r2 : type info cell
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5317,25 +5292,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r4, Operand(kSmiTagMask));
+ __ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r4, r4, r5, MAP_TYPE);
+ __ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid fixed array.
+ // We should either have undefined in r2 or a valid cell.
Label okay_here;
- Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &okay_here);
- __ ldr(r4, FieldMemOperand(r2, 0));
- __ cmp(r4, Operand(fixed_array_map));
- __ Assert(eq, kExpectedFixedArrayInRegisterR2);
-
- // r3 should be a smi if we don't have undefined in r2
- __ AssertSmi(r3);
-
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ cmp(r3, Operand(cell_map));
+ __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
__ bind(&okay_here);
}
@@ -5343,10 +5314,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r2, FieldMemOperand(r2, FixedArray::kHeaderSize));
+ __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
- // If the feedback vector is undefined, or contains anything other than an
+ // If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ ldr(r4, FieldMemOperand(r2, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
@@ -5459,7 +5429,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
+ bool restore_context = RestoreContextBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5537,20 +5507,15 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- // Stores return the first js argument
- int return_value_offset = 0;
- if (is_store) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- &context_restore_operand);
+ restore_context ?
+ &context_restore_operand : NULL);
}
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 9990bccdcf..efd11069b3 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -265,10 +265,9 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
- // -- r2 : feedback array
- // -- r3 : slot in feedback array
+ // -- r2 : cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
}
@@ -287,10 +286,9 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : feedback array
- // -- r3 : feedback slot (smi)
+ // -- r2 : cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
}
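
The masks passed to Generate_DebugBreakCallHelper are plain per-register bit flags; a toy illustration (this Register is a stand-in, not the V8 type):

#include <cstdint>

struct Register {
  int code;
  uint32_t bit() const { return 1u << code; }  // one bit per register code
};

int main() {
  Register r1 = {1}, r2 = {2};
  uint32_t object_regs = r1.bit() | r2.bit();  // 0b110: preserve r1 and r2
  return object_regs == 6 ? 0 : 1;
}
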
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 2eb5ccf974..813e9492df 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -130,9 +130,6 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
- InitializeFeedbackVector();
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -671,7 +668,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -1032,7 +1029,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, clause->CompareId());
+ CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1077,7 +1074,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1167,13 +1163,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
- __ Move(r1, FeedbackVector());
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
+ __ Move(r1, cell);
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1482,7 +1478,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
@@ -1495,8 +1491,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
- : "[ Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1559,12 +1556,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1695,7 +1692,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -2097,7 +2094,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(r1, MemOperand(sp, kPointerSize));
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, TypeFeedbackId::None());
+ CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2294,7 +2291,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2321,7 +2318,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2398,7 +2396,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -2436,7 +2435,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC();
+ CallStoreIC(NOT_CONTEXTUAL);
break;
}
case KEYED_PROPERTY: {
@@ -2456,60 +2455,41 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, LanguageMode mode) {
- __ push(r0); // Value.
- __ mov(r1, Operand(name));
- __ mov(r0, Operand(Smi::FromInt(mode)));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
-}
-
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- CallStoreIC();
-
+ CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
+ if (var->IsStackLocal()) {
+ __ ldr(r1, StackOperand(var));
+ __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
+ __ str(result_register(), StackOperand(var), eq);
+ } else {
+ ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are
+ // able to drill a hole to that function context, even from inside a
+ // 'with' context. We thus bypass the normal static scope lookup for
+ // var->IsContextSlot().
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
+ __ push(r0); // Value.
+ __ mov(r1, Operand(var->name()));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2522,16 +2502,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
+ if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2539,7 +2523,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- EmitStoreToStackLocalOrContextSlot(var, location);
+ // Perform the assignment.
+ __ str(r0, location);
+ if (var->IsContextSlot()) {
+ __ mov(r3, r0);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ } else {
+ ASSERT(var->IsLookupSlot());
+ __ push(r0); // Value.
+ __ mov(r1, Operand(var->name()));
+ __ mov(r0, Operand(Smi::FromInt(language_mode())));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2557,7 +2555,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(expr->AssignmentFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2574,7 +2572,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->AssignmentFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2601,10 +2599,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
+ ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
+ ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2716,15 +2716,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ Move(r2, FeedbackVector());
- __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2905,10 +2905,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
- __ Move(r2, FeedbackVector());
- __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(r2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -4411,7 +4411,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ NOT_CONTEXTUAL,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4440,7 +4442,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(expr->CountStoreFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4456,7 +4458,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->CountStoreFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4476,7 +4478,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4485,7 +4487,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4647,7 +4648,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4682,7 +4683,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 1af6cf87b8..d324a8c6b3 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -333,7 +333,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -341,7 +342,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_state,
+ Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -1159,7 +1162,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1168,7 +1172,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
+ Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fdf4ddfd80..38509a6ea8 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -840,6 +840,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
+ if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1236,7 +1237,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegister(instr->left());
+ LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div));
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 982ac2c5a3..29a176628e 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -2580,6 +2580,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_block_(NULL),
next_block_(NULL),
allocator_(allocator),
+ position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2716,6 +2717,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
+ int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 5ff3fa0764..05348bf018 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -277,8 +277,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -907,7 +906,6 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1343,45 +1341,54 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- HDiv* hdiv = instr->hydrogen();
- int32_t divisor = hdiv->right()->GetInteger32Constant();
- Register result = ToRegister(instr->result());
- ASSERT(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
- hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
- hdiv->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- Abs(divisor) != 1) {
- __ tst(dividend, Operand(Abs(divisor) - 1));
- DeoptimizeIf(ne, instr->environment());
- }
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ rsb(result, dividend, Operand(0));
- return;
+ const Register dividend = ToRegister(instr->left());
+ const Register result = ToRegister(instr->result());
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
}
- int32_t shift = WhichPowerOf2(Abs(divisor));
- if (shift == 0) {
- __ mov(result, dividend);
- } else if (shift == 1) {
- __ add(result, dividend, Operand(dividend, LSR, 31));
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ __ sub(result, dividend, Operand::Zero(), SetCC);
+ __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ __ mov(result, Operand(result, ASR, power));
+ if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
+ return; // Don't fall through to "__ rsb" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ tst(dividend, Operand(test_value));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(result, Operand(dividend, ASR, power));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ }
} else {
- __ mov(result, Operand(dividend, ASR, 31));
- __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ if (divisor < 0) {
+ __ rsb(result, dividend, Operand(0));
+ } else {
+ __ Move(result, dividend);
+ }
}
- if (shift > 0) __ mov(result, Operand(result, ASR, shift));
- if (divisor < 0) __ rsb(result, result, Operand(0));
+
return;
}
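
The shift sequences above avoid a divide instruction; the underlying arithmetic as a standalone sketch (assumes arithmetic right shift on signed ints, which ARM provides):

#include <cassert>
#include <cstdint>

static int32_t DivByPowerOf2(int32_t dividend, int power) {
  // A bare arithmetic shift rounds toward -infinity; adding (2^power - 1)
  // to negative dividends first makes the result truncate toward zero.
  int32_t bias = (dividend >> 31) & ((int32_t(1) << power) - 1);
  return (dividend + bias) >> power;
}

int main() {
  assert(DivByPowerOf2(7, 1) == 3);    //  7 / 2
  assert(DivByPowerOf2(-7, 1) == -3);  // -7 / 2 truncates to -3, not -4
  return 0;
}
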
@@ -4044,18 +4051,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
DeoptimizeIf(eq, instr->environment());
-
- // We know that value is a smi now, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4085,6 +4086,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -5245,7 +5249,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size,
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index c377274efb..77c514ff54 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -2806,8 +2806,16 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
+ // We want to pass the msg string like a smi to avoid GC problems;
+ // however, msg is not guaranteed to be aligned properly. Instead, we
+ // pass an aligned pointer that is a proper v8 smi, but also pass the
+ // alignment difference from the real pointer as a smi.
const char* msg = GetBailoutReason(reason);
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2819,24 +2827,25 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(Smi::FromInt(reason)));
+ mov(r0, Operand(p0));
+ push(r0);
+ mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
-
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 7;
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
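
The pointer split in Abort() above can be seen in isolation: break an arbitrary char* into a smi-aligned base plus a small delta, both GC-safe to pass, and reassemble on the other side. A sketch, assuming the 1-bit smi tag of 32-bit V8:

#include <cassert>
#include <cstdint>

static const intptr_t kSmiTagMask = 1;  // assumption: 1-bit smi tag
static const intptr_t kSmiTag = 0;

int main() {
  const char* msg = "abort reason";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // aligned: a valid smi
  intptr_t delta = p1 - p0;                     // 0 or 1, itself smi-safe
  assert(reinterpret_cast<const char*>(p0 + delta) == msg);
  return 0;
}
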
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 24d7fe58c4..0af5162e93 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -207,10 +207,6 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
- Address get_sp() {
- return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
- }
-
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 3bc9554594..694a4ed68f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -783,14 +783,13 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- bool is_store,
- int argc,
- Register* values) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
@@ -855,7 +854,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ CallApiFunctionStub stub(true, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -1077,6 +1076,15 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization,
+ Handle<Map> receiver_map) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch3(), 0, NULL);
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1252,6 +1260,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -1310,6 +1336,21 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1317,6 +1358,10 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
+ // Handle store cache miss.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 6125f0f61c..cfaa8d7efc 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -57,17 +57,18 @@ function ArrayBufferSlice(start, end) {
var relativeStart = TO_INTEGER(start);
var first;
+ var byte_length = %ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
- first = MathMax(this.byteLength + relativeStart, 0);
+ first = MathMax(byte_length + relativeStart, 0);
} else {
- first = MathMin(relativeStart, this.byteLength);
+ first = MathMin(relativeStart, byte_length);
}
- var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
+ var relativeEnd = IS_UNDEFINED(end) ? byte_length : TO_INTEGER(end);
var fin;
if (relativeEnd < 0) {
- fin = MathMax(this.byteLength + relativeEnd, 0);
+ fin = MathMax(byte_length + relativeEnd, 0);
} else {
- fin = MathMin(relativeEnd, this.byteLength);
+ fin = MathMin(relativeEnd, byte_length);
}
if (fin < first) {
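
The clamping that slice performs against the live byte length is ordinary range arithmetic; the same logic in C++ (ClampSlice is a hypothetical helper, and the undefined-end case, which defaults to the full length, is elided):

#include <algorithm>
#include <cstdint>

struct Range { int64_t first, fin; };

static Range ClampSlice(int64_t start, int64_t end, int64_t byte_length) {
  // Negative indices count back from the end; everything is clamped to
  // [0, byte_length] against the buffer's current length.
  int64_t first = start < 0 ? std::max(byte_length + start, int64_t(0))
                            : std::min(start, byte_length);
  int64_t fin = end < 0 ? std::max(byte_length + end, int64_t(0))
                        : std::min(end, byte_length);
  if (fin < first) fin = first;  // never a negative-length slice
  Range r = {first, fin};
  return r;
}
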
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 4b4c3d4daf..436d035c3e 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -59,8 +59,6 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@@ -75,8 +73,6 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -126,6 +122,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
+
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -1339,8 +1336,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
-#elif V8_TARGET_ARCH_A64
- function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 89b0e5a622..ce7d9f5b7d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -1002,6 +1002,32 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
+inline bool is_intn(int x, int n) {
+ return -(1 << (n-1)) <= x && x < (1 << (n-1));
+}
+
+inline bool is_int8(int x) { return is_intn(x, 8); }
+inline bool is_int16(int x) { return is_intn(x, 16); }
+inline bool is_int18(int x) { return is_intn(x, 18); }
+inline bool is_int24(int x) { return is_intn(x, 24); }
+
+inline bool is_uintn(int x, int n) {
+ return (x & -(1 << n)) == 0;
+}
+
+inline bool is_uint2(int x) { return is_uintn(x, 2); }
+inline bool is_uint3(int x) { return is_uintn(x, 3); }
+inline bool is_uint4(int x) { return is_uintn(x, 4); }
+inline bool is_uint5(int x) { return is_uintn(x, 5); }
+inline bool is_uint6(int x) { return is_uintn(x, 6); }
+inline bool is_uint8(int x) { return is_uintn(x, 8); }
+inline bool is_uint10(int x) { return is_uintn(x, 10); }
+inline bool is_uint12(int x) { return is_uintn(x, 12); }
+inline bool is_uint16(int x) { return is_uintn(x, 16); }
+inline bool is_uint24(int x) { return is_uintn(x, 24); }
+inline bool is_uint26(int x) { return is_uintn(x, 26); }
+inline bool is_uint28(int x) { return is_uintn(x, 28); }
+
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
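
The new predicates just test whether x fits in an n-bit signed or unsigned field; a quick self-contained check of the ranges they accept:

#include <cassert>

inline bool is_intn(int x, int n) {
  return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
}
inline bool is_uintn(int x, int n) { return (x & -(1 << n)) == 0; }

int main() {
  assert(is_intn(-128, 8) && !is_intn(128, 8));   // int8:  [-128, 127]
  assert(is_uintn(255, 8) && !is_uintn(256, 8));  // uint8: [0, 255]
  assert(!is_uintn(-1, 8));                       // negatives never fit
  return 0;
}
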
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 6b2f48f017..1a9919b5aa 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -593,17 +593,6 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
-int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
- CallType call_type = GetCallType(isolate);
- if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
- // Call only uses a slot in some cases.
- return 1;
- }
-
- return 0;
-}
-
-
Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
@@ -644,10 +633,10 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
allocation_site_ =
- oracle->GetCallNewAllocationSite(CallNewFeedbackSlot());
- is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
+ oracle->GetCallNewAllocationSite(CallNewFeedbackId());
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind();
}
@@ -1050,11 +1039,6 @@ CaseClause::CaseClause(Zone* zone,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
-#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- }
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1067,12 +1051,6 @@ CaseClause::CaseClause(Zone* zone,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
-#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
- void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
- increase_node_count(); \
- add_slot_node(node); \
- add_flag(kDontSelfOptimize); \
- }
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1107,8 +1085,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
+REGULAR_NODE(Call)
+REGULAR_NODE(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@@ -1133,12 +1111,11 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
+DONT_SELFOPTIMIZE_NODE(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
-
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index aacc5e4fc8..2b33820f9e 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -32,7 +32,6 @@
#include "assembler.h"
#include "factory.h"
-#include "feedback-slots.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
@@ -182,7 +181,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) {}
+ AstProperties() : node_count_(0) { }
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@@ -915,8 +914,7 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement,
- public FeedbackSlotInterface {
+class ForInStatement V8_FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -924,16 +922,7 @@ class ForInStatement V8_FINAL : public ForEachStatement,
return subject();
}
- // Type feedback information.
- virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
- virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
- virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
-
- int ForInFeedbackSlot() {
- ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
- return for_in_feedback_slot_;
- }
-
+ TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
@@ -947,13 +936,11 @@ class ForInStatement V8_FINAL : public ForEachStatement,
ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
: ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
- for_in_feedback_slot_(kInvalidFeedbackSlot),
body_id_(GetNextId(zone)),
prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
- int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
@@ -1746,7 +1733,7 @@ class Property V8_FINAL : public Expression {
};
-class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
+class Call V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Call)
@@ -1754,16 +1741,7 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; }
- virtual int ComputeFeedbackSlotCount(Isolate* isolate);
- virtual void SetFirstFeedbackSlot(int slot) {
- call_feedback_slot_ = slot;
- }
-
- bool HasCallFeedbackSlot() const {
- return call_feedback_slot_ != kInvalidFeedbackSlot;
- }
- int CallFeedbackSlot() const { return call_feedback_slot_; }
+ TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
if (expression()->IsProperty()) {
@@ -1812,7 +1790,6 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
- call_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
@@ -1825,13 +1802,12 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
Handle<JSFunction> target_;
Handle<Cell> cell_;
- int call_feedback_slot_;
const BailoutId return_id_;
};
-class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
+class CallNew V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1839,17 +1815,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
- virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
- virtual void SetFirstFeedbackSlot(int slot) {
- callnew_feedback_slot_ = slot;
- }
-
- int CallNewFeedbackSlot() {
- ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
- return callnew_feedback_slot_;
- }
-
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
@@ -1859,8 +1824,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
return allocation_site_;
}
- static int feedback_slots() { return 1; }
-
BailoutId ReturnId() const { return return_id_; }
protected:
@@ -1873,7 +1836,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
- callnew_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { }
private:
@@ -1884,7 +1846,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
Handle<JSFunction> target_;
ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_;
- int callnew_feedback_slot_;
const BailoutId return_id_;
};
@@ -2371,15 +2332,7 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
- void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
- slot_processor_ = *slot_processor;
- }
- void ProcessFeedbackSlots(Isolate* isolate) {
- slot_processor_.ProcessFeedbackSlots(isolate);
- }
- int slot_count() {
- return slot_processor_.slot_count();
- }
+
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@@ -2429,7 +2382,6 @@ class FunctionLiteral V8_FINAL : public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
- DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
@@ -2904,13 +2856,10 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- explicit AstConstructionVisitor(Zone* zone)
- : dont_optimize_reason_(kNoReason),
- zone_(zone) { }
+ AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
- DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private:
template<class> friend class AstNodeFactory;
@@ -2927,21 +2876,13 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
- void add_slot_node(FeedbackSlotInterface* slot_node) {
- slot_processor_.add_slot_node(zone_, slot_node);
- }
-
AstProperties properties_;
- DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
- Zone* zone_;
};
class AstNullVisitor BASE_EMBEDDED {
public:
- explicit AstNullVisitor(Zone* zone) {}
-
// Node visitors.
#define DEF_VISIT(type) \
void Visit##type(type* node) {}
@@ -2957,9 +2898,7 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Zone* zone)
- : zone_(zone),
- visitor_(zone) { }
+ explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
Visitor* visitor() { return &visitor_; }
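
The ast.h hunks above remove the Zone argument that AstConstructionVisitor and AstNodeFactory only needed for the slot processor. For context, here is a toy sketch of the factory/visitor arrangement; all types and names below are mine, not V8's. The factory notifies its visitor as each node is constructed, which is how per-function properties such as node counts accumulate during parsing.

#include <memory>

struct Properties { int node_count = 0; };

class ConstructionVisitor {
 public:
  void VisitAnyNode() { ++properties_.node_count; }
  Properties* properties() { return &properties_; }
 private:
  Properties properties_;
};

template <class Visitor>
class NodeFactory {
 public:
  std::unique_ptr<int> NewNode() {  // toy stand-in for an AST node
    visitor_.VisitAnyNode();        // bookkeeping happens at construction
    return std::unique_ptr<int>(new int(0));
  }
  Visitor* visitor() { return &visitor_; }
 private:
  Visitor visitor_;
};

int main() {
  NodeFactory<ConstructionVisitor> factory;
  factory.NewNode();
  factory.NewNode();
  return factory.visitor()->properties()->node_count == 2 ? 0 : 1;
}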
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index d7d4df6763..789721edfc 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -159,8 +159,6 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_A64
-#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
diff --git a/deps/v8/src/atomicops_internals_a64_gcc.h b/deps/v8/src/atomicops_internals_a64_gcc.h
deleted file mode 100644
index 074da5841e..0000000000
--- a/deps/v8/src/atomicops_internals_a64_gcc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace internal {
-
-inline void MemoryBarrier() { /* Not used. */ }
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- "1: \n\t"
- "clrex \n\t" // In case we didn't swap.
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
- "add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
- "cbnz %w[temp], 0b \n\t" // Retry on failure.
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- "0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
- "add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
- "cbnz %w[temp], 0b \n\t" // Retry on failure.
- "dmb ish \n\t" // Data memory barrier.
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- "dmb ish \n\t" // Data memory barrier.
- "1: \n\t"
- // If the compare failed the 'dmb' is unnecessary, but we still need a
- // 'clrex'.
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- "0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- "1: \n\t"
-    // If the compare failed we still need a 'clrex'.
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering before the store above.
- ); // NOLINT
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering after the store below.
- ); // NOLINT
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering before the load above.
- ); // NOLINT
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering after the load below.
- ); // NOLINT
- return *ptr;
-}
-
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "1: \n\t"
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
- "add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- "0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
- "add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "dmb ish \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "dmb ish \n\t"
- "1: \n\t"
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- "0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "1: \n\t"
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
- return *ptr;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
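
The deleted header implemented every primitive as an ldxr/stxr retry loop, with dmb ish barriers placed before or after the loop to get acquire and release ordering. As a point of reference only, the same contracts can be expressed with C++11 <atomic>; this is an illustrative sketch, not something this era of V8 could ship, since it still supported pre-C++11 toolchains.

#include <atomic>
#include <cstdint>

typedef int32_t Atomic32;

Atomic32 NoBarrier_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value) {
  // On failure, compare_exchange_strong writes the observed value back
  // into old_value, matching the "return the previous value" contract of
  // the ldxr/cmp/stxr loop above.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_relaxed);
  return old_value;
}

Atomic32 Acquire_Load(const std::atomic<Atomic32>* ptr) {
  return ptr->load(std::memory_order_acquire);  // load, then barrier
}

void Release_Store(std::atomic<Atomic32>* ptr, Atomic32 value) {
  ptr->store(value, std::memory_order_release);  // barrier, then store
}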
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index d11ff34754..ef802ba987 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1582,9 +1582,6 @@ void Genesis::InstallNativeFunctions() {
void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
- INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask",
- enqueue_external_microtask);
-
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@@ -2569,9 +2566,7 @@ class NoTrackDoubleFieldsForSerializerScope {
}
}
~NoTrackDoubleFieldsForSerializerScope() {
- if (Serializer::enabled()) {
- FLAG_track_double_fields = flag_;
- }
+ FLAG_track_double_fields = flag_;
}
private:
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index b9ff9e1344..e68890fcb2 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -1599,7 +1599,9 @@ void Builtins::InitBuiltinFunctionTable() {
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
- functions->flags = Code::ComputeHandlerFlags(Code::kind); \
+ functions->flags = Code::ComputeFlags( \
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
+ Code::NORMAL, Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1625,9 +1627,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- // TODO(jbramley): I had to increase the size of this buffer from 8KB because
- // we can generate a lot of debug code on A64.
- union { int force_alignment; byte buffer[16*KB]; } u;
+ union { int force_alignment; byte buffer[8*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
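
The union trick this hunk preserves is what provides the alignment the comment asks for: giving the union an int member forces the whole object, including the byte array, to at least int alignment. A standalone sketch of the idiom (the constant and type names here are mine):

#include <cstdint>

static const int KB = 1024;

union AlignedBuffer {
  int force_alignment;     // forces int alignment on the union
  uint8_t buffer[8 * KB];  // code is generated into this array
};
// In C++11 the same effect is: alignas(int) uint8_t buffer[8 * KB];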
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index f52feda6c1..767ad6513a 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -66,27 +66,6 @@ struct IdentifierPart {
}
};
-
-// WhiteSpace according to ECMA-262 5.1, 7.2.
-struct WhiteSpace {
- static inline bool Is(uc32 c) {
- return c == 0x0009 || // <TAB>
- c == 0x000B || // <VT>
- c == 0x000C || // <FF>
- c == 0xFEFF || // <BOM>
- // \u0020 and \u00A0 are included in unibrow::WhiteSpace.
- unibrow::WhiteSpace::Is(c);
- }
-};
-
-
-// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
-struct WhiteSpaceOrLineTerminator {
- static inline bool Is(uc32 c) {
- return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
- }
-};
-
} } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_
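
With the dedicated predicates gone, callers fall back to unibrow::WhiteSpace (see the conversions-inl.h and dateparser.h hunks below). For reference, a self-contained sketch of the ECMA-262 5.1 section 7.2 test the deleted struct expressed; the function name is mine, and the Unicode cases unibrow covers are reduced to the two common ones:

#include <cstdint>

typedef int32_t uc32;

inline bool IsEcmaWhiteSpace(uc32 c) {
  return c == 0x0009 ||  // <TAB>
         c == 0x000B ||  // <VT>
         c == 0x000C ||  // <FF>
         c == 0x0020 ||  // <SP>
         c == 0x00A0 ||  // <NBSP>
         c == 0xFEFF;    // <BOM>
  // A complete implementation also accepts every Unicode category-Zs
  // code point, which unibrow::WhiteSpace handles in V8.
}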
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index 57f1852618..f7b145fc8a 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -34,7 +34,6 @@
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -52,23 +51,6 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0)
#endif
-// Simulator specific helpers.
-#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_A64)
- // TODO(all): If possible automatically prepend an indicator like
- // UNIMPLEMENTED or LOCATION.
- #define ASM_UNIMPLEMENTED(message) \
- __ Debug(message, __LINE__, NO_PARAM)
- #define ASM_UNIMPLEMENTED_BREAK(message) \
- __ Debug(message, __LINE__, \
- FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
- #define ASM_LOCATION(message) \
- __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
-#else
- #define ASM_UNIMPLEMENTED(message)
- #define ASM_UNIMPLEMENTED_BREAK(message)
- #define ASM_LOCATION(message)
-#endif
-
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index b7247eb6bf..638f9e5a1d 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -81,11 +81,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
- HLoadNamedField* BuildLoadNamedField(HValue* object,
- Representation representation,
- int offset,
- bool is_inobject);
-
enum ArgumentClass {
NONE,
SINGLE,
@@ -252,7 +247,8 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType());
+ GetStubType(),
+ GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -299,8 +295,7 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
// Check if the parameter is already a SMI or heap number.
IfBuilder if_number(this);
if_number.If<HIsSmiAndBranch>(value);
- if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map(),
- top_info());
+ if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
if_number.Then();
// Return the number.
@@ -363,8 +358,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
- if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map(),
- top_info());
+ if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
@@ -375,7 +369,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.Else();
IfBuilder if_fixed(this);
- if_fixed.If<HCompareMap>(elements, factory->fixed_array_map(), top_info());
+ if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
push_value = BuildCloneShallowArray(boilerplate,
allocation_site,
@@ -536,11 +530,15 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
- HInstruction* feedback_vector = GetParameter(0);
- HInstruction* slot = GetParameter(1);
- Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
- INITIALIZING_STORE);
- return feedback_vector;
+ // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
+ // cell is really a Cell, and so no write barrier is needed.
+ // TODO(mvstanton): Add a debug_code check to verify the input cell is really
+ // a cell. (perhaps with a new instruction, HAssert).
+ HInstruction* cell = GetParameter(0);
+ HObjectAccess access = HObjectAccess::ForCellValue();
+ store = Add<HStoreNamedField>(cell, access, object);
+ store->SkipWriteBarrier();
+ return cell;
}
@@ -554,7 +552,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -564,32 +562,14 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
}
-HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
- HValue* object,
- Representation representation,
- int offset,
- bool is_inobject) {
- HObjectAccess access = is_inobject
- ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
- : HObjectAccess::ForBackingStoreOffset(offset, representation);
- if (representation.IsDouble()) {
- // Load the heap number.
- object = Add<HLoadNamedField>(
- object, static_cast<HValue*>(NULL),
- access.WithRepresentation(Representation::Tagged()));
- // Load the double value from it.
- access = HObjectAccess::ForHeapNumberValue();
- }
- return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
-}
-
-
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- return BuildLoadNamedField(GetParameter(0),
- casted_stub()->representation(),
- casted_stub()->offset(),
- casted_stub()->is_inobject());
+ Representation rep = casted_stub()->representation();
+ int offset = casted_stub()->offset();
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
+ HObjectAccess::ForBackingStoreOffset(offset, rep);
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -600,10 +580,12 @@ Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- return BuildLoadNamedField(GetParameter(0),
- casted_stub()->representation(),
- casted_stub()->offset(),
- casted_stub()->is_inobject());
+ Representation rep = casted_stub()->representation();
+ int offset = casted_stub()->offset();
+ HObjectAccess access = casted_stub()->is_inobject() ?
+ HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
+ HObjectAccess::ForBackingStoreOffset(offset, rep);
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -617,7 +599,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@@ -1051,16 +1033,13 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
+ HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- if (stub->check_global()) {
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- HValue* global = Add<HConstant>(
- StoreGlobalStub::global_placeholder(isolate()));
- Add<HCheckMaps>(global, placeholder_map, top_info());
- }
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ Add<HCheckMaps>(receiver, placeholder_map, top_info());
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
@@ -1117,7 +1096,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
- STORE, ALLOW_RETURN_HOLE,
+ true, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index be14cf6e87..d86bc70dcf 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -119,7 +119,8 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType());
+ GetStubType(),
+ GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 07e34be578..a7283ba642 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -101,7 +101,7 @@ namespace internal {
V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_A64)
+#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
V(SetProperty) \
@@ -188,6 +188,9 @@ class CodeStub BASE_EMBEDDED {
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
+ virtual int GetStubFlags() {
+ return -1;
+ }
virtual void PrintName(StringStream* stream);
@@ -439,8 +442,6 @@ class RuntimeCallHelper {
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/code-stubs-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -882,7 +883,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- virtual ExtraICState GetExtraICState() { return kind(); }
+ virtual int GetStubFlags() { return kind(); }
protected:
HandlerStub() : HICStub() { }
@@ -954,27 +955,19 @@ class LoadFieldStub: public HandlerStub {
class StoreGlobalStub : public HandlerStub {
public:
- explicit StoreGlobalStub(bool is_constant, bool check_global) {
- bit_field_ = IsConstantBits::encode(is_constant) |
- CheckGlobalBits::encode(check_global);
- }
-
- static Handle<HeapObject> global_placeholder(Isolate* isolate) {
- return isolate->factory()->uninitialized_value();
+ explicit StoreGlobalStub(bool is_constant) {
+ bit_field_ = IsConstantBits::encode(is_constant);
}
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
- GlobalObject* global,
+ Map* receiver_map,
PropertyCell* cell) {
Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- if (check_global()) {
- // Replace the placeholder cell and global object map with the actual
- // global cell and receiver map.
- code->ReplaceNthObject(1, global_placeholder(isolate)->map(), global);
- code->ReplaceNthObject(1, isolate->heap()->meta_map(), global->map());
- }
+ // Replace the placeholder cell and global object map with the actual global
+ // cell and receiver map.
Map* cell_map = isolate->heap()->global_property_cell_map();
code->ReplaceNthObject(1, cell_map, cell);
+ code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
return code;
}
@@ -986,12 +979,11 @@ class StoreGlobalStub : public HandlerStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- bool is_constant() const {
+ virtual ExtraICState GetExtraICState() { return bit_field_; }
+
+ bool is_constant() {
return IsConstantBits::decode(bit_field_);
}
- bool check_global() const {
- return CheckGlobalBits::decode(bit_field_);
- }
void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value);
}
@@ -1004,11 +996,13 @@ class StoreGlobalStub : public HandlerStub {
}
private:
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
- class CheckGlobalBits: public BitField<bool, 9, 1> {};
+
+ int bit_field_;
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
@@ -1016,14 +1010,13 @@ class StoreGlobalStub : public HandlerStub {
class CallApiFunctionStub : public PlatformCodeStub {
public:
- CallApiFunctionStub(bool is_store,
+ CallApiFunctionStub(bool restore_context,
bool call_data_undefined,
int argc) {
bit_field_ =
- IsStoreBits::encode(is_store) |
+ RestoreContextBits::encode(restore_context) |
CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc);
- ASSERT(!is_store || argc == 1);
}
private:
@@ -1031,7 +1024,7 @@ class CallApiFunctionStub : public PlatformCodeStub {
virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
- class IsStoreBits: public BitField<bool, 0, 1> {};
+ class RestoreContextBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
@@ -1873,21 +1866,23 @@ class DoubleToIStub : public PlatformCodeStub {
int offset,
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code()) |
- DestinationRegisterBits::encode(destination.code()) |
+ bit_field_ = SourceRegisterBits::encode(source.code_) |
+ DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
- return Register::from_code(SourceRegisterBits::decode(bit_field_));
+ Register result = { SourceRegisterBits::decode(bit_field_) };
+ return result;
}
Register destination() {
- return Register::from_code(DestinationRegisterBits::decode(bit_field_));
+ Register result = { DestinationRegisterBits::decode(bit_field_) };
+ return result;
}
bool is_truncating() {
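
Both the old and the new DoubleToIStub constructors pack register codes, the offset, and the feature bits into one integer through V8's BitField template. A simplified stand-in (this is not the real template, just the same encode/decode idea):

#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

typedef BitField<int, 0, 4> SourceRegisterBits;
typedef BitField<int, 4, 4> DestinationRegisterBits;

int main() {
  uint32_t bits = SourceRegisterBits::encode(3) |
                  DestinationRegisterBits::encode(7);
  assert(SourceRegisterBits::decode(bits) == 3);
  assert(DestinationRegisterBits::decode(bits) == 7);
  return 0;
}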
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index f6c36682de..13ce2218df 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -165,8 +165,6 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
function->debug_name()->ToCString().get(), tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- Optimized code ---\n");
- PrintF(tracing_scope.file(),
- "optimization_id = %d\n", info->optimization_id());
} else {
PrintF(tracing_scope.file(), "--- Code ---\n");
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index be76de8aea..8bd4302662 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -72,8 +72,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index d466778069..b9e13c1661 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -60,8 +60,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
script_(script),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
- optimization_id_(-1) {
+ this_has_uses_(true) {
Initialize(script->GetIsolate(), BASE, zone);
}
@@ -73,8 +72,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
- optimization_id_(-1) {
+ this_has_uses_(true) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -88,8 +86,7 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
- optimization_id_(-1) {
+ this_has_uses_(true) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -101,8 +98,7 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true),
- optimization_id_(-1) {
+ this_has_uses_(true) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -215,7 +211,8 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- code_stub()->GetStubType());
+ code_stub()->GetStubType(),
+ code_stub()->GetStubFlags());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -246,13 +243,6 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
-void CompilationInfo::PrepareForCompilation(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- function()->ProcessFeedbackSlots(isolate_);
-}
-
-
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -373,7 +363,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info()->function());
- unoptimized.PrepareForCompilation(info()->scope());
+ unoptimized.SetScope(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@@ -408,7 +398,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = FLAG_hydrogen_track_positions
+ graph_builder_ = FLAG_emit_opt_code_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
@@ -992,7 +982,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfoWithZone info(script);
info.SetFunction(literal);
- info.PrepareForCompilation(literal->scope());
+ info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
Isolate* isolate = info.isolate();
@@ -1188,7 +1178,7 @@ Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
- PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
+ PrintF("]\n");
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index f7ff09c57d..3bf4db5780 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -175,8 +175,10 @@ class CompilationInfo {
ASSERT(function_ == NULL);
function_ = literal;
}
- // When the scope is applied, we may have deferred work to do on the function.
- void PrepareForCompilation(Scope* scope);
+ void SetScope(Scope* scope) {
+ ASSERT(scope_ == NULL);
+ scope_ = scope;
+ }
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
@@ -227,7 +229,6 @@ class CompilationInfo {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
- optimization_id_ = isolate()->NextOptimizationId();
}
void DisableOptimization();
@@ -316,8 +317,6 @@ class CompilationInfo {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
}
- int optimization_id() const { return optimization_id_; }
-
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@@ -453,8 +452,6 @@ class CompilationInfo {
Handle<Foreign> object_wrapper_;
- int optimization_id_;
-
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index b2e0661a34..bd6c6a2bbc 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -167,7 +167,6 @@ enum BindingFlags {
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
- V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -319,7 +318,6 @@ class Context: public FixedArray {
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
- ENQUEUE_EXTERNAL_MICROTASK_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index e503eb5027..3cb7ef2992 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -128,7 +128,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
+ if (!unicode_cache->IsWhiteSpace(**current)) return true;
++*current;
}
return false;
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 7dc489de34..27584ce39e 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -122,7 +122,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
+ if (unicode_cache_->IsWhiteSpace(ch_)) {
Next();
return true;
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index d46c7b1ac6..d474e2059c 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -792,7 +792,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<JSArray>());
+ Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 68b10d9612..18be014c14 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -731,12 +731,6 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
-
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
-
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(),
@@ -745,8 +739,7 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " (opt #%d) @%d, FP to SP delta: %d]\n",
- input_data->OptimizationId()->value(),
+ " @%d, FP to SP delta: %d]\n",
bailout_id_,
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@@ -754,6 +747,10 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -1753,7 +1750,11 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
materialized_objects_->Add(arguments);
- materialization_value_index_ += length;
+ // To keep consistent object counters, we still materialize the
+ // nested values (but we throw them away).
+ for (int i = 0; i < length; ++i) {
+ MaterializeNextValue();
+ }
} else if (desc.is_arguments()) {
// Construct an arguments object and copy the parameters to a newly
// allocated arguments object backing store.
@@ -2691,9 +2692,6 @@ FrameDescription::FrameDescription(uint32_t frame_size,
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
- // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
- // isn't used before the next safepoint, the GC will try to scan it as a
- // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
@@ -2998,8 +2996,7 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
}
case Translation::ARGUMENTS_OBJECT:
- // This can be only emitted for local slots not for argument slots.
- break;
+ return SlotRef::NewArgumentsObject(iterator->Next());
case Translation::CAPTURED_OBJECT: {
return SlotRef::NewDeferredObject(iterator->Next());
@@ -3049,7 +3046,7 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
break;
}
- UNREACHABLE();
+ FATAL("We should never get here - unexpected deopt info.");
return SlotRef();
}
@@ -3129,9 +3126,8 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
// the nested slots of captured objects
number_of_slots--;
SlotRef& slot = slot_refs_.last();
- if (slot.Representation() == SlotRef::DEFERRED_OBJECT) {
- number_of_slots += slot.DeferredObjectLength();
- }
+ ASSERT(slot.Representation() != SlotRef::ARGUMENTS_OBJECT);
+ number_of_slots += slot.GetChildrenCount();
if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
should_deopt = true;
@@ -3185,7 +3181,7 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
return literal_;
default:
- UNREACHABLE();
+ FATAL("We should never get here - unexpected deopt info.");
return Handle<Object>::null();
}
}
@@ -3215,19 +3211,18 @@ Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
previously_materialized_objects_->get(object_index), isolate);
materialized_objects_.Add(return_value);
- // Now need to skip all nested objects (and possibly read them from
- // the materialization store, too)
+ // Now need to skip all the nested objects (and possibly read them from
+ // the materialization store, too).
for (int i = 0; i < length; i++) {
SlotRef& slot = slot_refs_[current_slot_];
current_slot_++;
- // For nested deferred objects, we need to read its properties
- if (slot.Representation() == SlotRef::DEFERRED_OBJECT) {
- length += slot.DeferredObjectLength();
- }
+ // We need to read all the nested objects - add them to the
+ // number of objects we need to process.
+ length += slot.GetChildrenCount();
- // For nested deferred and duplicate objects, we need to put them into
- // our materialization array
+ // Put the nested deferred/duplicate objects into our materialization
+ // array.
if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
int nested_object_index = materialized_objects_.length();
@@ -3253,8 +3248,20 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
case SlotRef::LITERAL: {
return slot.GetValue(isolate);
}
+ case SlotRef::ARGUMENTS_OBJECT: {
+ // We should never need to materialize an arguments object,
+ // but we still need to put something into the array
+ // so that the indexing is consistent.
+ materialized_objects_.Add(isolate->factory()->undefined_value());
+ int length = slot.GetChildrenCount();
+ for (int i = 0; i < length; ++i) {
+        // We don't need the argument; just ignore it.
+ GetNext(isolate, lvl + 1);
+ }
+ return isolate->factory()->undefined_value();
+ }
case SlotRef::DEFERRED_OBJECT: {
- int length = slot.DeferredObjectLength();
+ int length = slot.GetChildrenCount();
ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
@@ -3323,7 +3330,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
break;
}
- UNREACHABLE();
+ FATAL("We should never get here - unexpected deopt slot kind.");
return Handle<Object>::null();
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 806433c6f3..67690ded0d 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -794,7 +794,9 @@ class SlotRef BASE_EMBEDDED {
// with the DeferredObjectLength() method
// (the SlotRefs of the nested objects follow
// this SlotRef in the depth-first order.)
- DUPLICATE_OBJECT // Duplicated object of a deferred object.
+ DUPLICATE_OBJECT, // Duplicated object of a deferred object.
+ ARGUMENTS_OBJECT // Arguments object - only used to keep indexing
+ // in sync, it should not be materialized.
};
SlotRef()
@@ -806,6 +808,13 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
+ static SlotRef NewArgumentsObject(int length) {
+ SlotRef slot;
+ slot.representation_ = ARGUMENTS_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
+ }
+
static SlotRef NewDeferredObject(int length) {
SlotRef slot;
slot.representation_ = DEFERRED_OBJECT;
@@ -822,7 +831,14 @@ class SlotRef BASE_EMBEDDED {
return slot;
}
- int DeferredObjectLength() { return deferred_object_length_; }
+ int GetChildrenCount() {
+ if (representation_ == DEFERRED_OBJECT ||
+ representation_ == ARGUMENTS_OBJECT) {
+ return deferred_object_length_;
+ } else {
+ return 0;
+ }
+ }
int DuplicateObjectId() { return duplicate_object_id_; }
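
GetChildrenCount() makes the slot traversal uniform: deferred objects and arguments objects report how many nested SlotRefs follow them in depth-first order, and every other kind reports zero. A toy version of the counting loop that SlotRefValueBuilder runs (types and names are mine):

#include <cstddef>
#include <vector>

struct Slot { int children; };  // children == GetChildrenCount()

// Start expecting one slot; each consumed slot adds its children to the
// outstanding count, exactly like the number_of_slots bookkeeping above.
size_t CountSlotsForFirstValue(const std::vector<Slot>& slots) {
  long remaining = 1;
  size_t consumed = 0;
  for (size_t i = 0; i < slots.size() && remaining > 0; ++i) {
    --remaining;
    remaining += slots[i].children;
    ++consumed;
  }
  return consumed;
}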
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 690a4e3f4e..da2d880a49 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -368,20 +368,6 @@ void Execution::RunMicrotasks(Isolate* isolate) {
}
-void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
- bool threw = false;
- Handle<Object> args[] = { microtask };
- Execution::Call(
- isolate,
- isolate->enqueue_external_microtask(),
- isolate->factory()->undefined_value(),
- 1,
- args,
- &threw);
- ASSERT(!threw);
-}
-
-
bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -516,15 +502,15 @@ void StackGuard::FullDeopt() {
}
-bool StackGuard::IsDeoptMarkedAllocationSites() {
+bool StackGuard::IsDeoptMarkedCode() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0;
+ return (thread_local_.interrupt_flags_ & DEOPT_MARKED_CODE) != 0;
}
-void StackGuard::DeoptMarkedAllocationSites() {
+void StackGuard::DeoptMarkedCode() {
ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES;
+ thread_local_.interrupt_flags_ |= DEOPT_MARKED_CODE;
set_interrupt_limits(access);
}
@@ -1040,9 +1026,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
- if (stack_guard->IsDeoptMarkedAllocationSites()) {
- stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES);
- isolate->heap()->DeoptMarkedAllocationSites();
+ if (stack_guard->IsDeoptMarkedCode()) {
+ stack_guard->Continue(DEOPT_MARKED_CODE);
+ Deoptimizer::DeoptimizeMarkedCode(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
ASSERT(isolate->concurrent_recompilation_enabled());
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index b53a83358c..abf4f1dc65 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -45,7 +45,7 @@ enum InterruptFlag {
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7,
API_INTERRUPT = 1 << 8,
- DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
+ DEOPT_MARKED_CODE = 1 << 9
};
@@ -175,7 +175,6 @@ class Execution : public AllStatic {
bool* has_pending_exception);
static void RunMicrotasks(Isolate* isolate);
- static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};
@@ -223,8 +222,8 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
- bool IsDeoptMarkedAllocationSites();
- void DeoptMarkedAllocationSites();
+ bool IsDeoptMarkedCode();
+ void DeoptMarkedCode();
void Continue(InterruptFlag after_what);
void RequestInterrupt(InterruptCallback callback, void* data);
@@ -282,7 +281,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
+#if V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
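
The rename from DEOPT_MARKED_ALLOCATION_SITES to DEOPT_MARKED_CODE only relabels bit 9 of the interrupt word; the request/check/clear mechanics stay the same. A minimal sketch of that bookkeeping, leaving out the ExecutionAccess locking and the stack-limit adjustment the real StackGuard performs:

#include <cstdint>

enum InterruptFlag {
  FULL_DEOPT = 1 << 6,
  INSTALL_CODE = 1 << 7,
  API_INTERRUPT = 1 << 8,
  DEOPT_MARKED_CODE = 1 << 9
};

struct ThreadLocalState { uint32_t interrupt_flags_; };

void RequestDeoptMarkedCode(ThreadLocalState* tls) {
  tls->interrupt_flags_ |= DEOPT_MARKED_CODE;  // request the interrupt
}

// Check-and-clear, the way HandleStackGuardInterrupt uses Continue().
bool TestAndClear(ThreadLocalState* tls, InterruptFlag flag) {
  bool was_set = (tls->interrupt_flags_ & flag) != 0;
  if (was_set) tls->interrupt_flags_ &= ~static_cast<uint32_t>(flag);
  return was_set;
}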
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 6bce5d3a6a..aead7be0cc 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -1300,6 +1300,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
+Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->InternalizeString(*value), String);
+}
+
+
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
@@ -1566,6 +1572,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
+ Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
@@ -1573,6 +1580,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
start_position,
end_position,
*script,
+ *stack_trace,
*stack_frames),
JSMessageObject);
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 00ae587d64..db25b09a91 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -225,6 +225,9 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
+ // Return the internalized version of the passed in string.
+ Handle<String> InternalizedStringFromString(Handle<String> value);
+
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
@@ -525,6 +528,7 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
+ Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<SeededNumberDictionary> DictionaryAtNumberPut(
diff --git a/deps/v8/src/feedback-slots.h b/deps/v8/src/feedback-slots.h
deleted file mode 100644
index 9760c652bc..0000000000
--- a/deps/v8/src/feedback-slots.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FEEDBACK_SLOTS_H_
-#define V8_FEEDBACK_SLOTS_H_
-
-#include "v8.h"
-
-#include "isolate.h"
-
-namespace v8 {
-namespace internal {
-
-enum ComputablePhase {
- DURING_PARSE,
- AFTER_SCOPING
-};
-
-
-class FeedbackSlotInterface {
- public:
- static const int kInvalidFeedbackSlot = -1;
-
- virtual ~FeedbackSlotInterface() {}
-
- // When can we ask how many feedback slots are necessary?
- virtual ComputablePhase GetComputablePhase() = 0;
- virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0;
- virtual void SetFirstFeedbackSlot(int slot) = 0;
-};
-
-
-class DeferredFeedbackSlotProcessor {
- public:
- DeferredFeedbackSlotProcessor()
- : slot_nodes_(NULL),
- slot_count_(0) { }
-
- void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) {
- if (slot->GetComputablePhase() == DURING_PARSE) {
- // No need to add to the list
- int count = slot->ComputeFeedbackSlotCount(zone->isolate());
- slot->SetFirstFeedbackSlot(slot_count_);
- slot_count_ += count;
- } else {
- if (slot_nodes_ == NULL) {
- slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone);
- }
- slot_nodes_->Add(slot, zone);
- }
- }
-
- void ProcessFeedbackSlots(Isolate* isolate) {
- // Scope analysis must have been done.
- if (slot_nodes_ == NULL) {
- return;
- }
-
- int current_slot = slot_count_;
- for (int i = 0; i < slot_nodes_->length(); i++) {
- FeedbackSlotInterface* slot_interface = slot_nodes_->at(i);
- int count = slot_interface->ComputeFeedbackSlotCount(isolate);
- if (count > 0) {
- slot_interface->SetFirstFeedbackSlot(current_slot);
- current_slot += count;
- }
- }
-
- slot_count_ = current_slot;
- slot_nodes_->Clear();
- }
-
- int slot_count() {
- ASSERT(slot_count_ >= 0);
- return slot_count_;
- }
-
- private:
- ZoneList<FeedbackSlotInterface*>* slot_nodes_;
- int slot_count_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FEEDBACK_SLOTS_H_
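
The deleted DeferredFeedbackSlotProcessor numbered feedback slots in two phases: nodes whose slot count is already known during parsing are numbered eagerly, while the rest are queued and numbered only after scope analysis. A minimal standalone C++ sketch of that scheme, using hypothetical Slot/SlotNumberer names rather than the real V8 classes:

    #include <vector>

    // Stand-in for FeedbackSlotInterface: a node that needs `count`
    // consecutive slots and learns its base index through `first`.
    struct Slot {
      int count = 0;
      int first = -1;
      bool known_at_parse = false;  // DURING_PARSE vs. AFTER_SCOPING
    };

    class SlotNumberer {
     public:
      void Add(Slot* s) {
        if (s->known_at_parse) {
          s->first = slot_count_;   // number eagerly, as in add_slot_node()
          slot_count_ += s->count;
        } else {
          deferred_.push_back(s);   // defer until scope analysis is done
        }
      }
      void ProcessDeferred() {      // mirrors ProcessFeedbackSlots()
        for (Slot* s : deferred_) {
          if (s->count > 0) {
            s->first = slot_count_;
            slot_count_ += s->count;
          }
        }
        deferred_.clear();
      }
      int slot_count() const { return slot_count_; }

     private:
      std::vector<Slot*> deferred_;
      int slot_count_ = 0;
    };
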
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 476e5348af..c0eaf16da2 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -234,6 +234,7 @@ DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+DEFINE_bool(smi_x64_store_opt, false, "optimized stores of smi on x64")
// Flags for optimization types.
DEFINE_bool(optimize_for_size, false,
@@ -254,9 +255,6 @@ DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
-DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
-DEFINE_bool(use_write_barrier_elimination, true,
- "eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -418,6 +416,10 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
+// checks.cc
+DEFINE_bool(stack_trace_on_abort, true,
+ "print a stack trace if an assertion failure occurs")
+
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -533,7 +535,6 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
-DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -581,35 +582,19 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
+// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#ifdef V8_TARGET_ARCH_A64
-DEFINE_int(sim_stack_alignment, 16,
- "Stack alignment in bytes in simulator. This must be a power of two "
- "and it must be at least 16. 16 is default.")
-#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-#endif
-DEFINE_int(sim_stack_size, 2 * MB / KB,
- "Stack size of the A64 simulator in kBytes (default is 2 MB)")
-DEFINE_bool(log_regs_modified, true,
- "When logging register values, only print modified registers.")
-DEFINE_bool(log_colour, true,
- "When logging, try to use coloured output.")
-DEFINE_bool(ignore_asm_unimplemented_break, false,
- "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
-DEFINE_bool(trace_sim_messages, false,
- "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
-DEFINE_bool(stack_trace_on_illegal, false,
- "print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
+DEFINE_bool(trace_exception, false,
+ "print stack trace when throwing exceptions")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -814,11 +799,6 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
-DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
-DEFINE_string(log_instruction_file, "a64_inst.csv",
- "AArch64 instruction statistics log file.")
-DEFINE_int(log_instruction_period, 1 << 22,
- "AArch64 instruction statistics logging period.")
DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
@@ -826,9 +806,6 @@ DEFINE_bool(redirect_code_traces, false,
DEFINE_string(redirect_code_traces_to, NULL,
"output deopt information and disassembly into the given file")
-DEFINE_bool(hydrogen_track_positions, false,
- "track source code positions when building IR")
-
//
// Disassembler only flags
//
@@ -861,6 +838,8 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_bool(emit_opt_code_positions, false,
+ "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -869,7 +848,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, hydrogen_track_positions)
+DEFINE_implication(sodium, emit_opt_code_positions)
DEFINE_implication(sodium, code_comments)
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 2973bad6af..2b15bfffab 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -36,8 +36,6 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/frames-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index af2b55afb5..e5b6d3dd02 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -35,11 +35,7 @@
namespace v8 {
namespace internal {
-#if V8_TARGET_ARCH_A64
-typedef uint64_t RegList;
-#else
typedef uint32_t RegList;
-#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 16bb6c0d01..e14afefda4 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -345,6 +345,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
+ cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -386,15 +387,6 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
-void FullCodeGenerator::InitializeFeedbackVector() {
- int length = info_->function()->slot_count();
- ASSERT_EQ(isolate()->heap()->the_hole_value(),
- *TypeFeedbackInfo::UninitializedSentinel(isolate()));
- feedback_vector_ = isolate()->factory()->NewFixedArrayWithHoles(length,
- TENURED);
-}
-
-
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -413,7 +405,6 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
- info->set_feedback_vector(*FeedbackVector());
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}
@@ -434,6 +425,21 @@ void FullCodeGenerator::Initialize() {
}
+void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
+ if (type_feedback_cells_.is_empty()) return;
+ int length = type_feedback_cells_.length();
+ int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
+ Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
+ isolate()->factory()->NewFixedArray(array_size, TENURED));
+ for (int i = 0; i < length; i++) {
+ cache->SetAstId(i, type_feedback_cells_[i].ast_id);
+ cache->SetCell(i, *type_feedback_cells_[i].cell);
+ }
+ TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
+ *cache);
+}
+
+
void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
@@ -443,13 +449,13 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
TypeFeedbackId id) {
ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
- CallIC(ic, id);
+ CallIC(ic, contextual_mode, id);
}
-void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
+void FullCodeGenerator::CallStoreIC(ContextualMode mode, TypeFeedbackId id) {
Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
- CallIC(ic, id);
+ CallIC(ic, mode, id);
}
@@ -484,6 +490,13 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
}
+void FullCodeGenerator::RecordTypeFeedbackCell(
+ TypeFeedbackId id, Handle<Cell> cell) {
+ TypeFeedbackCellEntry entry = { id, cell };
+ type_feedback_cells_.Add(entry, zone());
+}
+
+
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
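
The reinstated PopulateTypeFeedbackCells() packs the recorded (ast id, cell) pairs into one flat fixed array. A rough standalone C++ illustration of that layout, with uintptr_t standing in for the tagged V8 values; the "ast id first, two elements per entry" ordering is an assumption made for the sketch:

    #include <cstdint>
    #include <vector>

    struct Entry {
      uint32_t ast_id;   // stand-in for TypeFeedbackId
      uintptr_t cell;    // stand-in for Handle<Cell>
    };

    // Two array elements per entry, analogous to the SetAstId(i, ...) /
    // SetCell(i, ...) calls above.
    std::vector<uintptr_t> Flatten(const std::vector<Entry>& entries) {
      std::vector<uintptr_t> cache(2 * entries.size());
      for (size_t i = 0; i < entries.size(); ++i) {
        cache[2 * i] = entries[i].ast_id;
        cache[2 * i + 1] = entries[i].cell;
      }
      return cache;
    }
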
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index d9090a8dc8..d52f3c410c 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -96,6 +96,9 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
+ type_feedback_cells_(info->HasDeoptimizationSupport()
+ ? info->function()->ast_node_count() : 0,
+ info->zone()),
ic_total_count_(0) {
Initialize();
}
@@ -127,9 +130,6 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
-#elif V8_TARGET_ARCH_A64
-// TODO(all): Copied ARM value. Check this is sensible for A64.
- static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
@@ -434,15 +434,9 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Feedback slot support. The feedback vector will be cleared during gc and
- // collected by the type-feedback oracle.
- Handle<FixedArray> FeedbackVector() {
- return feedback_vector_;
- }
- void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
- feedback_vector_->set(slot, *object);
- }
- void InitializeFeedbackVector();
+ // Cache cell support. This associates AST ids with global property cells
+ // that will be cleared during GC and collected by the type-feedback oracle.
+ void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -558,11 +552,6 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);
- // Helper functions to EmitVariableAssignment
- void EmitStoreToStackLocalOrContextSlot(Variable* var,
- MemOperand location);
- void EmitCallStoreContextSlot(Handle<String> name, LanguageMode mode);
-
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -573,11 +562,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);
void CallIC(Handle<Code> code,
+ ContextualMode mode = NOT_CONTEXTUAL,
TypeFeedbackId id = TypeFeedbackId::None());
void CallLoadIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(ContextualMode mode,
+ TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -644,6 +635,7 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
+ void PopulateTypeFeedbackCells(Handle<Code> code);
Handle<FixedArray> handler_table() { return handler_table_; }
@@ -658,6 +650,12 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
+ struct TypeFeedbackCellEntry {
+ TypeFeedbackId ast_id;
+ Handle<Cell> cell;
+ };
+
+
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -847,9 +845,9 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
+ ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
- Handle<FixedArray> feedback_vector_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 8a67632d2c..b9437f2ac4 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -71,10 +71,6 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
-#elif defined(__AARCH64EL__)
-#define V8_HOST_ARCH_A64 1
-#define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
@@ -82,7 +78,7 @@ namespace internal {
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error "Host architecture was not detected as supported by v8"
+#error Host architecture was not detected as supported by v8
#endif
#if defined(__ARM_ARCH_7A__) || \
@@ -99,13 +95,11 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
-#elif defined(__AARCH64EL__)
-#define V8_TARGET_ARCH_A64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@@ -125,9 +119,6 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
-#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
-#error Target architecture a64 is only supported on a64 and x64 host
-#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -136,9 +127,6 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
-#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
-#define USE_SIMULATOR 1
-#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -154,8 +142,6 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
-#elif V8_TARGET_ARCH_A64
-#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
index c856ce72b2..d57a104042 100644
--- a/deps/v8/src/harmony-math.js
+++ b/deps/v8/src/harmony-math.js
@@ -59,7 +59,8 @@ function MathSinh(x) {
// ES6 draft 09-27-13, section 20.2.2.12.
function MathCosh(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
+ // Idempotent for NaN and +/-Infinity.
+ if (!NUMBER_IS_FINITE(x)) return x;
return (MathExp(x) + MathExp(-x)) / 2;
}
@@ -109,19 +110,19 @@ function MathAtanh(x) {
}
-// ES6 draft 09-27-13, section 20.2.2.21.
+//ES6 draft 09-27-13, section 20.2.2.21.
function MathLog10(x) {
return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
}
-// ES6 draft 09-27-13, section 20.2.2.22.
+//ES6 draft 09-27-13, section 20.2.2.22.
function MathLog2(x) {
return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
}
-// ES6 draft 09-27-13, section 20.2.2.17.
+//ES6 draft 09-27-13, section 20.2.2.17.
function MathHypot(x, y) { // Function length is 2.
// We may want to introduce fast paths for two arguments and when
// normalization to avoid overflow is not necessary. For now, we
@@ -154,26 +155,6 @@ function MathHypot(x, y) { // Function length is 2.
}
-// ES6 draft 09-27-13, section 20.2.2.16.
-function MathFround(x) {
- return %Math_fround(TO_NUMBER_INLINE(x));
-}
-
-
-function MathClz32(x) {
- x = ToUint32(TO_NUMBER_INLINE(x));
- if (x == 0) return 32;
- var result = 0;
- // Binary search.
- if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
- if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
- if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
- if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
- if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
- return result;
-}
-
-
function ExtendMath() {
%CheckIsBootstrapping();
@@ -189,11 +170,8 @@ function ExtendMath() {
"atanh", MathAtanh,
"log10", MathLog10,
"log2", MathLog2,
- "hypot", MathHypot,
- "fround", MathFround,
- "clz32", MathClz32
+ "hypot", MathHypot
));
}
-
ExtendMath();
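
The MathClz32 that this hunk removes counts leading zeros with a binary search: each test halves the window in which the leading 1 bit can sit. The same algorithm, written as standalone C++ for reference:

    #include <cstdint>

    int Clz32(uint32_t x) {
      if (x == 0) return 32;
      int result = 0;
      // Each step shifts the leading 1 bit into an ever smaller top window.
      if ((x & 0xFFFF0000u) == 0) { x <<= 16; result += 16; }
      if ((x & 0xFF000000u) == 0) { x <<= 8;  result += 8; }
      if ((x & 0xF0000000u) == 0) { x <<= 4;  result += 4; }
      if ((x & 0xC0000000u) == 0) { x <<= 2;  result += 2; }
      if ((x & 0x80000000u) == 0) { result += 1; }
      return result;  // e.g. Clz32(1) == 31, Clz32(0x80000000u) == 0
    }
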
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index f74c4c7a7f..35bad4af39 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -809,21 +809,6 @@ NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
#endif
-GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
- heap_->gc_callbacks_depth_++;
-}
-
-
-GCCallbacksScope::~GCCallbacksScope() {
- heap_->gc_callbacks_depth_--;
-}
-
-
-bool GCCallbacksScope::CheckReenter() {
- return heap_->gc_callbacks_depth_ == 1;
-}
-
-
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -835,13 +820,6 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
-void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- CHECK((*current)->IsSmi());
- }
-}
-
-
double GCTracer::SizeOfHeapObjects() {
return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
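
The GCCallbacksScope removed above is a plain reentrancy guard: it bumps a depth counter for the lifetime of the scope, and the callbacks run only when the depth is one. A minimal sketch of the pattern (names hypothetical):

    class ReentrancyGuard {
     public:
      explicit ReentrancyGuard(int* depth) : depth_(depth) { ++*depth_; }
      ~ReentrancyGuard() { --*depth_; }
      // True only for the outermost scope, so a GC triggered from inside
      // a callback skips the callback list instead of re-entering it.
      bool CheckReenter() const { return *depth_ == 1; }

     private:
      int* depth_;
    };
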
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index b67aa0f376..ccfbfb8d03 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -34,7 +34,6 @@
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
-#include "v8conversions.h"
namespace v8 {
namespace internal {
@@ -73,7 +72,7 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- size_t self_size)
+ int self_size)
: type_(type),
children_count_(0),
children_index_(-1),
@@ -104,7 +103,7 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
+ OS::Print("%6d @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -194,7 +193,7 @@ template <> struct SnapshotSizeConstants<4> {
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 40;
+ static const int kExpectedHeapEntrySize = 32;
};
} // namespace
@@ -277,7 +276,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- size_t size) {
+ int size) {
HeapEntry entry(this, type, name, id, size);
entries_.Add(entry);
return &entries_.last();
@@ -900,17 +899,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
- return AddEntry(object->address(), type, name, object->Size());
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(Address address,
- HeapEntry::Type type,
- const char* name,
- size_t size) {
- SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
- address, static_cast<unsigned int>(size));
- return snapshot_->AddEntry(type, name, object_id, size);
+ int object_size = object->Size();
+ SnapshotObjectId object_id =
+ heap_object_map_->FindOrAddEntry(object->address(), object_size);
+ return snapshot_->AddEntry(type, name, object_id, object_size);
}
@@ -1037,8 +1029,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
- } else if (obj->IsJSArrayBuffer()) {
- ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -1157,6 +1147,13 @@ void V8HeapExplorer::ExtractJSObjectReferences(
JSArrayBufferView::kBufferOffset);
SetWeakReference(view, entry, "weak_next", view->weak_next(),
JSArrayBufferView::kWeakNextOffset);
+ } else if (obj->IsJSArrayBuffer()) {
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
+ SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
+ JSArrayBuffer::kWeakNextOffset);
+ SetWeakReference(buffer, entry,
+ "weak_first_view", buffer->weak_first_view(),
+ JSArrayBuffer::kWeakFirstViewOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1457,42 +1454,6 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
}
-class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
- public:
- JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
- : size_(size)
- , explorer_(explorer) {
- }
- virtual HeapEntry* AllocateEntry(HeapThing ptr) {
- return explorer_->AddEntry(
- static_cast<Address>(ptr),
- HeapEntry::kNative, "system / JSArrayBufferData", size_);
- }
- private:
- size_t size_;
- V8HeapExplorer* explorer_;
-};
-
-
-void V8HeapExplorer::ExtractJSArrayBufferReferences(
- int entry, JSArrayBuffer* buffer) {
- SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
- JSArrayBuffer::kWeakNextOffset);
- SetWeakReference(buffer, entry,
- "weak_first_view", buffer->weak_first_view(),
- JSArrayBuffer::kWeakFirstViewOffset);
- // Setup a reference to a native memory backing_store object.
- if (!buffer->backing_store())
- return;
- size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
- JSArrayBufferDataEntryAllocator allocator(data_size, this);
- HeapEntry* data_entry =
- filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- entry, "backing_store", data_entry);
-}
-
-
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
if (!js_obj->IsJSFunction()) return;
@@ -2702,26 +2663,9 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-namespace {
-
-template<size_t size> struct ToUnsigned;
-
-template<> struct ToUnsigned<4> {
- typedef uint32_t Type;
-};
-
-template<> struct ToUnsigned<8> {
- typedef uint64_t Type;
-};
-
-} // namespace
-
-
-template<typename T>
-static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
- STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned
+static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
int number_of_digits = 0;
- T t = value;
+ unsigned t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -2729,7 +2673,7 @@ static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
buffer_pos += number_of_digits;
int result = buffer_pos;
do {
- int last_digit = static_cast<int>(value % 10);
+ int last_digit = value % 10;
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
@@ -2737,14 +2681,6 @@ static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
}
-template<typename T>
-static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
- typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
- STATIC_CHECK(sizeof(value) == sizeof(unsigned_value));
- return utoa_impl(unsigned_value, buffer, buffer_pos);
-}
-
-
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
@@ -2781,10 +2717,9 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0
+ // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
static const int kBufferSize =
- 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
+ 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 5 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
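
Both utoa variants in this hunk share one technique: a first division loop counts the digits, then a second loop writes them backwards from the end of the reserved span, so the most significant digit lands first. The retained unsigned-only version, as a standalone C++ sketch:

    // Writes the decimal form of `value` starting at buffer[buffer_pos]
    // and returns the position one past the last digit.
    static int Utoa(unsigned value, char* buffer, int buffer_pos) {
      // First pass: count the digits (at least one, even for zero).
      int number_of_digits = 0;
      unsigned t = value;
      do { ++number_of_digits; } while (t /= 10);
      // Second pass: fill backwards from the end of the reserved span.
      buffer_pos += number_of_digits;
      int result = buffer_pos;
      do {
        buffer[--buffer_pos] = static_cast<char>('0' + value % 10);
        value /= 10;
      } while (value);
      return result;
    }
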
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 8717f8f25e..e209eeabb1 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -114,14 +114,14 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
- size_t self_size);
+ int self_size);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
- size_t self_size() { return self_size_; }
+ int self_size() { return self_size_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -146,7 +146,7 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
- size_t self_size_;
+ int self_size_;
SnapshotObjectId id_;
HeapSnapshot* snapshot_;
const char* name_;
@@ -186,7 +186,7 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- size_t size);
+ int size);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -386,10 +386,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void TagGlobalObjects();
void TagCodeObject(Code* code);
void TagBuiltinCodeObject(Code* code, const char* name);
- HeapEntry* AddEntry(Address address,
- HeapEntry::Type type,
- const char* name,
- size_t size);
static String* GetConstructorName(JSObject* object);
@@ -400,7 +396,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);
-
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
@@ -419,7 +414,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
- void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 8454dd51cb..dfe98ec080 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -155,8 +155,7 @@ Heap::Heap()
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL),
- gc_callbacks_depth_(0) {
+ relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -546,9 +545,7 @@ void Heap::ProcessPretenuringFeedback() {
}
}
- if (trigger_deoptimization) {
- isolate_->stack_guard()->DeoptMarkedAllocationSites();
- }
+ if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
FlushAllocationSitesScratchpad();
@@ -570,25 +567,6 @@ void Heap::ProcessPretenuringFeedback() {
}
-void Heap::DeoptMarkedAllocationSites() {
- // TODO(hpayer): If iterating over the allocation sites list becomes a
- // performance issue, use a cache heap data structure instead (similar to the
- // allocation sites scratchpad).
- Object* list_element = allocation_sites_list();
- while (list_element->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(list_element);
- if (site->deopt_dependent_code()) {
- site->dependent_code()->MarkCodeForDeoptimization(
- isolate_,
- DependentCode::kAllocationSiteTenuringChangedGroup);
- site->set_deopt_dependent_code(false);
- }
- list_element = site->weak_next();
- }
- Deoptimizer::DeoptimizeMarkedCode(isolate_);
-}
-
-
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
@@ -597,9 +575,6 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
- // Process pretenuring feedback and update allocation sites.
- ProcessPretenuringFeedback();
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -774,21 +749,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
-void Heap::EnsureFillerObjectAtTop() {
- // There may be an allocation memento behind every object in new space.
- // If we evacuate a not full new space or if we are on the last page of
- // the new space, then there may be uninitialized memory behind the top
- // pointer of the new space page. We store a filler object there to
- // identify the unused space.
- Address from_top = new_space_.top();
- Address from_limit = new_space_.limit();
- if (from_top < from_limit) {
- int remaining_in_page = static_cast<int>(from_limit - from_top);
- CreateFillerObjectAt(from_top, remaining_in_page);
- }
-}
-
-
bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
const char* collector_reason,
@@ -805,7 +765,17 @@ bool Heap::CollectGarbage(GarbageCollector collector,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
- EnsureFillerObjectAtTop();
+ // There may be an allocation memento behind every object in new space.
+ // If we evacuate a not full new space or if we are on the last page of
+ // the new space, then there may be uninitialized memory behind the top
+ // pointer of the new space page. We store a filler object there to
+ // identify the unused space.
+ Address from_top = new_space_.top();
+ Address from_limit = new_space_.limit();
+ if (from_top < from_limit) {
+ int remaining_in_page = static_cast<int>(from_limit - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
@@ -879,6 +849,16 @@ int Heap::NotifyContextDisposed() {
}
+void Heap::PerformScavenge() {
+ GCTracer tracer(this, NULL, NULL);
+ if (incremental_marking()->IsStopped()) {
+ PerformGarbageCollection(SCAVENGER, &tracer);
+ } else {
+ PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+ }
+}
+
+
void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -1085,14 +1065,11 @@ bool Heap::PerformGarbageCollection(
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- { GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
+ {
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
EnsureFromSpaceIsCommitted();
@@ -1197,14 +1174,11 @@ bool Heap::PerformGarbageCollection(
amount_of_external_allocated_memory_;
}
- { GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
+ {
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
#ifdef VERIFY_HEAP
@@ -1644,6 +1618,8 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+ ProcessPretenuringFeedback();
+
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
@@ -2023,12 +1999,14 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
- casted->set_deopt_dependent_code(true);
- marked = true;
+ bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
+ isolate_,
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ if (got_marked) marked = true;
}
cur = casted->weak_next();
}
- if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
+ if (marked) isolate_->stack_guard()->DeoptMarkedCode();
}
@@ -2691,7 +2669,8 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->initialize_storage();
- info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
return info;
}
@@ -3073,17 +3052,6 @@ void Heap::CreateFixedStubs() {
// The eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
-
- // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
- // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
- // is created.
-
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3094,6 +3062,12 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -3295,15 +3269,6 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
- // Allocate object to hold object microtask state.
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_microtask_state(JSObject::cast(obj));
-
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3657,14 +3622,8 @@ void Heap::InitializeAllocationSitesScratchpad() {
void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
- // We cannot use the normal write-barrier because slots need to be
- // recorded with non-incremental marking as well. We have to explicitly
- // record the slot to take evacuation candidates into account.
allocation_sites_scratchpad()->set(
- allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
- Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
- allocation_sites_scratchpad_length_);
- mark_compact_collector()->RecordSlot(slot, slot, *slot);
+ allocation_sites_scratchpad_length_, site);
allocation_sites_scratchpad_length_++;
}
}
@@ -3811,6 +3770,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
int start_position,
int end_position,
Object* script,
+ Object* stack_trace,
Object* stack_frames) {
Object* result;
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
@@ -3825,6 +3785,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
message->set_start_position(start_position);
message->set_end_position(end_position);
message->set_script(script);
+ message->set_stack_trace(stack_trace);
message->set_stack_frames(stack_frames);
return result;
}
@@ -5862,9 +5823,6 @@ void Heap::Verify() {
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
- VerifySmisVisitor smis_visitor;
- IterateSmiRoots(&smis_visitor);
-
new_space_.Verify();
old_pointer_space_->Verify(&visitor);
@@ -6162,12 +6120,6 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}
-void Heap::IterateSmiRoots(ObjectVisitor* v) {
- v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
- v->Synchronize(VisitorSynchronization::kSmiRootList);
-}
-
-
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6390,7 +6342,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
bool Heap::AdvanceSweepers(int step_size) {
- ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
+ ASSERT(isolate()->num_sweeper_threads() == 0);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
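
The DeoptMarkedAllocationSites() body deleted from heap.cc walks the weak allocation-site list and deoptimizes code that depends on any site whose tenuring decision changed. The traversal, reduced to a standalone C++ sketch with stand-in types:

    struct AllocationSite {
      bool deopt_dependent_code = false;
      AllocationSite* weak_next = nullptr;
    };

    // Follow the singly linked weak list; for each marked site, dependent
    // code would be marked for deoptimization and the flag cleared.
    void DeoptMarkedSites(AllocationSite* list_head) {
      for (AllocationSite* site = list_head; site != nullptr;
           site = site->weak_next) {
        if (site->deopt_dependent_code) {
          // In V8: site->dependent_code()->MarkCodeForDeoptimization(...).
          site->deopt_dependent_code = false;
        }
      }
    }
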
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 81c7d4732c..266cdb9684 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -78,6 +78,7 @@ namespace internal {
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
+ V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -185,8 +186,14 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
+ V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
+ V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Cell, undefined_cell, UndefineCell) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
@@ -196,22 +203,10 @@ namespace internal {
EmptySlowElementDictionary) \
V(Symbol, observed_symbol, ObservedSymbol) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
- V(JSObject, microtask_state, MicrotaskState)
-
-// Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V) \
- V(Smi, stack_limit, StackLimit) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(Smi, last_script_id, LastScriptId) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+ V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
- SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
// Heap roots that are known to be immortal immovable, for which we can safely
@@ -1139,6 +1134,7 @@ class Heap {
int start_position,
int end_position,
Object* script,
+ Object* stack_trace,
Object* stack_frames);
// Allocate a new external string object, which is backed by a string
@@ -1259,6 +1255,10 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
+ // Utility to invoke the scavenger. This is needed in test code to
+ // ensure correct callback for weak global handles.
+ void PerformScavenge();
+
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1347,9 +1347,6 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over entries in the smi roots list. Only interesting to the
- // serializer/deserializer, since GC does not care about smis.
- void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
@@ -1585,7 +1582,7 @@ class Heap {
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
- // Declare all the root indices. This defines the root list order.
+ // Declare all the root indices.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1601,14 +1598,8 @@ class Heap {
#undef DECLARE_STRUCT_MAP
kStringTableRootIndex,
-
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
- kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
- kSmiRootsStart = kStringTableRootIndex + 1
+ kRootListLength
};
STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
@@ -1843,8 +1834,6 @@ class Heap {
return amount_of_external_allocated_memory_;
}
- void DeoptMarkedAllocationSites();
-
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -2131,11 +2120,6 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
- // Make sure there is a filler value behind the top of the new space
- // so that the GC does not confuse some unintialized/stale memory
- // with the allocation memento of the object at the top
- void EnsureFillerObjectAtTop();
-
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
@@ -2510,8 +2494,6 @@ class Heap {
bool relocation_mutex_locked_by_optimizer_thread_;
#endif // DEBUG;
- int gc_callbacks_depth_;
-
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
@@ -2524,7 +2506,6 @@ class Heap {
#ifdef VERIFY_HEAP
friend class NoWeakObjectVerificationScope;
#endif
- friend class GCCallbacksScope;
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2597,18 +2578,6 @@ class NoWeakObjectVerificationScope {
#endif
-class GCCallbacksScope {
- public:
- explicit inline GCCallbacksScope(Heap* heap);
- inline ~GCCallbacksScope();
-
- inline bool CheckReenter();
-
- private:
- Heap* heap_;
-};
-
-
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2620,13 +2589,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
-// Verify that all objects are Smis.
-class VerifySmisVisitor: public ObjectVisitor {
- public:
- inline void VisitPointers(Object** start, Object** end);
-};
-
-
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
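
The heap.h hunks above drop the SMI_ROOT_LIST block and fold everything back into a single root list. The ordering trick being reverted: smi-only roots were appended after the string table, so GC visitors could stop at kStrongRootListLength while the serializer still reached every root via IterateSmiRoots(). A compilable sketch of the two index layouts, with enumerator names abbreviated for illustration:

    // Layout after this patch: string table last, no smi section.
    enum RootListIndexAfter {
      kFirstRootAfter,               // ... STRONG_ROOT_LIST entries ...
      kStringTableRootIndexAfter,
      kStrongRootListLengthAfter = kStringTableRootIndexAfter,
      kRootListLengthAfter
    };

    // Layout being removed: smi roots follow the string table, so GC
    // iterates [0, kStrongRootListLength) and never touches them.
    enum RootListIndexBefore {
      kFirstRootBefore,              // ... STRONG_ROOT_LIST entries ...
      kStringTableRootIndexBefore,
      kFirstSmiRootBefore,           // ... SMI_ROOT_LIST entries ...
      kRootListLengthBefore,
      kStrongRootListLengthBefore = kStringTableRootIndexBefore,
      kSmiRootsStartBefore = kStringTableRootIndexBefore + 1
    };
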
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index 869db54a2f..e1a2847127 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -91,8 +91,8 @@ class BoundsCheckKey : public ZoneObject {
private:
BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
+ : index_base_(index_base),
+ length_(length) { }
HValue* index_base_;
HValue* length_;
@@ -144,10 +144,7 @@ class BoundsCheckBbData: public ZoneObject {
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
- //
- // If the check cannot be modified because the context is unknown it
- // returns false, otherwise it returns true.
- bool CoverCheck(HBoundsCheck* new_check,
+ void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
@@ -158,15 +155,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- upper_check_->ReplaceAllUsesWith(upper_check_->index());
- upper_check_->SetOperandAt(0, added_upper_index_);
+ TightenCheck(upper_check_, new_check);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
@@ -174,32 +163,27 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- lower_check_->ReplaceAllUsesWith(lower_check_->index());
- lower_check_->SetOperandAt(0, added_lower_index_);
+ TightenCheck(lower_check_, new_check);
}
} else {
- ASSERT(false);
+ // Should never have called CoverCheck() in this case.
+ UNREACHABLE();
}
if (!keep_new_check) {
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
+ } else {
+ HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
+ : lower_check_;
+ // The length is guaranteed to be live at first_check.
+ ASSERT(new_check->length() == first_check->length());
+ HInstruction* old_position = new_check->next();
+ new_check->Unlink();
+ new_check->InsertAfter(first_check);
+ MoveIndexIfNecessary(new_check->index(), new_check, old_position);
}
-
- return true;
- }
-
- void RemoveZeroOperations() {
- RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
- RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
}
BoundsCheckBbData(BoundsCheckKey* key,
@@ -210,18 +194,14 @@ class BoundsCheckBbData: public ZoneObject {
HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
- : key_(key),
- lower_offset_(lower_offset),
- upper_offset_(upper_offset),
- basic_block_(bb),
- lower_check_(lower_check),
- upper_check_(upper_check),
- added_lower_index_(NULL),
- added_lower_offset_(NULL),
- added_upper_index_(NULL),
- added_upper_offset_(NULL),
- next_in_bb_(next_in_bb),
- father_in_dt_(father_in_dt) { }
+ : key_(key),
+ lower_offset_(lower_offset),
+ upper_offset_(upper_offset),
+ basic_block_(bb),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ next_in_bb_(next_in_bb),
+ father_in_dt_(father_in_dt) { }
private:
BoundsCheckKey* key_;
@@ -230,57 +210,56 @@ class BoundsCheckBbData: public ZoneObject {
HBasicBlock* basic_block_;
HBoundsCheck* lower_check_;
HBoundsCheck* upper_check_;
- HInstruction* added_lower_index_;
- HConstant* added_lower_offset_;
- HInstruction* added_upper_index_;
- HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- // Given an existing add instruction and a bounds check it tries to
- // find the current context (either of the add or of the check index).
- HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
- if (add != NULL && add->IsAdd()) {
- return HAdd::cast(add)->context();
+ void MoveIndexIfNecessary(HValue* index_raw,
+ HBoundsCheck* insert_before,
+ HInstruction* end_of_scan_range) {
+ if (!index_raw->IsAdd() && !index_raw->IsSub()) {
+ // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
+ // or index_base directly. In the latter case, no need to move anything.
+ return;
}
- if (check->index()->IsBinaryOperation()) {
- return HBinaryOperation::cast(check->index())->context();
+ HArithmeticBinaryOperation* index =
+ HArithmeticBinaryOperation::cast(index_raw);
+ HValue* left_input = index->left();
+ HValue* right_input = index->right();
+ bool must_move_index = false;
+ bool must_move_left_input = false;
+ bool must_move_right_input = false;
+ for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+ if (cursor == left_input) must_move_left_input = true;
+ if (cursor == right_input) must_move_right_input = true;
+ if (cursor == index) must_move_index = true;
+ if (cursor->previous() == NULL) {
+ cursor = cursor->block()->dominator()->end();
+ } else {
+ cursor = cursor->previous();
+ }
}
- return NULL;
- }
-
- // This function returns false if it cannot build the add because the
- // current context cannot be determined.
- bool BuildOffsetAdd(HBoundsCheck* check,
- HInstruction** add,
- HConstant** constant,
- HValue* original_value,
- Representation representation,
- int32_t new_offset) {
- HValue* index_context = IndexContext(*add, check);
- if (index_context == NULL) return false;
-
- Zone* zone = BasicBlock()->zone();
- HConstant* new_constant = HConstant::New(zone, index_context,
- new_offset, representation);
- if (*add == NULL) {
- new_constant->InsertBefore(check);
- (*add) = HAdd::New(zone, index_context, original_value, new_constant);
- (*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(check);
- } else {
- new_constant->InsertBefore(*add);
- (*constant)->DeleteAndReplaceWith(new_constant);
+ if (must_move_index) {
+ index->Unlink();
+ index->InsertBefore(insert_before);
+ }
+ // The BCE algorithm only selects mergeable bounds checks that share
+ // the same "index_base", so we'll only ever have to move constants.
+ if (must_move_left_input) {
+ HConstant::cast(left_input)->Unlink();
+ HConstant::cast(left_input)->InsertBefore(index);
+ }
+ if (must_move_right_input) {
+ HConstant::cast(right_input)->Unlink();
+ HConstant::cast(right_input)->InsertBefore(index);
}
- *constant = new_constant;
- return true;
}
- void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
- if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
- (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
- (*constant)->DeleteAndReplaceWith(NULL);
- }
+ void TightenCheck(HBoundsCheck* original_check,
+ HBoundsCheck* tighter_check) {
+ ASSERT(original_check->length() == tighter_check->length());
+ MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
+ original_check->ReplaceAllUsesWith(original_check->index());
+ original_check->SetOperandAt(0, tighter_check->index());
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -394,11 +373,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
check->DeleteAndReplaceWith(check->ActualValue());
- } else if (data->BasicBlock() != bb ||
- !data->CoverCheck(check, offset)) {
- // If the check is in the current BB we try to modify it by calling
- // "CoverCheck", but if also that fails we record the current offsets
- // in a new data instance because from now on they are covered.
+ } else if (data->BasicBlock() == bb) {
+ data->CoverCheck(check, offset);
+ } else if (graph()->use_optimistic_licm() ||
+ bb->IsLoopSuccessorDominator()) {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
@@ -424,7 +402,6 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
void HBoundsCheckEliminationPhase::PostProcessBlock(
HBasicBlock* block, BoundsCheckBbData* data) {
while (data != NULL) {
- data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
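
CoverCheck() and its callers maintain, per (index_base, length) key and basic block, a single covered offset interval: a new bounds check whose offset already lies inside [lower_offset_, upper_offset_] is redundant, and otherwise one end of the interval is widened while the corresponding check is tightened. The interval bookkeeping in isolation, as a hedged C++ sketch:

    #include <algorithm>
    #include <cstdint>

    struct CoveredRange {
      int32_t lower;
      int32_t upper;

      // Returns true if `offset` was already covered, i.e. the new bounds
      // check can simply be deleted; otherwise widens the interval, which
      // corresponds to tightening lower_check_/upper_check_ above.
      bool Covers(int32_t offset) {
        if (offset >= lower && offset <= upper) return true;
        lower = std::min(lower, offset);
        upper = std::max(upper, offset);
        return false;
      }
    };
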
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index f15267349f..e12f14a13f 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -48,13 +48,12 @@ typedef UniqueSet<Map>* MapSet;
struct HCheckTableEntry {
HValue* object_; // The object being approximated. NULL => invalid entry.
- HInstruction* check_; // The last check instruction.
- MapSet maps_; // The set of known maps for the object.
- bool is_stable_;
+ HValue* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
};
-// The main data structure used during check elimination, which stores a
+// The main datastructure used during check elimination, which stores a
// set of known maps for each object.
class HCheckTable : public ZoneObject {
public:
@@ -104,10 +103,9 @@ class HCheckTable : public ZoneObject {
}
default: {
// If the instruction changes maps uncontrollably, drop everything.
- if (instr->CheckChangesFlag(kOsrEntries)) {
- Reset();
- } else if (instr->CheckChangesFlag(kMaps)) {
- KillUnstableEntries();
+ if (instr->CheckGVNFlag(kChangesMaps) ||
+ instr->CheckGVNFlag(kChangesOsrEntries)) {
+ Kill();
}
}
// Improvements possible:
@@ -118,101 +116,39 @@ class HCheckTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Merge given state with
- // the other incoming state.
- static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
- HCheckTable* pred_state, HBasicBlock* pred_block,
- Zone* zone) {
- if (pred_state == NULL || pred_block->IsUnreachable()) {
- return succ_state;
- }
- if (succ_state == NULL) {
- return pred_state->Copy(succ_block, pred_block, zone);
- } else {
- return succ_state->Merge(succ_block, pred_state, pred_block, zone);
- }
- }
-
- // Support for global analysis with HFlowEngine: Given state merged with all
- // the other incoming states, prepare it for use.
- static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
- Zone* zone) {
- if (state == NULL) {
- block->MarkUnreachable();
- }
- return state;
- }
-
- private:
- // Copy state to successor block.
+ // Global analysis: Copy state to successor block.
HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
for (int i = 0; i < size_; i++) {
HCheckTableEntry* old_entry = &entries_[i];
HCheckTableEntry* new_entry = &copy->entries_[i];
+ // TODO(titzer): keep the check if this block dominates the successor?
new_entry->object_ = old_entry->object_;
+ new_entry->check_ = NULL;
new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
- new_entry->is_stable_ = old_entry->is_stable_;
- // Keep the check if the existing check's block dominates the successor.
- if (old_entry->check_ != NULL &&
- old_entry->check_->block()->Dominates(succ)) {
- new_entry->check_ = old_entry->check_;
- } else {
- // Leave it NULL till we meet a new check instruction for this object
- // in the control flow.
- new_entry->check_ = NULL;
- }
}
copy->cursor_ = cursor_;
copy->size_ = size_;
- // Create entries for succ block's phis.
- if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
- int pred_index = succ->PredecessorIndexOf(from_block);
- for (int phi_index = 0;
- phi_index < succ->phis()->length();
- ++phi_index) {
- HPhi* phi = succ->phis()->at(phi_index);
- HValue* phi_operand = phi->OperandAt(pred_index);
-
- HCheckTableEntry* pred_entry = copy->Find(phi_operand);
- if (pred_entry != NULL) {
- // Create an entry for a phi in the table.
- copy->Insert(phi, NULL, pred_entry->maps_->Copy(phase_->zone()),
- pred_entry->is_stable_);
- }
- }
- }
-
// Branch-sensitive analysis for certain comparisons may add more facts
// to the state for the successor on the true branch.
bool learned = false;
- if (succ->predecessors()->length() == 1) {
- HControlInstruction* end = succ->predecessors()->at(0)->end();
- bool is_true_branch = end->SuccessorAt(0) == succ;
+ HControlInstruction* end = succ->predecessors()->at(0)->end();
+ if (succ->predecessors()->length() == 1 && end->SuccessorAt(0) == succ) {
if (end->IsCompareMap()) {
+ // Learn on the true branch of if(CompareMap(x)).
HCompareMap* cmp = HCompareMap::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
- if (is_true_branch) {
- // Learn on the true branch of if(CompareMap(x)).
- if (entry == NULL) {
- copy->Insert(object, cmp, cmp->map(), cmp->is_stable());
- } else {
- MapSet list = new(phase_->zone()) UniqueSet<Map>();
- list->Add(cmp->map(), phase_->zone());
- entry->maps_ = list;
- entry->check_ = cmp;
- entry->is_stable_ = cmp->is_stable();
- }
+ if (entry == NULL) {
+ copy->Insert(object, cmp->map());
} else {
- // Learn on the false branch of if(CompareMap(x)).
- if (entry != NULL) {
- entry->maps_->Remove(cmp->map());
- }
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(cmp->map(), phase_->zone());
+ entry->maps_ = list;
}
learned = true;
- } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
+ } else if (end->IsCompareObjectEqAndBranch()) {
// Learn on the true branch of if(CmpObjectEq(x, y)).
HCompareObjectEqAndBranch* cmp =
HCompareObjectEqAndBranch::cast(end);
@@ -222,10 +158,10 @@ class HCheckTable : public ZoneObject {
HCheckTableEntry* re = copy->Find(right);
if (le == NULL) {
if (re != NULL) {
- copy->Insert(left, NULL, re->maps_->Copy(zone), re->is_stable_);
+ copy->Insert(left, NULL, re->maps_->Copy(zone));
}
} else if (re == NULL) {
- copy->Insert(right, NULL, le->maps_->Copy(zone), le->is_stable_);
+ copy->Insert(right, NULL, le->maps_->Copy(zone));
} else {
MapSet intersect = le->maps_->Intersect(re->maps_, zone);
le->maps_ = intersect;
@@ -247,48 +183,37 @@ class HCheckTable : public ZoneObject {
return copy;
}
- // Merge this state with the other incoming state.
+ // Global analysis: Merge this state with the other incoming state.
HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
- HBasicBlock* pred_block, Zone* zone) {
- if (that->size_ == 0) {
- // If the other state is empty, simply reset.
- Reset();
- } else {
- int pred_index = succ->PredecessorIndexOf(pred_block);
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* this_entry = &entries_[i];
- HCheckTableEntry* that_entry;
- if (this_entry->object_->IsPhi() &&
- this_entry->object_->block() == succ) {
- HPhi* phi = HPhi::cast(this_entry->object_);
- HValue* phi_operand = phi->OperandAt(pred_index);
- that_entry = that->Find(phi_operand);
-
- } else {
- that_entry = that->Find(this_entry->object_);
- }
-
- if (that_entry == NULL) {
- this_entry->object_ = NULL;
- compact = true;
- } else {
- this_entry->maps_ =
- this_entry->maps_->Union(that_entry->maps_, phase_->zone());
- this_entry->is_stable_ =
- this_entry->is_stable_ && that_entry->is_stable_;
- if (this_entry->check_ != that_entry->check_) {
- this_entry->check_ = NULL;
+ HBasicBlock* that_block, Zone* zone) {
+ if (that_block->IsReachable()) {
+ if (that->size_ == 0) {
+ // If the other state is empty, simply reset.
+ size_ = 0;
+ cursor_ = 0;
+ } else {
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry = that->Find(this_entry->object_);
+ if (that_entry == NULL) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ =
+ this_entry->maps_->Union(that_entry->maps_, phase_->zone());
+ if (this_entry->check_ != that_entry->check_) {
+ this_entry->check_ = NULL;
+ }
+ ASSERT(this_entry->maps_->size() > 0);
}
- ASSERT(this_entry->maps_->size() > 0);
}
+ if (compact) Compact();
}
- if (compact) Compact();
}
-
if (FLAG_trace_check_elimination) {
PrintF("B%d checkmaps-table merged with B%d table:\n",
- succ->block_id(), pred_block->block_id());
+ succ->block_id(), that_block->block_id());
Print();
}
return this;
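
The merge above computes the weakest fact that holds on both incoming edges: an entry survives only if the other state also tracks the object, the surviving map set is the union (a bigger set is a weaker claim), and mismatched check instructions degrade to NULL. A minimal standalone model of that lattice step, with invented names:

#include <map>
#include <set>

using Object = int;
using Maps = std::set<int>;
using State = std::map<Object, Maps>;

// Merge `that` into `self` at a control-flow join: drop objects unknown
// to `that`, otherwise union the map sets (the weaker combined fact).
void Merge(State& self, const State& that) {
  for (State::iterator it = self.begin(); it != self.end();) {
    State::const_iterator other = that.find(it->first);
    if (other == that.end()) {
      self.erase(it++);   // fact does not hold on the other edge
    } else {
      it->second.insert(other->second.begin(), other->second.end());
      ++it;
    }
  }
}

int main() {
  State a, b;
  a[1] = {10};
  a[2] = {20};
  b[1] = {11};
  Merge(a, b);            // entry 2 dropped; entry 1 becomes {10, 11}
  return a.count(2) == 0 && a[1].size() == 2 ? 0 : 1;
}
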
@@ -319,46 +244,18 @@ class HCheckTable : public ZoneObject {
}
return;
}
- MapSet intersection = i->Intersect(a, phase_->zone());
- if (intersection->size() == 0) {
+ i = i->Intersect(a, phase_->zone());
+ if (i->size() == 0) {
// Intersection is empty; probably megamorphic, which is likely to
// deopt anyway, so just leave things as they are.
INC_STAT(empty_);
} else {
- // Update set of maps in the entry.
- entry->maps_ = intersection;
- if (intersection->size() != i->size()) {
- // Narrow set of maps in the second check maps instruction.
- HGraph* graph = instr->block()->graph();
- if (entry->check_ != NULL &&
- entry->check_->block() == instr->block() &&
- entry->check_->IsCheckMaps()) {
- // There is a check in the same block so replace it with a more
- // strict check and eliminate the second check entirely.
- HCheckMaps* check = HCheckMaps::cast(entry->check_);
- TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
- check->block()->block_id()));
- check->set_map_set(intersection, graph->zone());
- TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
- instr->id(), instr->block()->block_id(), entry->check_->id()));
- instr->DeleteAndReplaceWith(entry->check_);
- } else {
- TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
- instr->block()->block_id()));
- instr->set_map_set(intersection, graph->zone());
- entry->check_ = instr;
- }
-
- if (FLAG_trace_check_elimination) {
- Print();
- }
- INC_STAT(narrowed_);
- }
+ // TODO(titzer): replace the first check with a more strict check
+ INC_STAT(narrowed_);
}
} else {
// No entry; insert a new one.
- Insert(object, instr, instr->map_set().Copy(phase_->zone()),
- instr->is_stable());
+ Insert(object, instr, instr->map_set().Copy(phase_->zone()));
}
}
@@ -395,33 +292,22 @@ class HCheckTable : public ZoneObject {
HValue* object = instr->value()->ActualValue();
// Match a HCheckMapValue(object, HConstant(map))
Unique<Map> map = MapConstant(instr->map());
-
- HCheckTableEntry* entry = Find(object);
- if (entry != NULL) {
- MapSet maps = entry->maps_;
+ MapSet maps = FindMaps(object);
+ if (maps != NULL) {
if (maps->Contains(map)) {
if (maps->size() == 1) {
// Object is known to have exactly this map.
- if (entry->check_ != NULL) {
- instr->DeleteAndReplaceWith(entry->check_);
- } else {
- // Mark check as dead but leave it in the graph as a checkpoint for
- // subsequent checks.
- instr->SetFlag(HValue::kIsDead);
- entry->check_ = instr;
- }
+ instr->DeleteAndReplaceWith(NULL);
INC_STAT(removed_);
} else {
// Only one map survives the check.
maps->Clear();
maps->Add(map, phase_->zone());
- entry->check_ = instr;
}
}
} else {
// No prior information.
- // TODO(verwaest): Tag map constants with stability.
- Insert(object, instr, map, false);
+ Insert(object, map);
}
}
@@ -438,46 +324,34 @@ class HCheckTable : public ZoneObject {
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
- Insert(object, NULL, MapConstant(instr->transition()),
- instr->is_stable());
+ Insert(object, MapConstant(instr->transition()));
} else if (IsMapAccess(instr->access())) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
- // TODO(verwaest): Tag with stability.
- Insert(object, NULL, MapConstant(instr->value()), false);
+ Insert(object, MapConstant(instr->value()));
} else {
// If the instruction changes maps, it should be handled above.
- CHECK(!instr->CheckChangesFlag(kMaps));
+ CHECK(!instr->CheckGVNFlag(kChangesMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
MapSet maps = FindMaps(instr->value()->ActualValue());
if (maps == NULL) return;
-
- int succ;
if (maps->Contains(instr->map())) {
- if (maps->size() != 1) {
- TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
- "ambiguous set of maps\n", instr->id(), instr->value()->id(),
- instr->block()->block_id()));
- return;
+ if (maps->size() == 1) {
+ TRACE(("Marking redundant CompareMap #%d at B%d as true\n",
+ instr->id(), instr->block()->block_id()));
+ instr->set_known_successor_index(0);
+ INC_STAT(compares_true_);
}
- succ = 0;
- INC_STAT(compares_true_);
} else {
- succ = 1;
+ TRACE(("Marking redundant CompareMap #%d at B%d as false\n",
+ instr->id(), instr->block()->block_id()));
+ instr->set_known_successor_index(1);
INC_STAT(compares_false_);
}
-
- TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
- instr->id(), instr->value()->id(), instr->block()->block_id(),
- succ == 0 ? "true" : "false"));
- instr->set_known_successor_index(succ);
-
- int unreachable_succ = 1 - succ;
- instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
@@ -495,26 +369,12 @@ class HCheckTable : public ZoneObject {
}
}
- // Reset the table.
- void Reset() {
+ // Kill everything in the table.
+ void Kill() {
size_ = 0;
cursor_ = 0;
}
- // Kill everything in the table.
- void KillUnstableEntries() {
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* entry = &entries_[i];
- ASSERT(entry->object_ != NULL);
- if (!entry->is_stable_) {
- entry->object_ = NULL;
- compact = true;
- }
- }
- if (compact) Compact();
- }
-
// Kill everything in the table that may alias {object}.
void Kill(HValue* object) {
bool compact = false;
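
Both Kill paths above err on the safe side: an instruction that can change maps unpredictably empties the whole table, while a map-changing store to a known object drops only the entries that may alias it. A simplified model follows; the may-alias test here is plain equality, standing in for V8's real aliasing query.

#include <map>
#include <set>

using Object = int;
using State = std::map<Object, std::set<int> >;

// Uncontrollable map change: forget every tracked fact.
void KillAll(State& state) { state.clear(); }

// Placeholder aliasing test; V8 consults the actual value graph.
bool MayAlias(Object a, Object b) { return a == b; }

// Store that rewrites `object`'s map: forget entries that may alias it.
void Kill(State& state, Object object) {
  for (State::iterator it = state.begin(); it != state.end();) {
    if (MayAlias(it->first, object)) state.erase(it++);
    else ++it;
  }
}

int main() {
  State s;
  s[1] = {10};
  s[2] = {20};
  Kill(s, 1);    // only the aliasing entry is dropped
  KillAll(s);    // a kChangesMaps instruction drops everything
  return s.empty() ? 0 : 1;
}
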
@@ -566,8 +426,7 @@ class HCheckTable : public ZoneObject {
for (int i = 0; i < size_; i++) {
HCheckTableEntry* entry = &entries_[i];
ASSERT(entry->object_ != NULL);
- PrintF(" checkmaps-table @%d: %s #%d ", i,
- entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
+ PrintF(" checkmaps-table @%d: object #%d ", i, entry->object_->id());
if (entry->check_ != NULL) {
PrintF("check #%d ", entry->check_->id());
}
@@ -581,6 +440,7 @@ class HCheckTable : public ZoneObject {
}
}
+ private:
HCheckTableEntry* Find(HValue* object) {
for (int i = size_ - 1; i >= 0; i--) {
// Search from most-recently-inserted to least-recently-inserted.
@@ -596,24 +456,17 @@ class HCheckTable : public ZoneObject {
return entry == NULL ? NULL : entry->maps_;
}
- void Insert(HValue* object,
- HInstruction* check,
- Unique<Map> map,
- bool is_stable) {
+ void Insert(HValue* object, Unique<Map> map) {
MapSet list = new(phase_->zone()) UniqueSet<Map>();
list->Add(map, phase_->zone());
- Insert(object, check, list, is_stable);
+ Insert(object, NULL, list);
}
- void Insert(HValue* object,
- HInstruction* check,
- MapSet maps,
- bool is_stable) {
+ void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
HCheckTableEntry* entry = &entries_[cursor_++];
entry->object_ = object;
entry->check_ = check;
entry->maps_ = maps;
- entry->is_stable_ = is_stable;
// If the table becomes full, wrap around and overwrite older entries.
if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
if (size_ < kMaxTrackedObjects) size_++;
@@ -628,7 +481,6 @@ class HCheckTable : public ZoneObject {
}
friend class HCheckMapsEffects;
- friend class HCheckEliminationPhase;
HCheckEliminationPhase* phase_;
HCheckTableEntry entries_[kMaxTrackedObjects];
@@ -643,7 +495,8 @@ class HCheckTable : public ZoneObject {
class HCheckMapsEffects : public ZoneObject {
public:
explicit HCheckMapsEffects(Zone* zone)
- : stores_(5, zone) { }
+ : maps_stored_(false),
+ stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -651,22 +504,27 @@ class HCheckMapsEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- if (instr->IsStoreNamedField()) {
- stores_.Add(HStoreNamedField::cast(instr), zone);
- } else {
- flags_.Add(instr->ChangesFlags());
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ }
+ default: {
+ maps_stored_ |= (instr->CheckGVNFlag(kChangesMaps) |
+ instr->CheckGVNFlag(kChangesElementsKind));
+ }
}
}
// Apply these effects to the given check elimination table.
void Apply(HCheckTable* table) {
- if (flags_.Contains(kOsrEntries)) {
- table->Reset();
- return;
- }
- if (flags_.Contains(kMaps) || flags_.Contains(kElementsKind)) {
+ if (maps_stored_) {
// Uncontrollable map modifications; kill everything.
- table->KillUnstableEntries();
+ table->Kill();
return;
}
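
These effects exist so a loop body can be summarized once: every block's effects are unioned into a per-loop summary, and applying that summary to the loop-header state kills any fact the body might invalidate, which is what keeps loads from being hoisted past map changes or the OSR entry. A compressed sketch of the accumulate-then-apply pattern, with invented names:

struct Table {
  bool has_facts = true;
  void Kill() { has_facts = false; }   // models HCheckTable::Kill()
};

struct Effects {
  bool maps_stored = false;

  void Process(bool instr_may_change_maps) {
    maps_stored |= instr_may_change_maps;
  }
  void Union(const Effects& that) { maps_stored |= that.maps_stored; }
  void Apply(Table& table) const {
    if (maps_stored) table.Kill();     // body may clobber maps: drop facts
  }
};

int main() {
  Effects loop, block_a, block_b;
  block_b.Process(true);               // some store in the loop changes maps
  loop.Union(block_a);
  loop.Union(block_b);
  Table header_state;
  loop.Apply(header_state);            // applied once, before the loop body
  return header_state.has_facts ? 1 : 0;
}
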
@@ -681,14 +539,14 @@ class HCheckMapsEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HCheckMapsEffects* that, Zone* zone) {
- flags_.Add(that->flags_);
+ maps_stored_ |= that->maps_stored_;
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
}
private:
- GVNFlagSet flags_;
+ bool maps_stored_ : 1;
ZoneList<HStoreNamedField*> stores_;
};
@@ -705,7 +563,7 @@ void HCheckEliminationPhase::Run() {
} else {
// Perform only local analysis.
for (int i = 0; i < graph()->blocks()->length(); i++) {
- table->Reset();
+ table->Kill();
engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
}
}
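
Note the fallback branch above: with global analysis off, one scratch table serves every block, and the Kill() between blocks guarantees no fact leaks across a block boundary. Roughly, under assumed simplified types:

#include <vector>

struct Table { void Kill() { /* drop all facts */ } };
struct Block { /* instructions elided */ };

// Local-only mode: reuse one table, but reset it per block so facts
// never survive a block boundary (mirrors the loop in Run() above).
void AnalyzeLocally(const std::vector<Block>& blocks, Table* table) {
  for (size_t i = 0; i < blocks.size(); ++i) {
    table->Kill();
    // ... run the per-instruction reduction over blocks[i] ...
  }
}

int main() {
  std::vector<Block> blocks(3);
  Table table;
  AnalyzeLocally(blocks, &table);
}
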
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
index 99a2f841a7..fe786a5c5c 100644
--- a/deps/v8/src/hydrogen-flow-engine.h
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -122,10 +122,9 @@ class HFlowEngine {
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
- State* state = State::Finish(StateAt(block), block, zone_);
+ State* state = StateAt(block);
if (block->IsReachable()) {
- ASSERT(state != NULL);
if (block->IsLoopHeader()) {
// Apply loop effects before analyzing loop body.
ComputeLoopEffects(block)->Apply(state);
@@ -145,14 +144,18 @@ class HFlowEngine {
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
-
- if (max == 1 && succ->predecessors()->length() == 1) {
- // Optimization: successor can inherit this state.
- SetStateAt(succ, state);
+ if (StateAt(succ) == NULL) {
+ // This is the first state to reach the successor.
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
+ } else {
+ // Successor needs a copy of the state.
+ SetStateAt(succ, state->Copy(succ, block, zone_));
+ }
} else {
// Merge the current state with the state already at the successor.
- SetStateAt(succ,
- State::Merge(StateAt(succ), succ, state, block, zone_));
+ SetStateAt(succ, StateAt(succ)->Merge(succ, state, block, zone_));
}
}
}
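
The three successor cases above generalize to any dataflow state with Copy and Merge: a lone edge hands the state over without copying, the first state to reach a join gets copied, and every later arrival merges in. A standalone sketch of that dispatch (generic, invented names; V8 zone-allocates these states, so ownership is simplified here):

template <typename State>
void PropagateToSuccessor(State** state_at_succ, State* state,
                          bool only_edge_into_succ) {
  if (*state_at_succ == nullptr) {
    // First state to reach the successor.
    *state_at_succ = only_edge_into_succ
        ? state            // optimization: successor inherits this state
        : state->Copy();   // join point: successor needs its own copy
  } else {
    // Merge with the state already recorded at the successor.
    *state_at_succ = (*state_at_succ)->Merge(state);
  }
}

struct DemoState {
  int facts;
  DemoState* Copy() const { return new DemoState(*this); }
  DemoState* Merge(DemoState* that) {
    if (that->facts < facts) facts = that->facts;  // keep the weaker fact
    return this;
  }
};

int main() {
  DemoState entry = {42};
  DemoState* at_succ = nullptr;
  PropagateToSuccessor(&at_succ, &entry, /*only_edge_into_succ=*/false);
  DemoState other = {7};
  PropagateToSuccessor(&at_succ, &other, false);
  int result = at_succ->facts;   // 7 after the merge
  delete at_succ;                // at_succ is the heap copy made above
  return result == 7 ? 0 : 1;
}
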
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index 6bf5a1b68e..bc836890bb 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -32,39 +32,39 @@
namespace v8 {
namespace internal {
-class HInstructionMap V8_FINAL : public ZoneObject {
+class HValueMap: public ZoneObject {
public:
- HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
+ explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
+ present_flags_(0),
array_(NULL),
lists_(NULL),
- free_list_head_(kNil),
- side_effects_tracker_(side_effects_tracker) {
+ free_list_head_(kNil) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
- void Kill(SideEffects side_effects);
+ void Kill(GVNFlagSet flags);
- void Add(HInstruction* instr, Zone* zone) {
- present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
- Insert(instr, zone);
+ void Add(HValue* value, Zone* zone) {
+ present_flags_.Add(value->gvn_flags());
+ Insert(value, zone);
}
- HInstruction* Lookup(HInstruction* instr) const;
+ HValue* Lookup(HValue* value) const;
- HInstructionMap* Copy(Zone* zone) const {
- return new(zone) HInstructionMap(zone, this);
+ HValueMap* Copy(Zone* zone) const {
+ return new(zone) HValueMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
- // A linked list of HInstruction* values. Stored in arrays.
- struct HInstructionMapListElement {
- HInstruction* instr;
+ // A linked list of HValue* values. Stored in arrays.
+ struct HValueMapListElement {
+ HValue* value;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
@@ -72,36 +72,34 @@ class HInstructionMap V8_FINAL : public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- HInstructionMap(Zone* zone, const HInstructionMap* other);
+ HValueMap(Zone* zone, const HValueMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
- void Insert(HInstruction* instr, Zone* zone);
+ void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
- int count_; // The number of values stored in the HInstructionMap.
- SideEffects present_depends_on_;
- HInstructionMapListElement* array_;
- // Primary store - contains the first value
+ int count_; // The number of values stored in the HValueMap.
+ GVNFlagSet present_flags_; // All flags that are in any value in the
+ // HValueMap.
+ HValueMapListElement* array_; // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
- HInstructionMapListElement* lists_;
- // The linked lists containing hash collisions.
+ HValueMapListElement* lists_; // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
- SideEffectsTracker* side_effects_tracker_;
};
-class HSideEffectMap V8_FINAL BASE_EMBEDDED {
+class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
- void Kill(SideEffects side_effects);
+ void Kill(GVNFlagSet flags);
- void Store(SideEffects side_effects, HInstruction* instr);
+ void Store(GVNFlagSet flags, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
@@ -154,36 +152,35 @@ void TraceGVN(const char* msg, ...) {
}
-HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
- present_depends_on_(other->present_depends_on_),
- array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_),
- side_effects_tracker_(other->side_effects_tracker_) {
+ present_flags_(other->present_flags_),
+ array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_) {
OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HInstructionMapListElement));
+ array_, other->array_, array_size_ * sizeof(HValueMapListElement));
OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HInstructionMapListElement));
+ lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
-void HInstructionMap::Kill(SideEffects changes) {
- if (!present_depends_on_.ContainsAnyOf(changes)) return;
- present_depends_on_.RemoveAll();
+void HValueMap::Kill(GVNFlagSet flags) {
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+ if (!present_flags_.ContainsAnyOf(depends_flags)) return;
+ present_flags_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
- HInstruction* instr = array_[i].instr;
- if (instr != NULL) {
+ HValue* value = array_[i].value;
+ if (value != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- HInstruction* instr = lists_[current].instr;
- SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
- if (depends_on.ContainsAnyOf(changes)) {
+ HValue* value = lists_[current].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -192,41 +189,40 @@ void HInstructionMap::Kill(SideEffects changes) {
// Keep it.
lists_[current].next = kept;
kept = current;
- present_depends_on_.Add(depends_on);
+ present_flags_.Add(value->gvn_flags());
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- instr = array_[i].instr;
- SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
- if (depends_on.ContainsAnyOf(changes)) { // Drop it.
+ value = array_[i].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
- array_[i].instr = NULL;
+ array_[i].value = NULL;
} else {
- array_[i].instr = lists_[head].instr;
+ array_[i].value = lists_[head].value;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
- present_depends_on_.Add(depends_on); // Keep it.
+ present_flags_.Add(value->gvn_flags()); // Keep it.
}
}
}
}
-HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
- uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
+HValue* HValueMap::Lookup(HValue* value) const {
+ uint32_t hash = static_cast<uint32_t>(value->Hashcode());
uint32_t pos = Bound(hash);
- if (array_[pos].instr != NULL) {
- if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
+ if (array_[pos].value != NULL) {
+ if (array_[pos].value->Equals(value)) return array_[pos].value;
int next = array_[pos].next;
while (next != kNil) {
- if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
+ if (lists_[next].value->Equals(value)) return lists_[next].value;
next = lists_[next].next;
}
}
@@ -234,7 +230,7 @@ HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
}
-void HInstructionMap::Resize(int new_size, Zone* zone) {
+void HValueMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array produces no more collisions than the
// old hash map had, so we can reuse the existing lists_ array if we are careful.
@@ -244,33 +240,33 @@ void HInstructionMap::Resize(int new_size, Zone* zone) {
ResizeLists(lists_size_ << 1, zone);
}
- HInstructionMapListElement* new_array =
- zone->NewArray<HInstructionMapListElement>(new_size);
- memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
+ HValueMapListElement* new_array =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
- HInstructionMapListElement* old_array = array_;
+ HValueMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
- // Do not modify present_depends_on_. It is currently correct.
+ // Do not modify present_flags_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
- if (old_array[i].instr != NULL) {
+ if (old_array[i].value != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].instr, zone);
+ Insert(lists_[current].value, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
- // Rehash the directly stored instruction.
- Insert(old_array[i].instr, zone);
+ // Rehash the directly stored value.
+ Insert(old_array[i].value, zone);
}
}
}
@@ -279,22 +275,21 @@ void HInstructionMap::Resize(int new_size, Zone* zone) {
}
-void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
+void HValueMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
- HInstructionMapListElement* new_lists =
- zone->NewArray<HInstructionMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
+ HValueMapListElement* new_lists =
+ zone->NewArray<HValueMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
- HInstructionMapListElement* old_lists = lists_;
+ HValueMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
- OS::MemCopy(
- lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
+ OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -303,15 +298,15 @@ void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
}
-void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
- ASSERT(instr != NULL);
+void HValueMap::Insert(HValue* value, Zone* zone) {
+ ASSERT(value != NULL);
// Resize when half of the hash table is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
- if (array_[pos].instr == NULL) {
- array_[pos].instr = instr;
+ uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
+ if (array_[pos].value == NULL) {
+ array_[pos].value = value;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
@@ -320,9 +315,9 @@ void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].instr = instr;
+ lists_[new_element_pos].value = value;
lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
array_[pos].next = new_element_pos;
}
}
@@ -346,9 +341,10 @@ HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
}
-void HSideEffectMap::Kill(SideEffects side_effects) {
+void HSideEffectMap::Kill(GVNFlagSet flags) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
@@ -356,9 +352,10 @@ void HSideEffectMap::Kill(SideEffects side_effects) {
}
-void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
+void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ if (flags.Contains(changes_flag)) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
@@ -366,96 +363,6 @@ void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
}
-SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
- SideEffects result(instr->ChangesFlags());
- if (result.ContainsFlag(kInobjectFields)) {
- int index;
- if (instr->IsStoreNamedField() &&
- ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
- result.RemoveFlag(kInobjectFields);
- result.AddSpecial(index);
- } else {
- result.AddAllSpecial();
- }
- }
- return result;
-}
-
-
-SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
- SideEffects result(instr->DependsOnFlags());
- if (result.ContainsFlag(kInobjectFields)) {
- int index;
- if (instr->IsLoadNamedField() &&
- ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
- result.RemoveFlag(kInobjectFields);
- result.AddSpecial(index);
- } else {
- result.AddAllSpecial();
- }
- }
- return result;
-}
-
-
-void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
- SideEffects side_effects) const {
- const char* separator = "";
- stream->Add("[");
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- GVNFlag flag = GVNFlagFromInt(bit);
- if (side_effects.ContainsFlag(flag)) {
- stream->Add(separator);
- separator = ", ";
- switch (flag) {
-#define DECLARE_FLAG(Type) \
- case k##Type: \
- stream->Add(#Type); \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- for (int index = 0; index < num_inobject_fields_; ++index) {
- if (side_effects.ContainsSpecial(index)) {
- stream->Add(separator);
- separator = ", ";
- inobject_fields_[index].PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
-bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
- int* index) {
- for (int i = 0; i < num_inobject_fields_; ++i) {
- if (access.Equals(inobject_fields_[i])) {
- *index = i;
- return true;
- }
- }
- if (num_inobject_fields_ < SideEffects::kNumberOfSpecials) {
- if (FLAG_trace_gvn) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- stream.Add("Tracking inobject field access ");
- access.PrintTo(&stream);
- stream.Add(" (mapped to special index %d)\n", num_inobject_fields_);
- stream.OutputToStdOut();
- }
- *index = num_inobject_fields_;
- inobject_fields_[num_inobject_fields_++] = access;
- return true;
- }
- return false;
-}
-
-
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
: HPhase("H_Global value numbering", graph),
removed_side_effects_(false),
@@ -463,10 +370,10 @@ HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
loop_side_effects_(graph->blocks()->length(), zone()),
visited_on_paths_(graph->blocks()->length(), zone()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(
- SideEffects(), graph->blocks()->length(), zone());
- loop_side_effects_.AddBlock(
- SideEffects(), graph->blocks()->length(), zone());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
+ zone());
}
@@ -502,12 +409,12 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- SideEffects side_effects;
+ GVNFlagSet side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
+ side_effects.Add(instr->ChangesFlags());
}
block_side_effects_[id].Add(side_effects);
@@ -531,22 +438,103 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
}
+SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
+ char underlying_buffer[kNumberOfFlags * 128];
+ Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
+#if DEBUG
+ int offset = 0;
+ const char* separator = "";
+ const char* comma = ", ";
+ buffer[0] = 0;
+ uint32_t set_depends_on = 0;
+ uint32_t set_changes = 0;
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ if (flags.Contains(static_cast<GVNFlag>(bit))) {
+ if (bit % 2 == 0) {
+ set_changes++;
+ } else {
+ set_depends_on++;
+ }
+ }
+ }
+ bool positive_changes = set_changes < (kNumberOfFlags / 2);
+ bool positive_depends_on = set_depends_on < (kNumberOfFlags / 2);
+ if (set_changes > 0) {
+ if (positive_changes) {
+ offset += OS::SNPrintF(buffer + offset, "changes [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "changes all except [");
+ }
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_changes) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kChanges##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+ if (set_depends_on > 0) {
+ separator = "";
+ if (set_changes > 0) {
+ offset += OS::SNPrintF(buffer + offset, ", ");
+ }
+ if (positive_depends_on) {
+ offset += OS::SNPrintF(buffer + offset, "depends on [");
+ } else {
+ offset += OS::SNPrintF(buffer + offset, "depends on all except [");
+ }
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_depends_on) {
+ switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type) \
+ case kDependsOn##type: \
+ offset += OS::SNPrintF(buffer + offset, separator); \
+ offset += OS::SNPrintF(buffer + offset, #type); \
+ separator = comma; \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ offset += OS::SNPrintF(buffer + offset, "]");
+ }
+#else
+ OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
+#endif
+ size_t string_len = strlen(underlying_buffer) + 1;
+ ASSERT(string_len <= sizeof(underlying_buffer));
+ char* result = new char[strlen(underlying_buffer) + 1];
+ OS::MemCopy(result, underlying_buffer, string_len);
+ return SmartArrayPointer<char>(result);
+}
+
+
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
- SideEffects side_effects = loop_side_effects_[block->block_id()];
- if (FLAG_trace_gvn) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- stream.Add("Try loop invariant motion for block B%d changes ",
- block->block_id());
- side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
- stream.Add("\n");
- stream.OutputToStdOut();
- }
+ GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
+ TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
+ block->block_id(),
+ GetGVNFlagsString(side_effects).get());
+
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
@@ -559,37 +547,22 @@ void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
- SideEffects loop_kills) {
+ GVNFlagSet loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- if (FLAG_trace_gvn) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- stream.Add("Loop invariant code motion for B%d depends on ",
- block->block_id());
- side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
- stream.Add("\n");
- stream.OutputToStdOut();
- }
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+ TRACE_GVN_2("Loop invariant motion for B%d %s\n",
+ block->block_id(),
+ GetGVNFlagsString(depends_flags).get());
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
if (instr->CheckFlag(HValue::kUseGVN)) {
- SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
- SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
- if (FLAG_trace_gvn) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- stream.Add("Checking instruction i%d (%s) changes ",
- instr->id(), instr->Mnemonic());
- side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
- stream.Add(", depends on ");
- side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
- stream.Add(". Loop changes ");
- side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
- stream.Add("\n");
- stream.OutputToStdOut();
- }
- bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
+ TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
+ instr->id(),
+ instr->Mnemonic(),
+ GetGVNFlagsString(instr->gvn_flags()).get(),
+ GetGVNFlagsString(loop_kills).get());
+ bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
@@ -631,10 +604,10 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
}
-SideEffects
+GVNFlagSet
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- SideEffects side_effects;
+ GVNFlagSet side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
@@ -663,13 +636,13 @@ class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
- HInstructionMap* entry_map) {
+ HValueMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
- HInstructionMap* map() { return map_; }
+ HValueMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
@@ -696,7 +669,7 @@ class GvnBasicBlockState: public ZoneObject {
private:
void Initialize(HBasicBlock* block,
- HInstructionMap* map,
+ HValueMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
@@ -712,7 +685,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
- HInstructionMap* map,
+ HValueMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
@@ -759,7 +732,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
- HInstructionMap* map_;
+ HValueMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
@@ -772,14 +745,13 @@ class GvnBasicBlockState: public ZoneObject {
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
- HInstructionMap* entry_map =
- new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
+ HValueMap* entry_map = new(zone()) HValueMap(zone());
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
- HInstructionMap* map = current->map();
+ HValueMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
@@ -798,15 +770,17 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
- GVNFlag flag = GVNFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
+ GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+ GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+ (other != NULL)) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- if (instr->HandleSideEffectDominator(flag, other)) {
+ if (instr->HandleSideEffectDominator(changes_flag, other)) {
removed_side_effects_ = true;
}
}
@@ -815,27 +789,21 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
- SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
- if (!changes.IsEmpty()) {
+ GVNFlagSet flags = instr->ChangesFlags();
+ if (!flags.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
- map->Kill(changes);
- dominators->Store(changes, instr);
- if (FLAG_trace_gvn) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- stream.Add("Instruction i%d changes ", instr->id());
- side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
- stream.Add("\n");
- stream.OutputToStdOut();
- }
+ map->Kill(flags);
+ dominators->Store(flags, instr);
+ TRACE_GVN_2("Instruction %d %s\n", instr->id(),
+ GetGVNFlagsString(flags).get());
}
if (instr->CheckFlag(HValue::kUseGVN)) {
ASSERT(!instr->HasObservableSideEffects());
- HInstruction* other = map->Lookup(instr);
+ HValue* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
+ TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
@@ -855,7 +823,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (next != NULL) {
HBasicBlock* dominated = next->block();
- HInstructionMap* successor_map = next->map();
+ HValueMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
@@ -866,7 +834,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
- SideEffects side_effects_on_all_paths =
+ GVNFlagSet side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
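
The HValueMap above is the heart of GVN: a hash map keyed by an instruction's operation and operands, with a cheap present-flags filter so Kill can bail out when nothing in the map depends on the changed effects. A condensed standalone analogue over arithmetic expressions (nothing here is V8 API; flags are a plain bitset, and std::map stands in for the custom hash table):

#include <cstdint>
#include <map>
#include <tuple>

typedef uint32_t Flags;                      // one bit per side effect
typedef std::tuple<char, int, int> ExprKey;  // (op, left id, right id)

struct ValueMap {
  std::map<ExprKey, int> table;              // expression -> value number
  std::map<ExprKey, Flags> depends;          // what each entry relies on
  Flags present = 0;                         // union, for cheap rejection

  const int* Lookup(const ExprKey& key) const {
    std::map<ExprKey, int>::const_iterator it = table.find(key);
    return it == table.end() ? nullptr : &it->second;
  }

  void Add(const ExprKey& key, int value, Flags dep) {
    table[key] = value;
    depends[key] = dep;
    present |= dep;                          // mirrors present_flags_.Add
  }

  // Drop every entry whose depends-set intersects the changed effects.
  void Kill(Flags changes) {
    if ((present & changes) == 0) return;    // nothing here can be stale
    present = 0;
    for (std::map<ExprKey, int>::iterator it = table.begin();
         it != table.end();) {
      Flags dep = depends[it->first];
      if (dep & changes) {
        depends.erase(it->first);
        table.erase(it++);
      } else {
        present |= dep;                      // rebuild the summary
        ++it;
      }
    }
  }
};

int main() {
  ValueMap map;
  map.Add(ExprKey('+', 1, 2), 3, /*dep=*/0x1);
  map.Kill(0x2);                             // unrelated effect: entry kept
  map.Kill(0x1);                             // relevant effect: entry dropped
  return map.Lookup(ExprKey('+', 1, 2)) == nullptr ? 0 : 1;
}
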
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index cb83354a7e..30333cca61 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -36,77 +36,15 @@
namespace v8 {
namespace internal {
-// This class extends GVNFlagSet with additional "special" dynamic side effects,
-// which can be used to represent side effects that cannot be expressed using
-// the GVNFlags of an HInstruction. These special side effects are tracked by a
-// SideEffectsTracker (see below).
-class SideEffects V8_FINAL {
- public:
- static const int kNumberOfSpecials = 64 - kNumberOfFlags;
-
- SideEffects() : bits_(0) {
- ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
- }
- explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
- bool IsEmpty() const { return bits_ == 0; }
- bool ContainsFlag(GVNFlag flag) const {
- return (bits_ & MaskFlag(flag)) != 0;
- }
- bool ContainsSpecial(int special) const {
- return (bits_ & MaskSpecial(special)) != 0;
- }
- bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
- void Add(SideEffects set) { bits_ |= set.bits_; }
- void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
- void AddAllSpecial() { bits_ |= ~static_cast<uint64_t>(0) << kNumberOfFlags; }
- void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
- void RemoveAll() { bits_ = 0; }
- uint64_t ToIntegral() const { return bits_; }
- void PrintTo(StringStream* stream) const;
-
- private:
- uint64_t MaskFlag(GVNFlag flag) const {
- return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
- }
- uint64_t MaskSpecial(int special) const {
- ASSERT(special >= 0);
- ASSERT(special < kNumberOfSpecials);
- return static_cast<uint64_t>(1) << static_cast<unsigned>(
- special + kNumberOfFlags);
- }
-
- uint64_t bits_;
-};
-
-
-// Tracks inobject field loads/stores in a fine grained fashion, and represents
-// them using the "special" dynamic side effects of the SideEffects class (see
-// above). This way unrelated inobject field stores don't prevent hoisting and
-// merging of inobject field loads.
-class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
- public:
- SideEffectsTracker() : num_inobject_fields_(0) {}
- SideEffects ComputeChanges(HInstruction* instr);
- SideEffects ComputeDependsOn(HInstruction* instr);
- void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
-
- private:
- bool ComputeInobjectField(HObjectAccess access, int* index);
-
- HObjectAccess inobject_fields_[SideEffects::kNumberOfSpecials];
- int num_inobject_fields_;
-};
-
-
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
+class HGlobalValueNumberingPhase : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
void Run();
private:
- SideEffects CollectSideEffectsOnPathsToDominatedBlock(
+ GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
@@ -114,18 +52,17 @@ class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- SideEffects loop_kills);
+ GVNFlagSet loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
- SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<SideEffects> block_side_effects_;
+ ZoneList<GVNFlagSet> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<SideEffects> loop_side_effects_;
+ ZoneList<GVNFlagSet> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -134,6 +71,7 @@ class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
+
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
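
The hoisting decision in ProcessLoopBlock reduces to one bit test: convert everything the loop changes into the depends-on bits those changes invalidate, then require the instruction's flags to miss that set entirely. Worked as a tiny predicate under the interleaved encoding restored in hydrogen-instructions.h below (changes flags at even bits, depends-on flags at odd bits; the bit positions in main are illustrative):

#include <cstdint>

typedef uint64_t FlagSet;    // in the spirit of EnumSet<GVNFlag, int64_t>

// With kChanges##type at bit 2*i and kDependsOn##type at bit 2*i + 1,
// the depends-on bits threatened by a set of changes are one shift away.
FlagSet ConvertChangesToDependsFlags(FlagSet changes) {
  return changes << 1;       // kChangesToDependsFlagsLeftShift == 1
}

bool CanHoist(FlagSet instr_gvn_flags, FlagSet loop_changes) {
  return (instr_gvn_flags & ConvertChangesToDependsFlags(loop_changes)) == 0;
}

int main() {
  const FlagSet kChangesMaps = 1ull << 0;
  const FlagSet kDependsOnMaps = 1ull << 1;
  // A load that depends on maps cannot be hoisted out of a loop that
  // contains a map-changing instruction.
  return CanHoist(kDependsOnMaps, kChangesMaps) ? 1 : 0;
}
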
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 5795385728..b14873eb62 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -35,8 +35,6 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -606,11 +604,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(Type) \
- if (changes_flags.Contains(k##Type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#Type); \
+#define PRINT_DO(type) \
+ if (changes_flags.Contains(kChanges##type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -682,19 +680,6 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
-void HSourcePosition::PrintTo(FILE* out) {
- if (IsUnknown()) {
- PrintF(out, "<?>");
- } else {
- if (FLAG_hydrogen_track_positions) {
- PrintF(out, "<%d:%d>", inlining_id(), position());
- } else {
- PrintF(out, "<0:%d>", raw());
- }
- }
-}
-
-
void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
@@ -751,7 +736,8 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
- if (!has_position() && next->has_position()) {
+ if (position() == RelocInfo::kNoPosition &&
+ next->position() != RelocInfo::kNoPosition) {
set_position(next->position());
}
}
@@ -788,7 +774,8 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
- if (!has_position() && previous->has_position()) {
+ if (position() == RelocInfo::kNoPosition &&
+ previous->position() != RelocInfo::kNoPosition) {
set_position(previous->position());
}
}
@@ -1529,7 +1516,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kMaps);
+ ASSERT(side_effect == kChangesMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
// for which the map is known.
@@ -1637,7 +1624,7 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
- ClearChangesFlag(kNewSpacePromotion);
+ ClearGVNFlag(kChangesNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1660,7 +1647,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-HSourcePosition HPhi::position() const {
+int HPhi::position() const {
return block()->first()->position();
}
@@ -2562,7 +2549,11 @@ HConstant::HConstant(int32_t integer_value,
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
- set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
+ // It's possible to create a constant with a value in Smi-range but stored
+ // in a (pre-existing) HeapNumber. See crbug.com/349878.
+ bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
+ bool is_smi = has_smi_value_ && !could_be_heapobject;
+ set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
Initialize(r);
}
@@ -2582,7 +2573,11 @@ HConstant::HConstant(double double_value,
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
- set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
+ // It's possible to create a constant with a value in Smi-range but stored
+ // in a (pre-existing) HeapNumber. See crbug.com/349878.
+ bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
+ bool is_smi = has_smi_value_ && !could_be_heapobject;
+ set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
Initialize(r);
}
@@ -3016,7 +3011,7 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
if (left()->IsConstant() && right()->IsConstant()) {
bool comparison_result =
- HConstant::cast(left())->Equals(HConstant::cast(right()));
+ HConstant::cast(left())->DataEquals(HConstant::cast(right()));
*block = comparison_result
? FirstSuccessor()
: SecondSuccessor();
@@ -3118,7 +3113,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
CompilationInfo* info,
HValue* typecheck) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- check_map->Add(map, info, zone);
+ check_map->Add(map, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
@@ -3425,7 +3420,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kNewSpacePromotion);
+ ASSERT(side_effect == kChangesNewSpacePromotion);
Zone* zone = block()->zone();
if (!FLAG_use_allocation_folding) return false;
@@ -3438,15 +3433,6 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
- // Check whether we are folding within the same block for local folding.
- if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
HAllocate* dominator_allocate = HAllocate::cast(dominator);
HValue* dominator_size = dominator_allocate->size();
HValue* current_size = size();
@@ -4412,80 +4398,56 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}
-void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
+void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
// set the appropriate GVN flags for a given load or store instruction
- if (access_type == STORE) {
+ if (is_store) {
// track dominating allocations in order to eliminate write barriers
- instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
+ instr->SetGVNFlag(kDependsOnNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
- instr->SetDependsOnFlag(::v8::internal::kMaps);
+ instr->SetGVNFlag(kDependsOnMaps);
}
switch (portion()) {
case kArrayLengths:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kArrayLengths);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesArrayLengths : kDependsOnArrayLengths);
break;
case kStringLengths:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kStringLengths);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kStringLengths);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesStringLengths : kDependsOnStringLengths);
break;
case kInobject:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kInobjectFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesInobjectFields : kDependsOnInobjectFields);
break;
case kDouble:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kDoubleFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesDoubleFields : kDependsOnDoubleFields);
break;
case kBackingStore:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
break;
case kElementsPointer:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kElementsPointer);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesElementsPointer : kDependsOnElementsPointer);
break;
case kMaps:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kMaps);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kMaps);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesMaps : kDependsOnMaps);
break;
case kExternalMemory:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kExternalMemory);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
- }
+ instr->SetGVNFlag(is_store
+ ? kChangesExternalMemory : kDependsOnExternalMemory);
break;
}
}
-void HObjectAccess::PrintTo(StringStream* stream) const {
+void HObjectAccess::PrintTo(StringStream* stream) {
stream->Add(".");
switch (portion()) {
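
The HConstant hunks above guard a subtle case: a number whose value fits in Smi range can still be materialized as a pre-existing HeapNumber, and typing such a constant as Smi would be wrong (crbug.com/349878). The decision, reduced to a standalone predicate with booleans standing in for V8's Representation and handle checks:

#include <cassert>

// Mirrors the is_smi computation in the HConstant constructors above.
bool ConstantGetsSmiType(bool has_smi_value,
                         bool representation_is_tagged,
                         bool has_object_handle) {
  // A tagged constant backed by an existing handle may be a HeapNumber
  // even when its numeric value is in Smi range.
  bool could_be_heapobject = representation_is_tagged && has_object_handle;
  return has_smi_value && !could_be_heapobject;
}

int main() {
  assert(ConstantGetsSmiType(true, false, false));  // plain int32: Smi
  assert(!ConstantGetsSmiType(true, true, true));   // boxed number: not Smi
  return 0;
}
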
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 4976f7b90c..f897e91bb3 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -224,9 +224,6 @@ class LChunkBuilder;
}
-enum PropertyAccessType { LOAD, STORE };
-
-
class Range V8_FINAL : public ZoneObject {
public:
Range()
@@ -476,28 +473,22 @@ class HUseIterator V8_FINAL BASE_EMBEDDED {
};
-// All tracked flags should appear before untracked ones.
+// There must be one corresponding kDependsOn flag for every kChanges flag,
+// and the order of the kChanges flags must match that of the kDependsOn
+// flags. All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
-#define DECLARE_FLAG(Type) k##Type,
+#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
-#define COUNT_FLAG(Type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
- kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
+ kNumberOfFlags,
+#define COUNT_FLAG(type) + 1
+ kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
#undef COUNT_FLAG
- kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};
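
The "0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)" expression above is the usual X-macro counting idiom: every list entry expands to "+ 1", so the enumerator ends up equal to the number of list entries. A minimal, self-contained sketch of the idiom, using a hypothetical three-entry list rather than V8's real one:

#include <cstdio>

// Hypothetical X-macro list with three entries.
#define DEMO_FLAG_LIST(V) V(Alpha) V(Beta) V(Gamma)

enum DemoFlag {
  // Each entry declares a kChanges/kDependsOn pair, as in the enum above.
#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
  DEMO_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
  kNumberOfDemoFlags,  // 6: two flags per list entry.
  // COUNT_FLAG ignores its argument, so this expands to 0 + 1 + 1 + 1.
#define COUNT_FLAG(type) + 1
  kNumberOfDemoEntries = 0 DEMO_FLAG_LIST(COUNT_FLAG)
#undef COUNT_FLAG
};

int main() {
  std::printf("%d flags, %d entries\n", kNumberOfDemoFlags, kNumberOfDemoEntries);
  return 0;  // Prints "6 flags, 3 entries".
}
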
-static inline GVNFlag GVNFlagFromInt(int i) {
- ASSERT(i >= 0);
- ASSERT(i < kNumberOfFlags);
- return static_cast<GVNFlag>(i);
-}
-
-
class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -543,62 +534,7 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
-
-
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class HSourcePosition {
- public:
- HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
-
- static HSourcePosition Unknown() {
- return HSourcePosition(RelocInfo::kNoPosition);
- }
-
- bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
-
- int position() const { return PositionField::decode(value_); }
- void set_position(int position) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<int>(PositionField::update(value_, position));
- } else {
- value_ = position;
- }
- }
-
- int inlining_id() const { return InliningIdField::decode(value_); }
- void set_inlining_id(int inlining_id) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
- }
- }
-
- int raw() const { return value_; }
-
- void PrintTo(FILE* f);
-
- private:
- typedef BitField<int, 0, 9> InliningIdField;
-
- // Offset from the start of the inlined function.
- typedef BitField<int, 9, 22> PositionField;
-
- // Only HPositionInfo can use this constructor.
- explicit HSourcePosition(int value) : value_(value) { }
-
- friend class HPositionInfo;
-
- // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
- // and PositionField.
- // Otherwise contains absolute offset from the script start.
- int value_;
-};
+typedef EnumSet<GVNFlag, int64_t> GVNFlagSet;
class HValue : public ZoneObject {
@@ -649,6 +585,18 @@ class HValue : public ZoneObject {
STATIC_ASSERT(kLastFlag < kBitsPerInt);
+ static const int kChangesToDependsFlagsLeftShift = 1;
+
+ static GVNFlag ChangesFlagFromInt(int x) {
+ return static_cast<GVNFlag>(x * 2);
+ }
+ static GVNFlag DependsOnFlagFromInt(int x) {
+ return static_cast<GVNFlag>(x * 2 + 1);
+ }
+ static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
+ return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
+ }
+
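
Because the kChanges/kDependsOn pairs above are interleaved (changes flags at even indices, depends-on flags at odd ones), turning a set of changes flags into the matching depends-on set is just a left shift of the underlying bit mask, which is what ConvertChangesToDependsFlags does. The interleaving also doubles the flag count, which is why the GVNFlagSet typedef above widens to int64_t. A minimal sketch of the arithmetic on a plain 64-bit mask:

#include <cassert>
#include <cstdint>

// Interleaved layout: flag 2*i is "changes", flag 2*i + 1 is "depends on".
constexpr int ChangesFlagFromInt(int i)   { return i * 2; }
constexpr int DependsOnFlagFromInt(int i) { return i * 2 + 1; }

// Every changes bit sits at an even position, so shifting the whole mask
// left by one moves each changes bit onto its depends-on partner.
constexpr uint64_t ConvertChangesToDependsFlags(uint64_t changes_mask) {
  return changes_mask << 1;
}

int main() {
  const uint64_t changes = (uint64_t{1} << ChangesFlagFromInt(0)) |
                           (uint64_t{1} << ChangesFlagFromInt(5));
  const uint64_t depends = ConvertChangesToDependsFlags(changes);
  assert(depends == ((uint64_t{1} << DependsOnFlagFromInt(0)) |
                     (uint64_t{1} << DependsOnFlagFromInt(5))));
  return 0;
}
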
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -682,12 +630,8 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
- virtual HSourcePosition position() const {
- return HSourcePosition::Unknown();
- }
- virtual HSourcePosition operand_position(int index) const {
- return position();
- }
+ virtual int position() const { return RelocInfo::kNoPosition; }
+ virtual int operand_position(int index) const { return position(); }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
@@ -828,38 +772,43 @@ class HValue : public ZoneObject {
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
- GVNFlagSet ChangesFlags() const { return changes_flags_; }
- GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
- void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
- void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
- void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
- void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
- bool CheckChangesFlag(GVNFlag f) const {
- return changes_flags_.Contains(f);
- }
- bool CheckDependsOnFlag(GVNFlag f) const {
- return depends_on_flags_.Contains(f);
- }
- void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
+ GVNFlagSet gvn_flags() const { return gvn_flags_; }
+ void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
+ void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
+ bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
+ void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
void ClearAllSideEffects() {
- changes_flags_.Remove(AllSideEffectsFlagSet());
+ gvn_flags_.Remove(AllSideEffectsFlagSet());
}
bool HasSideEffects() const {
- return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return !CheckFlag(kHasNoObservableSideEffects) &&
- changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+ gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+ }
+
+ GVNFlagSet DependsOnFlags() const {
+ GVNFlagSet result = gvn_flags_;
+ result.Intersect(AllDependsOnFlagSet());
+ return result;
}
GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = ChangesFlags();
+ GVNFlagSet result = gvn_flags_;
result.Intersect(AllSideEffectsFlagSet());
return result;
}
+ GVNFlagSet ChangesFlags() const {
+ GVNFlagSet result = gvn_flags_;
+ result.Intersect(AllChangesFlagSet());
+ return result;
+ }
+
GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = ChangesFlags();
+ GVNFlagSet result = gvn_flags_;
+ result.Intersect(AllChangesFlagSet());
result.Intersect(AllObservableSideEffectsFlagSet());
return result;
}
@@ -1000,9 +949,20 @@ class HValue : public ZoneObject {
representation_ = r;
}
- static GVNFlagSet AllFlagSet() {
+ static GVNFlagSet AllDependsOnFlagSet() {
+ GVNFlagSet result;
+ // Create depends-on mask.
+#define ADD_FLAG(type) result.Add(kDependsOn##type);
+ GVN_TRACKED_FLAG_LIST(ADD_FLAG)
+ GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
+#undef ADD_FLAG
+ return result;
+ }
+
+ static GVNFlagSet AllChangesFlagSet() {
GVNFlagSet result;
-#define ADD_FLAG(Type) result.Add(k##Type);
+ // Create changes mask.
+#define ADD_FLAG(type) result.Add(kChanges##type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
@@ -1011,19 +971,19 @@ class HValue : public ZoneObject {
// A flag mask to mark an instruction as having arbitrary side effects.
static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllFlagSet();
- result.Remove(kOsrEntries);
+ GVNFlagSet result = AllChangesFlagSet();
+ result.Remove(kChangesOsrEntries);
return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove).
static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllFlagSet();
- result.Remove(kNewSpacePromotion);
- result.Remove(kElementsKind);
- result.Remove(kElementsPointer);
- result.Remove(kMaps);
+ GVNFlagSet result = AllChangesFlagSet();
+ result.Remove(kChangesNewSpacePromotion);
+ result.Remove(kChangesElementsKind);
+ result.Remove(kChangesElementsPointer);
+ result.Remove(kChangesMaps);
return result;
}
@@ -1044,8 +1004,7 @@ class HValue : public ZoneObject {
HUseListNode* use_list_;
Range* range_;
int flags_;
- GVNFlagSet changes_flags_;
- GVNFlagSet depends_on_flags_;
+ GVNFlagSet gvn_flags_;
private:
virtual bool IsDeletable() const { return false; }
@@ -1144,22 +1103,25 @@ class HValue : public ZoneObject {
// In the first case it contains the instruction's position as a tagged value.
// In the second case it points to an array which contains instruction's
// position and operands' positions.
+// TODO(vegorov): what we really want to track here is a combination of
+// source position and a script id because cross-script inlining can easily
+// result in optimized functions composed of several scripts.
class HPositionInfo {
public:
explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
- HSourcePosition position() const {
+ int position() const {
if (has_operand_positions()) {
- return operand_positions()[kInstructionPosIndex];
+ return static_cast<int>(operand_positions()[kInstructionPosIndex]);
}
- return HSourcePosition(static_cast<int>(UntagPosition(data_)));
+ return static_cast<int>(UntagPosition(data_));
}
- void set_position(HSourcePosition pos) {
+ void set_position(int pos) {
if (has_operand_positions()) {
operand_positions()[kInstructionPosIndex] = pos;
} else {
- data_ = TagPosition(pos.raw());
+ data_ = TagPosition(pos);
}
}
@@ -1169,27 +1131,27 @@ class HPositionInfo {
}
const int length = kFirstOperandPosIndex + operand_count;
- HSourcePosition* positions =
- zone->NewArray<HSourcePosition>(length);
+ intptr_t* positions =
+ zone->NewArray<intptr_t>(length);
for (int i = 0; i < length; i++) {
- positions[i] = HSourcePosition::Unknown();
+ positions[i] = RelocInfo::kNoPosition;
}
- const HSourcePosition pos = position();
+ const int pos = position();
data_ = reinterpret_cast<intptr_t>(positions);
set_position(pos);
ASSERT(has_operand_positions());
}
- HSourcePosition operand_position(int idx) const {
+ int operand_position(int idx) const {
if (!has_operand_positions()) {
return position();
}
- return *operand_position_slot(idx);
+ return static_cast<int>(*operand_position_slot(idx));
}
- void set_operand_position(int idx, HSourcePosition pos) {
+ void set_operand_position(int idx, int pos) {
*operand_position_slot(idx) = pos;
}
@@ -1197,7 +1159,7 @@ class HPositionInfo {
static const intptr_t kInstructionPosIndex = 0;
static const intptr_t kFirstOperandPosIndex = 1;
- HSourcePosition* operand_position_slot(int idx) const {
+ intptr_t* operand_position_slot(int idx) const {
ASSERT(has_operand_positions());
return &(operand_positions()[kFirstOperandPosIndex + idx]);
}
@@ -1206,9 +1168,9 @@ class HPositionInfo {
return !IsTaggedPosition(data_);
}
- HSourcePosition* operand_positions() const {
+ intptr_t* operand_positions() const {
ASSERT(has_operand_positions());
- return reinterpret_cast<HSourcePosition*>(data_);
+ return reinterpret_cast<intptr_t*>(data_);
}
static const intptr_t kPositionTag = 1;
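
HPositionInfo's data_ field is a classic one-word tagged union: heap pointers are at least 2-byte aligned, so a real pointer always has a clear low bit, and setting that bit (kPositionTag) marks the word as carrying a shifted integer position instead of an array pointer. A minimal sketch of the tagging scheme, independent of V8's types:

#include <cassert>
#include <cstdint>

constexpr intptr_t kTag = 1;

// Tag an integer position: shift left one bit and set the low bit.
intptr_t TagPosition(int pos) {
  return (static_cast<intptr_t>(pos) << 1) | kTag;
}

bool IsTaggedPosition(intptr_t data) { return (data & kTag) != 0; }

int UntagPosition(intptr_t data) {
  assert(IsTaggedPosition(data));
  return static_cast<int>(data >> 1);
}

int main() {
  intptr_t data = TagPosition(42);
  assert(IsTaggedPosition(data));  // Low bit set: this word holds an int.
  assert(UntagPosition(data) == 42);

  // A real (aligned) pointer keeps its low bit clear, so both cases can be
  // told apart from the same word.
  static intptr_t storage[2] = {0, 0};
  intptr_t as_ptr = reinterpret_cast<intptr_t>(&storage[0]);
  assert(!IsTaggedPosition(as_ptr));
  return 0;
}
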
@@ -1256,23 +1218,23 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- virtual HSourcePosition position() const V8_OVERRIDE {
- return HSourcePosition(position_.position());
+ virtual int position() const V8_OVERRIDE {
+ return position_.position();
}
bool has_position() const {
- return !position().IsUnknown();
+ return position_.position() != RelocInfo::kNoPosition;
}
- void set_position(HSourcePosition position) {
+ void set_position(int position) {
ASSERT(!has_position());
- ASSERT(!position.IsUnknown());
+ ASSERT(position != RelocInfo::kNoPosition);
position_.set_position(position);
}
- virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
- const HSourcePosition pos = position_.operand_position(index);
- return pos.IsUnknown() ? position() : pos;
+ virtual int operand_position(int index) const V8_OVERRIDE {
+ const int pos = position_.operand_position(index);
+ return (pos != RelocInfo::kNoPosition) ? pos : position();
}
- void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
+ void set_operand_position(Zone* zone, int index, int pos) {
ASSERT(0 <= index && index < OperandCount());
position_.ensure_storage_for_operand_positions(zone, OperandCount());
position_.set_operand_position(index, pos);
@@ -1296,7 +1258,7 @@ class HInstruction : public HValue {
next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetDependsOnFlag(kOsrEntries);
+ SetGVNFlag(kDependsOnOsrEntries);
}
virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
@@ -1564,10 +1526,8 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
- DECLARE_INSTRUCTION_FACTORY_P3(HCompareMap, HValue*, Handle<Map>,
- CompilationInfo*);
- DECLARE_INSTRUCTION_FACTORY_P5(HCompareMap, HValue*, Handle<Map>,
- CompilationInfo*,
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
HBasicBlock*, HBasicBlock*);
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
@@ -1593,10 +1553,6 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
- bool is_stable() const {
- return is_stable_;
- }
-
DECLARE_CONCRETE_INSTRUCTION(CompareMap)
protected:
@@ -1605,22 +1561,15 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
private:
HCompareMap(HValue* value,
Handle<Map> map,
- CompilationInfo* info,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
known_successor_index_(kNoKnownSuccessorIndex), map_(Unique<Map>(map)) {
ASSERT(!map.is_null());
- is_stable_ = map->is_stable();
-
- if (is_stable_) {
- map->AddDependentCompilationInfo(
- DependentCode::kPrototypeCheckGroup, info);
- }
+ set_representation(Representation::Tagged());
}
int known_successor_index_;
- bool is_stable_;
Unique<Map> map_;
};
@@ -1770,7 +1719,7 @@ class HChange V8_FINAL : public HUnaryOperation {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
}
}
@@ -2016,7 +1965,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
private:
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
Type type_;
@@ -2564,7 +2513,7 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value, HType::Smi()) {
set_representation(Representation::Smi());
SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
+ SetGVNFlag(kDependsOnMaps);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2638,7 +2587,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kFlexibleRepresentation);
// TODO(svenpanne) This flag is actually only needed if representation()
// is tagged, and not when it is an unboxed double or unboxed integer.
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
break;
case kMathLog:
case kMathExp:
@@ -2687,7 +2636,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
// TODO(bmeurer): We'll need kDependsOnRoots once we add the
// corresponding HStoreRoot instruction.
- SetDependsOnFlag(kCalls);
+ SetGVNFlag(kDependsOnCalls);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2700,14 +2649,13 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
- HValue* typecheck = NULL);
+ HValue *typecheck = NULL);
static HCheckMaps* New(Zone* zone, HValue* context,
HValue* value, SmallMapList* maps,
- CompilationInfo* info,
- HValue* typecheck = NULL) {
+ HValue *typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
- check_map->Add(maps->at(i), info, zone);
+ check_map->Add(maps->at(i), zone);
}
return check_map;
}
@@ -2723,26 +2671,14 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
Unique<Map> first_map() const { return map_set_.at(0); }
UniqueSet<Map> map_set() const { return map_set_; }
- void set_map_set(UniqueSet<Map>* maps, Zone *zone) {
- map_set_.Clear();
- for (int i = 0; i < maps->size(); i++) {
- map_set_.Add(maps->at(i), zone);
- }
- }
-
bool has_migration_target() const {
return has_migration_target_;
}
- bool is_stable() const {
- return is_stable_;
- }
-
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
@@ -2753,38 +2689,30 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual int RedefinedOperandIndex() { return 0; }
private:
- void Add(Handle<Map> map, CompilationInfo* info, Zone* zone) {
+ void Add(Handle<Map> map, Zone* zone) {
map_set_.Add(Unique<Map>(map), zone);
- is_stable_ = is_stable_ && map->is_stable();
- if (is_stable_) {
- map->AddDependentCompilationInfo(
- DependentCode::kPrototypeCheckGroup, info);
- } else {
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kElementsKind);
- }
-
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
}
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false), is_stable_(true) {
+ omit_(false), has_migration_target_(false) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
}
bool omit_;
bool has_migration_target_;
- bool is_stable_;
UniqueSet<Map> map_set_;
};
@@ -3219,7 +3147,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual HSourcePosition position() const V8_OVERRIDE;
+ virtual int position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3384,7 +3312,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
void ReuseSideEffectsFromStore(HInstruction* store) {
ASSERT(store->HasObservableSideEffects());
ASSERT(store->IsStoreNamedField());
- changes_flags_.Add(store->ChangesFlags());
+ gvn_flags_.Add(store->gvn_flags());
}
// Replay effects of this instruction on the given environment.
@@ -3563,15 +3491,6 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_;
}
-#ifdef DEBUG
- virtual void Verify() V8_OVERRIDE { }
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(Constant)
-
- protected:
- virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
-
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
@@ -3596,6 +3515,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
}
}
+#ifdef DEBUG
+ virtual void Verify() V8_OVERRIDE { }
+#endif
+
+ DECLARE_CONCRETE_INSTRUCTION(Constant)
+
+ protected:
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
private:
friend class HGraph;
HConstant(Handle<Object> handle, Representation r = Representation::None());
@@ -3724,9 +3652,7 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone,
- HSourcePosition left_pos,
- HSourcePosition right_pos) {
+ void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
set_operand_position(zone, 1, left_pos);
set_operand_position(zone, 2, right_pos);
}
@@ -4019,7 +3945,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4096,7 +4022,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4181,9 +4107,7 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SetOperandPositions(Zone* zone,
- HSourcePosition left_pos,
- HSourcePosition right_pos) {
+ void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
set_operand_position(zone, 0, left_pos);
set_operand_position(zone, 1, right_pos);
}
@@ -4260,24 +4184,6 @@ class HCompareMinusZeroAndBranch V8_FINAL : public HUnaryControlInstruction {
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HCompareObjectEqAndBranch(HValue* left,
- HValue* right,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL) {
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
- ASSERT(!left->IsConstant() ||
- (!HConstant::cast(left)->HasInteger32Value() ||
- HConstant::cast(left)->HasSmiValue()));
- ASSERT(!right->IsConstant() ||
- (!HConstant::cast(right)->HasInteger32Value() ||
- HConstant::cast(right)->HasSmiValue()));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- SetSuccessorAt(0, true_target);
- SetSuccessorAt(1, false_target);
- }
-
DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
HBasicBlock*, HBasicBlock*);
@@ -4298,6 +4204,23 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
}
DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
+
+ private:
+ HCompareObjectEqAndBranch(HValue* left,
+ HValue* right,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL) {
+ ASSERT(!left->IsConstant() ||
+ (!HConstant::cast(left)->HasInteger32Value() ||
+ HConstant::cast(left)->HasSmiValue()));
+ ASSERT(!right->IsConstant() ||
+ (!HConstant::cast(right)->HasInteger32Value() ||
+ HConstant::cast(right)->HasSmiValue()));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
};
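
With the constructor now private, HCompareObjectEqAndBranch can only be created through the DECLARE_INSTRUCTION_FACTORY_Pn macros at the top of the class. Roughly, such a macro declares a static New() that zone-allocates the instruction and forwards to the constructor; the sketch below shows a plausible shape with stand-in Zone and HValue types, not the exact macro from hydrogen-instructions.h:

#include <cstddef>
#include <new>

// Stand-ins so the sketch compiles; V8's Zone is a real bump allocator and
// HValue is the instruction base class.
struct Zone {
  alignas(alignof(std::max_align_t)) char buf[1024];
  size_t used = 0;
  void* New(size_t n) { void* p = buf + used; used += n; return p; }
};
struct HValue {};

// Plausible shape of DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2): a static
// New() that placement-allocates I in the zone via its private constructor.
#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2)            \
  static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
    return new (zone->New(sizeof(I))) I(p1, p2);             \
  }

struct HDemoCompare {
  DECLARE_INSTRUCTION_FACTORY_P2(HDemoCompare, HValue*, HValue*)
 private:
  HDemoCompare(HValue* left, HValue* right) : left_(left), right_(right) {}
  HValue* left_;
  HValue* right_;
};

int main() {
  Zone zone;
  HValue a, b;
  HDemoCompare* cmp = HDemoCompare::New(&zone, nullptr, &a, &b);
  return cmp != nullptr ? 0 : 1;
}
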
@@ -4423,7 +4346,7 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
Token::Value token_;
@@ -4648,7 +4571,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -4690,7 +4613,7 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (to.IsTagged()) {
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
ClearFlag(kAllowUndefinedAsNaN);
}
if (to.IsTagged() &&
@@ -5147,8 +5070,8 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
private:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetChangesFlag(kOsrEntries);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesOsrEntries);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
BailoutId ast_id_;
@@ -5290,7 +5213,7 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
: cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetDependsOnFlag(kGlobalVars);
+ SetGVNFlag(kDependsOnGlobalVars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
@@ -5442,8 +5365,8 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
- SetChangesFlag(kNewSpacePromotion);
- SetDependsOnFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HAllocate with AllocationSite %p %s\n",
@@ -5641,7 +5564,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value),
cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
- SetChangesFlag(kGlobalVars);
+ SetGVNFlag(kChangesGlobalVars);
}
Unique<PropertyCell> cell_;
@@ -5680,7 +5603,7 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
}
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetDependsOnFlag(kContextSlots);
+ SetGVNFlag(kDependsOnContextSlots);
}
int slot_index() const { return slot_index_; }
@@ -5764,7 +5687,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetChangesFlag(kContextSlots);
+ SetGVNFlag(kChangesContextSlots);
}
int slot_index_;
@@ -6042,14 +5965,14 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
}
- void PrintTo(StringStream* stream) const;
+ void PrintTo(StringStream* stream);
inline bool Equals(HObjectAccess that) const {
return value_ == that.value_; // portion and offset must match
}
protected:
- void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
+ void SetGVNFlags(HValue *instr, bool is_store);
private:
// internal use only; different parts of an object or array
@@ -6064,8 +5987,6 @@ class HObjectAccess V8_FINAL {
kExternalMemory // some field in external memory
};
- HObjectAccess() : value_(0) {}
-
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
Handle<String> name = Handle<String>::null(),
@@ -6098,7 +6019,6 @@ class HObjectAccess V8_FINAL {
friend class HLoadNamedField;
friend class HStoreNamedField;
- friend class SideEffectsTracker;
inline Portion portion() const {
return PortionField::decode(value_);
@@ -6176,7 +6096,7 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
} else {
set_representation(Representation::Tagged());
}
- access.SetGVNFlags(this, LOAD);
+ access.SetGVNFlags(this, false);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -6235,7 +6155,7 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetDependsOnFlag(kCalls);
+ SetGVNFlag(kDependsOnCalls);
}
};
@@ -6380,10 +6300,10 @@ class HLoadKeyed V8_FINAL
set_representation(Representation::Tagged());
}
- SetDependsOnFlag(kArrayElements);
+ SetGVNFlag(kDependsOnArrayElements);
} else {
set_representation(Representation::Double());
- SetDependsOnFlag(kDoubleArrayElements);
+ SetGVNFlag(kDependsOnDoubleArrayElements);
}
} else {
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
@@ -6396,14 +6316,14 @@ class HLoadKeyed V8_FINAL
}
if (is_external()) {
- SetDependsOnFlag(kExternalMemory);
+ SetGVNFlag(kDependsOnExternalMemory);
} else if (is_fixed_typed_array()) {
- SetDependsOnFlag(kTypedArrayElements);
+ SetGVNFlag(kDependsOnTypedArrayElements);
} else {
UNREACHABLE();
}
// Native code could change the specialized array.
- SetDependsOnFlag(kCalls);
+ SetGVNFlag(kDependsOnCalls);
}
SetFlag(kUseGVN);
@@ -6527,8 +6447,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kNewSpacePromotion);
- if (!FLAG_use_write_barrier_elimination) return false;
+ ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
return false;
}
@@ -6565,16 +6484,6 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
SetOperandAt(2, map_constant);
has_transition_ = true;
- is_stable_ = map->is_stable();
-
- if (is_stable_) {
- map->AddDependentCompilationInfo(
- DependentCode::kPrototypeCheckGroup, info);
- }
- }
-
- bool is_stable() const {
- return is_stable_;
}
bool NeedsWriteBarrier() {
@@ -6613,8 +6522,8 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
new_space_dominator_(NULL),
write_barrier_mode_(UPDATE_WRITE_BARRIER),
has_transition_(false),
- is_stable_(false),
store_mode_(store_mode) {
+ if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
// Stores to a non-existing in-object property are allowed only to the
// newly allocated objects (via HAllocate or HInnerAllocatedObject).
ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
@@ -6622,14 +6531,13 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetOperandAt(2, obj);
- access.SetGVNFlags(this, STORE);
+ access.SetGVNFlags(this, true);
}
HObjectAccess access_;
HValue* new_space_dominator_;
WriteBarrierMode write_barrier_mode_ : 1;
bool has_transition_ : 1;
- bool is_stable_ : 1;
StoreFieldOrKeyedMode store_mode_ : 1;
};
@@ -6771,7 +6679,7 @@ class HStoreKeyed V8_FINAL
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kNewSpacePromotion);
+ ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
return false;
}
@@ -6804,6 +6712,7 @@ class HStoreKeyed V8_FINAL
is_uninitialized_(false),
store_mode_(store_mode),
new_space_dominator_(NULL) {
+ if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -6813,20 +6722,20 @@ class HStoreKeyed V8_FINAL
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
- SetDependsOnFlag(kNewSpacePromotion);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
}
if (is_external()) {
- SetChangesFlag(kExternalMemory);
+ SetGVNFlag(kChangesExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- SetChangesFlag(kDoubleArrayElements);
+ SetGVNFlag(kChangesDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
- SetChangesFlag(kArrayElements);
+ SetGVNFlag(kChangesArrayElements);
} else if (is_fixed_typed_array()) {
- SetChangesFlag(kTypedArrayElements);
+ SetGVNFlag(kChangesTypedArrayElements);
SetFlag(kAllowUndefinedAsNaN);
} else {
- SetChangesFlag(kArrayElements);
+ SetGVNFlag(kChangesArrayElements);
}
// EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
@@ -6930,10 +6839,10 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(0, object);
SetOperandAt(1, context);
SetFlag(kUseGVN);
- SetChangesFlag(kElementsKind);
+ SetGVNFlag(kChangesElementsKind);
if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
- SetChangesFlag(kElementsPointer);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesElementsPointer);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@@ -6984,8 +6893,8 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
flags_(flags), pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HStringAdd with AllocationSite %p %s\n",
allocation_site.is_null()
@@ -7036,9 +6945,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kStringChars);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnStringChars);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
// No side effects: runtime function assumes string + number inputs.
@@ -7072,7 +6981,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -7181,7 +7090,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
language_mode_(shared->language_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7252,7 +7161,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
+ SetGVNFlag(kChangesNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
@@ -7331,7 +7240,7 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetDependsOnFlag(kStringChars);
+ SetGVNFlag(kDependsOnStringChars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7370,7 +7279,7 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
SetOperandAt(2, index);
SetOperandAt(3, value);
set_representation(Representation::Tagged());
- SetChangesFlag(kStringChars);
+ SetGVNFlag(kChangesStringChars);
}
String::Encoding encoding_;
@@ -7410,8 +7319,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, map);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kElementsKind);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
}
};
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
index 222811678b..0c7de85169 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -76,7 +76,9 @@ class HLoadEliminationTable : public ZoneObject {
FieldOf(l->access()),
l->object()->ActualValue()->id()));
HValue* result = load(l);
- if (result != instr) {
+ if (result != instr &&
+ result->type().Equals(instr->type()) &&
+ result->representation().Equals(instr->representation())) {
// The load can be replaced with a previous load or a value.
TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
instr->DeleteAndReplaceWith(result);
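
The tightened condition here is the substantive change: a previously-seen value may only replace a fresh load if it agrees with the load in both HType and representation, since users of the load otherwise see the value in an unexpected form (for example unboxed where they expect tagged). A small stand-alone sketch of the guard's shape, with stand-in types rather than V8's:

#include <cassert>

// Stand-ins for HType and Representation equality; V8's classes are richer.
struct Rep  { int kind; bool Equals(Rep o) const { return kind == o.kind; } };
struct Type { int kind; bool Equals(Type o) const { return kind == o.kind; } };
struct Val  { Type type; Rep rep; };

// A cached value can only stand in for the load when both checks pass.
bool CanReplaceLoad(const Val* load, const Val* cached) {
  return cached != load &&
         cached->type.Equals(load->type) &&
         cached->rep.Equals(load->rep);
}

int main() {
  Val load{{1}, {2}};
  Val same{{1}, {2}};
  Val other_rep{{1}, {3}};
  assert(CanReplaceLoad(&load, &same));
  assert(!CanReplaceLoad(&load, &other_rep));
  return 0;
}
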
@@ -98,33 +100,26 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
- case HValue::kTransitionElementsKind: {
- HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
- HValue* object = t->object()->ActualValue();
- KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
- KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
- break;
- }
default: {
- if (instr->CheckChangesFlag(kInobjectFields)) {
+ if (instr->CheckGVNFlag(kChangesInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
- if (instr->CheckChangesFlag(kMaps)) {
+ if (instr->CheckGVNFlag(kChangesMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
- if (instr->CheckChangesFlag(kElementsKind)) {
+ if (instr->CheckGVNFlag(kChangesElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckChangesFlag(kElementsPointer)) {
+ if (instr->CheckGVNFlag(kChangesElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckChangesFlag(kOsrEntries)) {
+ if (instr->CheckGVNFlag(kChangesOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -139,32 +134,8 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Merge given state with
- // the other incoming state.
- static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
- HBasicBlock* succ_block,
- HLoadEliminationTable* pred_state,
- HBasicBlock* pred_block,
- Zone* zone) {
- ASSERT(pred_state != NULL);
- if (succ_state == NULL) {
- return pred_state->Copy(succ_block, pred_block, zone);
- } else {
- return succ_state->Merge(succ_block, pred_state, pred_block, zone);
- }
- }
-
- // Support for global analysis with HFlowEngine: Given state merged with all
- // the other incoming states, prepare it for use.
- static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
- HBasicBlock* block,
- Zone* zone) {
- ASSERT(state != NULL);
- return state;
- }
-
- private:
- // Copy state to successor block.
+ // Support for global analysis with HFlowEngine: Copy state to successor
+ // block.
HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
Zone* zone) {
HLoadEliminationTable* copy =
@@ -180,7 +151,8 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}
- // Merge this state with the other incoming state.
+ // Support for global analysis with HFlowEngine: Merge this state with
+ // the other incoming state.
HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
@@ -460,7 +432,11 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone), stores_(5, zone) { }
+ : zone_(zone),
+ maps_stored_(false),
+ fields_stored_(false),
+ elements_stored_(false),
+ stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -468,25 +444,37 @@ class HLoadEliminationEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- if (instr->IsStoreNamedField()) {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- } else {
- flags_.Add(instr->ChangesFlags());
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ fields_stored_ = true;
+ elements_stored_ = true;
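+ // Fall through.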
+ }
+ default: {
+ fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
+ }
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
- // Loads must not be hoisted past the OSR entry, therefore we kill
- // everything if we see an OSR entry.
- if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
+ if (fields_stored_) {
table->Kill();
return;
}
- if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
+ if (maps_stored_) {
table->KillOffset(JSObject::kMapOffset);
}
- if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
+ if (elements_stored_) {
table->KillOffset(JSObject::kElementsOffset);
}
@@ -498,7 +486,9 @@ class HLoadEliminationEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
- flags_.Add(that->flags_);
+ maps_stored_ |= that->maps_stored_;
+ fields_stored_ |= that->fields_stored_;
+ elements_stored_ |= that->elements_stored_;
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
@@ -506,7 +496,9 @@ class HLoadEliminationEffects : public ZoneObject {
private:
Zone* zone_;
- GVNFlagSet flags_;
+ bool maps_stored_ : 1;
+ bool fields_stored_ : 1;
+ bool elements_stored_ : 1;
ZoneList<HStoreNamedField*> stores_;
};
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 7d0720c604..07fc8be38c 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -61,11 +61,10 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
- if (!use_value->operand_position(use_index).IsUnknown()) {
+ if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
new_value->set_position(use_value->operand_position(use_index));
} else {
- ASSERT(!FLAG_hydrogen_track_positions ||
- !graph()->info()->IsOptimizing());
+ ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
}
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index b471faa9b2..7ee8180cca 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -68,8 +68,6 @@
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -143,13 +141,12 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr,
- HSourcePosition position) {
+void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
- if (!position.IsUnknown()) {
+ if (position != RelocInfo::kNoPosition) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -157,10 +154,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr,
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (!position.IsUnknown()) {
+ if (position != RelocInfo::kNoPosition) {
entry->set_position(position);
} else {
- ASSERT(!FLAG_hydrogen_track_positions ||
+ ASSERT(!FLAG_emit_opt_code_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
@@ -213,7 +210,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
+void HBasicBlock::Finish(HControlInstruction* end, int position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -224,7 +221,7 @@ void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
void HBasicBlock::Goto(HBasicBlock* block,
- HSourcePosition position,
+ int position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
@@ -247,7 +244,7 @@ void HBasicBlock::Goto(HBasicBlock* block,
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
- HSourcePosition position) {
+ int position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
@@ -340,15 +337,6 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
}
-void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
- ASSERT(IsFinished());
- HBasicBlock* succ_block = end()->SuccessorAt(succ);
-
- ASSERT(succ_block->predecessors()->length() == 1);
- succ_block->MarkUnreachable();
-}
-
-
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
@@ -1044,9 +1032,9 @@ void HGraphBuilder::IfBuilder::End() {
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
- current->block_->FinishExit(
- HAbnormalExit::New(builder_->zone(), NULL),
- HSourcePosition::Unknown());
+ builder_->PadEnvironmentForContinuation(current->block_,
+ merge_block);
+ builder_->GotoNoSimulate(current->block_, merge_block);
}
current = current->next_;
}
@@ -1179,10 +1167,9 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- ASSERT(!FLAG_hydrogen_track_positions ||
- !position_.IsUnknown() ||
- !info_->IsOptimizing());
- current_block()->AddInstruction(instr, source_position());
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, position_);
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1191,10 +1178,9 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- ASSERT(!FLAG_hydrogen_track_positions ||
- !info_->IsOptimizing() ||
- !position_.IsUnknown());
- current_block()->Finish(last, source_position());
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->Finish(last, position_);
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1202,9 +1188,9 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
- !position_.IsUnknown());
- current_block()->FinishExit(instruction, source_position());
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->FinishExit(instruction, position_);
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1228,7 +1214,7 @@ void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, source_position(), removable);
+ current_block()->AddNewSimulate(id, position_, removable);
}
@@ -1254,9 +1240,38 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+ const char* reason, HBasicBlock* continuation) {
+ PadEnvironmentForContinuation(current_block(), continuation);
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- FinishExitCurrentBlock(New<HAbnormalExit>());
+ if (graph()->IsInsideNoSideEffectsScope()) {
+ GotoNoSimulate(continuation);
+ } else {
+ Goto(continuation);
+ }
+}
+
+
+void HGraphBuilder::PadEnvironmentForContinuation(
+ HBasicBlock* from,
+ HBasicBlock* continuation) {
+ if (continuation->last_environment() != NULL) {
+ // When merging from a deopt block to a continuation, resolve differences in
+ // environment by pushing constant 0 and popping extra values so that the
+ // environments match during the join. Push 0 since it has the most specific
+ // representation, and will not influence representation inference of the
+ // phi.
+ int continuation_env_length = continuation->last_environment()->length();
+ while (continuation_env_length != from->last_environment()->length()) {
+ if (continuation_env_length > from->last_environment()->length()) {
+ from->last_environment()->Push(graph()->GetConstant0());
+ } else {
+ from->last_environment()->Pop();
+ }
+ }
+ } else {
+ ASSERT(continuation->predecessors()->length() == 0);
+ }
}
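
The padding loop above keeps the deopt block's environment the same length as the continuation's so the join point sees matching environments; pushing the constant 0 is deliberate because its representation is the most specific and will not perturb phi representation inference. A small sketch of the balancing rule on plain vectors:

#include <cassert>
#include <vector>

// Sketch of the padding rule: grow the deopt block's environment with a
// dummy constant (0) or shrink it until its length matches the
// continuation's, so the join sees equal-length environments.
void PadEnvironment(std::vector<int>* from, size_t continuation_length) {
  while (from->size() != continuation_length) {
    if (from->size() < continuation_length) {
      from->push_back(0);  // Stand-in for graph()->GetConstant0().
    } else {
      from->pop_back();
    }
  }
}

int main() {
  std::vector<int> env = {7, 8, 9, 10, 11};
  PadEnvironment(&env, 3);  // Pop two extra values.
  assert(env.size() == 3);
  PadEnvironment(&env, 5);  // Push constant 0 twice.
  assert(env.size() == 5 && env[4] == 0);
  return 0;
}
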
@@ -1289,14 +1304,13 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
}
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(
- HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array,
- PropertyAccessType access_type) {
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array,
+ bool is_store) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1339,7 +1353,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(
new_length);
}
- if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
+ if (is_store && kind == FAST_SMI_ELEMENTS) {
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
@@ -1364,8 +1378,7 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
IfBuilder cow_checker(this);
- cow_checker.If<HCompareMap>(
- elements, factory->fixed_cow_array_map(), top_info());
+ cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
cow_checker.Then();
HValue* capacity = AddLoadFixedArrayLength(elements);
@@ -1451,7 +1464,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
static_cast<HValue*>(NULL),
- FAST_ELEMENTS);
+ FAST_SMI_ELEMENTS);
IfBuilder key_compare(this);
key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
@@ -1477,7 +1490,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* details = Add<HLoadKeyed>(elements, details_index,
static_cast<HValue*>(NULL),
- FAST_ELEMENTS);
+ FAST_SMI_ELEMENTS);
IfBuilder details_compare(this);
details_compare.If<HCompareNumericAndBranch>(details,
graph()->GetConstant0(),
@@ -1547,7 +1560,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
elements,
Add<HConstant>(NameDictionary::kCapacityIndex),
static_cast<HValue*>(NULL),
- FAST_ELEMENTS);
+ FAST_SMI_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1682,7 +1695,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
- object, isolate()->factory()->heap_number_map(), top_info());
+ object, isolate()->factory()->heap_number_map());
if_objectisnumber.Then();
{
// Compute hash for heap number similar to double_get_hash().
@@ -2146,7 +2159,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- PropertyAccessType access_type,
+ bool is_store,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
@@ -2159,18 +2172,18 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
- checked_object->ClearDependsOnFlag(kElementsKind);
+ (elements_kind == FAST_ELEMENTS && is_store)) {
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
- if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
+ if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearDependsOnFlag(kElementsKind);
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
@@ -2202,7 +2215,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, elements_kind, access_type);
+ backing_store, key, val, bounds_check, elements_kind, is_store);
negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
@@ -2212,7 +2225,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
return AddElementAccess(
backing_store, checked_key, val,
- checked_object, elements_kind, access_type);
+ checked_object, elements_kind, is_store);
}
}
ASSERT(fast_smi_only_elements ||
@@ -2222,7 +2235,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// In case val is stored into a fast smi array, ensure that the value is a smi
// before manipulating the backing store. Otherwise the actual store may
// deopt, leaving the backing store in an invalid state.
- if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
+ if (is_store && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
@@ -2231,12 +2244,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
- is_js_array, access_type);
+ is_js_array, is_store);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
- if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
@@ -2244,12 +2257,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearDependsOnFlag(kElementsKind);
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
- elements_kind, access_type, load_mode);
+ elements_kind, is_store, load_mode);
}
@@ -2391,9 +2404,9 @@ HInstruction* HGraphBuilder::AddElementAccess(
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- PropertyAccessType access_type,
+ bool is_store,
LoadKeyedHoleMode load_mode) {
- if (access_type == STORE) {
+ if (is_store) {
ASSERT(val != NULL);
if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS) {
@@ -2405,7 +2418,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
: INITIALIZING_STORE);
}
- ASSERT(access_type == LOAD);
+ ASSERT(!is_store);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
@@ -2821,8 +2834,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->Add<HLoadNamedField>(
- constructor_function_, static_cast<HValue*>(NULL), access);
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
// TODO(mvstanton): we should always have a constructor function if we
@@ -2847,8 +2859,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->Add<HLoadNamedField>(
- constructor_function_, static_cast<HValue*>(NULL), access);
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
@@ -2982,7 +2993,7 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN, 0),
+ initial_function_state_(this, info, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -2994,7 +3005,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_= &initial_function_state_;
InitializeAstVisitor(info->zone());
- if (FLAG_hydrogen_track_positions) {
+ if (FLAG_emit_opt_code_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -3063,8 +3074,7 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction,
- HSourcePosition position) {
+void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -3087,9 +3097,7 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false),
- next_inline_id_(0),
- inlined_functions_(5, info->zone()) {
+ disallow_adding_new_values_(false) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -3097,7 +3105,6 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
- TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3125,81 +3132,6 @@ void HGraph::FinalizeUniqueness() {
}
-int HGraph::TraceInlinedFunction(
- Handle<SharedFunctionInfo> shared,
- HSourcePosition position) {
- if (!FLAG_hydrogen_track_positions) {
- return 0;
- }
-
- int id = 0;
- for (; id < inlined_functions_.length(); id++) {
- if (inlined_functions_[id].shared().is_identical_to(shared)) {
- break;
- }
- }
-
- if (id == inlined_functions_.length()) {
- inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
-
- if (!shared->script()->IsUndefined()) {
- Handle<Script> script(Script::cast(shared->script()));
- if (!script->source()->IsUndefined()) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- PrintF(tracing_scope.file(),
- "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
- shared->DebugName()->ToCString().get(),
- info()->optimization_id(),
- id);
-
- {
- ConsStringIteratorOp op;
- StringCharacterStream stream(String::cast(script->source()),
- &op,
- shared->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- shared->end_position() - shared->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) {
- PrintF(tracing_scope.file(), "%c", stream.GetNext());
- }
- }
- }
-
- PrintF(tracing_scope.file(), "\n--- END ---\n");
- }
- }
- }
-
- int inline_id = next_inline_id_++;
-
- if (inline_id != 0) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
- shared->DebugName()->ToCString().get(),
- info()->optimization_id(),
- id,
- inline_id);
- position.PrintTo(tracing_scope.file());
- PrintF(tracing_scope.file(), "\n");
- }
-
- return inline_id;
-}
-
-
-int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
- if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
- return pos.raw();
- }
-
- return inlined_functions_[pos.inlining_id()].start_position() +
- pos.position();
-}
-
-
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -3578,8 +3510,7 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind,
- int inlining_id)
+ InliningKind inlining_kind)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
@@ -3589,8 +3520,6 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
- inlining_id_(inlining_id),
- outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -3614,27 +3543,12 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
-
- if (FLAG_hydrogen_track_positions) {
- outer_source_position_ = owner->source_position();
- owner->EnterInlinedSource(
- info->shared_info()->start_position(),
- inlining_id);
- owner->SetSourcePosition(info->shared_info()->start_position());
- }
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
-
- if (FLAG_hydrogen_track_positions) {
- owner_->set_source_position(outer_source_position_);
- owner_->EnterInlinedSource(
- outer_->compilation_info()->shared_info()->start_position(),
- outer_->inlining_id());
- }
}
@@ -4447,10 +4361,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
Type* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
- combined_type,
- ScriptPositionToSourcePosition(stmt->tag()->position()),
- ScriptPositionToSourcePosition(clause->label()->position()),
- PUSH_BEFORE_SIMULATE, clause->id());
+ combined_type, stmt->tag()->position(), clause->label()->position(),
+ clause->id());
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
@@ -4870,14 +4782,14 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
+ Variable* var, LookupResult* lookup, bool is_store) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
- (access_type == STORE && lookup->IsReadOnly()) ||
+ (is_store && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
@@ -4891,9 +4803,8 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
HValue* context = environment()->context();
int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = Add<HLoadNamedField>(
- context, static_cast<HValue*>(NULL),
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ context = AddLoadNamedField(
+ context, HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
@@ -4924,7 +4835,8 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
+ GlobalPropertyAccess type =
+ LookupGlobalProperty(variable, &lookup, false);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
@@ -5159,8 +5071,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildNamedGeneric(
- STORE, literal, name, value));
+ CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
} else {
PropertyAccessInfo info(this, STORE, ToType(map), name);
if (info.CanAccessMonomorphic()) {
@@ -5170,8 +5081,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
} else {
- CHECK_ALIVE(store = BuildNamedGeneric(
- STORE, literal, name, value));
+ CHECK_ALIVE(
+ store = BuildStoreNamedGeneric(literal, name, value));
}
}
AddInstruction(store);
@@ -5347,24 +5258,6 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
- PropertyAccessInfo* info,
- HValue* checked_object) {
- HObjectAccess access = info->access();
- if (access.representation().IsDouble()) {
- // Load the heap number.
- checked_object = Add<HLoadNamedField>(
- checked_object, static_cast<HValue*>(NULL),
- access.WithRepresentation(Representation::Tagged()));
- checked_object->set_type(HType::HeapNumber());
- // Load the double value from it.
- access = HObjectAccess::ForHeapNumberValue();
- }
- return New<HLoadNamedField>(
- checked_object, static_cast<HValue*>(NULL), access);
-}
-
-
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
PropertyAccessInfo* info,
HValue* checked_object,
@@ -5375,7 +5268,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
info->map(), info->lookup(), info->name());
HStoreNamedField *instr;
- if (field_access.representation().IsDouble()) {
+ if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
@@ -5415,12 +5308,30 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (transition_to_field) {
HConstant* transition_constant = Add<HConstant>(info->transition());
instr->SetTransition(transition_constant, top_info());
- instr->SetChangesFlag(kMaps);
+ instr->SetGVNFlag(kChangesMaps);
}
return instr;
}
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized) {
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for property assignment",
+ Deoptimizer::SOFT);
+ }
+
+ return New<HStoreNamedGeneric>(
+ object,
+ name,
+ value,
+ function_strict_mode_flag());
+}
+
+
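BuildStoreNamedGeneric above issues a SOFT deopt, while the constant-cell store path later in this file uses EAGER. A minimal standalone sketch of the distinction those two markers are assumed to draw (illustrative names, not V8 API):

// Illustrative sketch, not V8 code: the assumed meaning of the
// Deoptimizer::SOFT and Deoptimizer::EAGER markers used in this file.
enum class DeoptKind { kSoft, kEager };

const char* Describe(DeoptKind kind) {
  // A soft deopt abandons optimized code to gather more type feedback;
  // an eager deopt fires because a speculative assumption was violated.
  return kind == DeoptKind::kSoft
      ? "insufficient feedback: collect more, then reoptimize"
      : "assumption violated: leave this optimized code now";
}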
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
if (!CanInlinePropertyAccess(type_)) return false;
@@ -5635,7 +5546,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (info->lookup()->IsField()) {
if (info->IsLoad()) {
- return BuildLoadNamedField(info, checked_holder);
+ return BuildLoadNamedField(checked_holder, info->access());
} else {
return BuildStoreNamedField(info, checked_object, value);
}
@@ -5731,7 +5642,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
smi_check = New<HIsSmiAndBranch>(
object, empty_smi_block, not_smi_block);
FinishCurrentBlock(smi_check);
- GotoNoSimulate(empty_smi_block, number_block);
+ Goto(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(object);
@@ -5745,22 +5656,21 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
HValue* dependency;
if (info.type()->Is(Type::Number())) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
- compare = New<HCompareMap>(object, heap_number_map, top_info(),
- if_true, if_false);
+ compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
dependency = smi_check;
} else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(object, if_true, if_false);
dependency = compare;
} else {
- compare = New<HCompareMap>(object, info.map(), top_info(),
- if_true, if_false);
+ compare = New<HCompareMap>(object, info.map(), if_true, if_false);
dependency = compare;
}
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- GotoNoSimulate(if_true, number_block);
+ Goto(if_true, number_block);
if_true = number_block;
+ number_block->SetJoinId(ast_id);
}
set_current_block(if_true);
@@ -5794,11 +5704,32 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
+ // Because the deopt may be the only path in the polymorphic load, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful load.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
+ const char* message = "";
+ switch (access_type) {
+ case LOAD:
+ message = "Unknown map in polymorphic load";
+ break;
+ case STORE:
+ message = "Unknown map in polymorphic store";
+ break;
+ }
+ FinishExitWithHardDeoptimization(message, join);
} else {
- HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
+ HValue* result = NULL;
+ switch (access_type) {
+ case LOAD:
+ result = Add<HLoadNamedGeneric>(object, name);
+ break;
+ case STORE:
+ AddInstruction(BuildStoreNamedGeneric(object, name, value));
+ result = value;
+ break;
+ }
+ if (!ast_context()->IsEffect()) Push(result);
if (join != NULL) {
Goto(join);
@@ -5810,13 +5741,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
ASSERT(join != NULL);
- if (join->HasPredecessor()) {
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- } else {
- set_current_block(NULL);
- }
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
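The padding above is needed because FinishExitWithHardDeoptimization now routes the deopt path into the join block, so every predecessor of the join must agree on expression-stack depth. A toy model of that invariant, with assumed names:

// Toy model (assumed names, not V8 code) of the join invariant: the
// deopt-only path pushes a placeholder so it reaches the join with the
// same stack depth as the successful-access path.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> access_path, deopt_path;
  access_path.push_back(42);  // the successful load pushes its result
  deopt_path.push_back(0);    // the deopt path pushes GetConstant0()
  assert(access_path.size() == deopt_path.size());  // depths agree at join
  return 0;
}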
@@ -5857,7 +5784,8 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
- STORE, &has_side_effects);
+ true, // is_store
+ &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -5907,23 +5835,32 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
if (cell->type()->IsConstant()) {
- IfBuilder builder(this);
- HValue* constant = Add<HConstant>(cell->type()->AsConstant());
- if (cell->type()->AsConstant()->IsNumber()) {
- builder.If<HCompareNumericAndBranch>(value, constant, Token::EQ);
+ Handle<Object> constant = cell->type()->AsConstant();
+ if (value->IsConstant()) {
+ HConstant* c_value = HConstant::cast(value);
+ if (!constant.is_identical_to(c_value->handle(isolate()))) {
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
+ }
} else {
- builder.If<HCompareObjectEqAndBranch>(value, constant);
+ HValue* c_constant = Add<HConstant>(constant);
+ IfBuilder builder(this);
+ if (constant->IsNumber()) {
+ builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
+ } else {
+ builder.If<HCompareObjectEqAndBranch>(value, c_constant);
+ }
+ builder.Then();
+ builder.Else();
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
+ builder.End();
}
- builder.Then();
- builder.Else();
- Add<HDeoptimize>("Constant global variable assignment",
- Deoptimizer::EAGER);
- builder.End();
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
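When the global's property cell holds a known constant, the store above folds its guard at compile time if the incoming value is also a constant, and otherwise emits a runtime compare that deoptimizes on mismatch. A hypothetical standalone model of that decision:

// Hypothetical model of the constant-cell store guard above; the names
// and int payload are assumptions for illustration only.
#include <optional>

enum class GuardPlan { kNone, kAlwaysDeopt, kRuntimeCheck };

GuardPlan PlanConstantCellStore(std::optional<int> compile_time_value,
                                int cell_constant) {
  if (compile_time_value.has_value()) {
    // Both sides known now: either no guard is needed, or the store can
    // never match and we deoptimize unconditionally.
    return *compile_time_value == cell_constant ? GuardPlan::kNone
                                                : GuardPlan::kAlwaysDeopt;
  }
  // Value known only at runtime: compare against the cell's constant and
  // deoptimize on mismatch.
  return GuardPlan::kRuntimeCheck;
}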
@@ -6191,7 +6128,7 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
Add<HPushArgument>(value);
Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kThrow), 1);
@@ -6206,6 +6143,29 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ if (FLAG_track_double_fields && access.representation().IsDouble()) {
+ // load the heap number
+ HLoadNamedField* heap_number = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ heap_number->set_type(HType::HeapNumber());
+ // load the double value from it
+ return New<HLoadNamedField>(
+ heap_number, static_cast<HValue*>(NULL),
+ HObjectAccess::ForHeapNumberValue());
+ }
+ return New<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
+}
+
+
+HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ return AddInstruction(BuildLoadNamedField(object, access));
+}
+
+
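BuildLoadNamedField above lowers a double-represented field into two dependent loads. A standalone model of that layout (assumed struct shapes, not V8's actual object model):

// A double field holds a pointer to a heap-allocated number box, so the
// value is reached with one tagged load followed by one double load.
struct HeapNumberBox { double value; };
struct ObjectModel { HeapNumberBox* double_field; };

double LoadBoxedDouble(const ObjectModel& o) {
  HeapNumberBox* box = o.double_field;  // load the tagged HeapNumber
  return box->value;                    // load the double stored inside it
}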
HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
if (string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
@@ -6213,10 +6173,9 @@ HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
return Add<HConstant>(c_string->StringValue()->map()->instance_type());
}
}
- return Add<HLoadNamedField>(
- Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
- HObjectAccess::ForMap()),
- static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
+ return AddLoadNamedField(
+ AddLoadNamedField(string, HObjectAccess::ForMap()),
+ HObjectAccess::ForMapInstanceType());
}
@@ -6227,42 +6186,26 @@ HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
return Add<HConstant>(c_string->StringValue()->length());
}
}
- return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
- HObjectAccess::ForStringLength());
+ return AddLoadNamedField(string, HObjectAccess::ForStringLength());
}
-HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type,
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
HValue* object,
Handle<String> name,
- HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for generic named access",
+ Add<HDeoptimize>("Insufficient type feedback for generic named load",
Deoptimizer::SOFT);
}
- if (access_type == LOAD) {
- return New<HLoadNamedGeneric>(object, name);
- } else {
- return New<HStoreNamedGeneric>(
- object, name, value, function_strict_mode_flag());
- }
+ return New<HLoadNamedGeneric>(object, name);
}
-HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type,
- HValue* object,
- HValue* key,
- HValue* value) {
- if (access_type == LOAD) {
- return New<HLoadKeyedGeneric>(object, key);
- } else {
- return New<HStoreKeyedGeneric>(
- object, key, value, function_strict_mode_flag());
- }
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+ HValue* key) {
+ return New<HLoadKeyedGeneric>(object, key);
}
@@ -6288,15 +6231,15 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* val,
HValue* dependency,
Handle<Map> map,
- PropertyAccessType access_type,
+ bool is_store,
KeyedAccessStoreMode store_mode) {
HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
dependency);
if (dependency) {
- checked_object->ClearDependsOnFlag(kElementsKind);
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
- if (access_type == STORE && map->prototype()->IsJSObject()) {
+ if (is_store && map->prototype()->IsJSObject()) {
// monomorphic stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that
// aren't compatible with monomorphic keyed stores.
@@ -6315,7 +6258,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), access_type,
+ map->elements_kind(), is_store,
load_mode, store_mode);
}
@@ -6371,7 +6314,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* checked_object = Add<HCheckMaps>(object, maps, top_info());
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
// FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
// If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
ElementsKind consolidated_elements_kind = has_seen_holey_elements
@@ -6381,7 +6324,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
- LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -6391,13 +6334,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- PropertyAccessType access_type,
+ bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- if (access_type == LOAD) {
+ if (!is_store) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
@@ -6450,14 +6393,15 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
- instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
+ instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
+ : BuildLoadKeyedGeneric(object, key));
} else {
instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, access_type,
+ object, key, val, transition, untransitionable_map, is_store,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- return access_type == STORE ? NULL : instr;
+ return is_store ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
@@ -6469,13 +6413,15 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
- New<HCompareMap>(object, map, top_info(), this_map, other_map);
+ New<HCompareMap>(object, map, this_map, other_map);
FinishCurrentBlock(mapcompare);
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
- access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
+ access = is_store
+ ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
+ : AddInstruction(BuildLoadKeyedGeneric(object, key));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
IsExternalArrayElementsKind(elements_kind));
@@ -6484,14 +6430,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, access_type,
+ elements_kind, is_store,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (access_type == LOAD) {
+ if (!is_store) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
@@ -6499,16 +6445,12 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(other_map);
}
- // Ensure that we visited at least one map above that goes to join. This is
- // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
- // rather than joining the join block. If this becomes an issue, insert a
- // generic access in the case length() == 0.
- ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
+ join);
set_current_block(join);
- return access_type == STORE ? NULL : Pop();
+ return is_store ? NULL : Pop();
}
@@ -6517,7 +6459,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- PropertyAccessType access_type,
+ bool is_store,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
@@ -6526,8 +6468,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
bool force_generic = false;
- if (access_type == STORE &&
- (monomorphic || (types != NULL && !types->is_empty()))) {
+ if (is_store && (monomorphic || (types != NULL && !types->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
@@ -6545,36 +6486,52 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (monomorphic) {
Handle<Map> map = types->first();
if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
- instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
+ instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
+ : BuildLoadKeyedGeneric(obj, key);
+ AddInstruction(instr);
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, access_type, expr->GetStoreMode());
+ obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
} else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, access_type,
+ obj, key, val, types, is_store,
expr->GetStoreMode(), has_side_effects);
} else {
- if (access_type == STORE) {
+ if (is_store) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
+ instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
+ instr = BuildLoadKeyedGeneric(obj, key);
}
- instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
+ AddInstruction(instr);
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ return New<HStoreKeyedGeneric>(
+ object,
+ key,
+ value,
+ function_strict_mode_flag());
+}
+
+
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6681,13 +6638,17 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
checked_object =
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
- checked_object = Add<HCheckMaps>(object, types, top_info());
+ checked_object = Add<HCheckMaps>(object, types);
}
return BuildMonomorphicAccess(
&info, object, checked_object, value, ast_id, return_id);
}
- return BuildNamedGeneric(access, object, name, value, is_uninitialized);
+ if (access == LOAD) {
+ return BuildLoadNamedGeneric(object, name, is_uninitialized);
+ } else {
+ return BuildStoreNamedGeneric(object, name, value, is_uninitialized);
+ }
}
@@ -6731,7 +6692,9 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, LOAD, &has_side_effects);
+ obj, key, NULL, expr,
+ false, // is_store
+ &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6777,7 +6740,7 @@ HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
AddInstruction(constant_value);
HCheckMaps* check =
Add<HCheckMaps>(constant_value, handle(constant->map()), info);
- check->ClearDependsOnFlag(kElementsKind);
+ check->ClearGVNFlag(kDependsOnElementsKind);
return check;
}
@@ -6861,13 +6824,44 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
}
+class FunctionSorter {
+ public:
+ FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
+ FunctionSorter(int index, int ticks, int ast_length, int src_length)
+ : index_(index),
+ ticks_(ticks),
+ ast_length_(ast_length),
+ src_length_(src_length) { }
+
+ int index() const { return index_; }
+ int ticks() const { return ticks_; }
+ int ast_length() const { return ast_length_; }
+ int src_length() const { return src_length_; }
+
+ private:
+ int index_;
+ int ticks_;
+ int ast_length_;
+ int src_length_;
+};
+
+
+inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
+ int diff = lhs.ticks() - rhs.ticks();
+ if (diff != 0) return diff > 0;
+ diff = lhs.ast_length() - rhs.ast_length();
+ if (diff != 0) return diff < 0;
+ return lhs.src_length() < rhs.src_length();
+}
+
+
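The comparator above orders inlining candidates by hotness first and size second. A standalone sketch of the same ordering, driven through std::sort just as HandlePolymorphicCallNamed does below:

// Sketch with assumed field names: more profiler ticks sort first, then
// smaller AST length, then smaller source length.
#include <algorithm>
#include <vector>

struct Candidate { int index, ticks, ast_length, src_length; };

static bool Before(const Candidate& a, const Candidate& b) {
  if (a.ticks != b.ticks) return a.ticks > b.ticks;
  if (a.ast_length != b.ast_length) return a.ast_length < b.ast_length;
  return a.src_length < b.src_length;
}

void SortCandidates(std::vector<Candidate>* candidates) {
  std::sort(candidates->begin(), candidates->end(), Before);
}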
void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- int order[kMaxCallPolymorphism];
+ FunctionSorter order[kMaxCallPolymorphism];
bool handle_smi = false;
bool handled_string = false;
@@ -6889,17 +6883,23 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
handle_smi = true;
}
expr->set_target(target);
- order[ordered_functions++] = i;
+ order[ordered_functions++] =
+ FunctionSorter(i,
+ expr->target()->shared()->profiler_ticks(),
+ InliningAstSize(expr->target()),
+ expr->target()->shared()->SourceSize());
}
}
+ std::sort(order, order + ordered_functions);
+
HBasicBlock* number_block = NULL;
HBasicBlock* join = NULL;
handled_string = false;
int count = 0;
for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn];
+ int i = order[fn].index();
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
@@ -6919,7 +6919,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
- GotoNoSimulate(empty_smi_block, number_block);
+ Goto(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
@@ -6933,19 +6933,18 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Handle<Map> map = info.map();
if (info.type()->Is(Type::Number())) {
Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
- compare = New<HCompareMap>(receiver, heap_number_map, top_info(),
- if_true, if_false);
+ compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
} else if (info.type()->Is(Type::String())) {
compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
} else {
- compare = New<HCompareMap>(receiver, map, top_info(),
- if_true, if_false);
+ compare = New<HCompareMap>(receiver, map, if_true, if_false);
}
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- GotoNoSimulate(if_true, number_block);
+ Goto(if_true, number_block);
if_true = number_block;
+ number_block->SetJoinId(expr->id());
}
set_current_block(if_true);
@@ -6993,11 +6992,16 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
+ // Because the deopt may be the only path in the polymorphic call, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful call.
+ Drop(1); // Drop receiver.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
Property* prop = expr->expression()->AsProperty();
- HInstruction* function = BuildNamedGeneric(
- LOAD, receiver, name, NULL, prop->IsUninitialized());
+ HInstruction* function = BuildLoadNamedGeneric(
+ receiver, name, prop->IsUninitialized());
AddInstruction(function);
Push(function);
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -7101,8 +7105,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind,
- HSourcePosition position) {
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7234,13 +7237,11 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
- int function_id = graph()->TraceInlinedFunction(target_shared, position);
-
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind, function_id);
+ this, &target_info, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
@@ -7387,8 +7388,7 @@ bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ NORMAL_RETURN);
}
@@ -7399,8 +7399,7 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
implicit_return_value,
expr->id(),
expr->ReturnId(),
- CONSTRUCT_CALL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ CONSTRUCT_CALL_RETURN);
}
@@ -7414,8 +7413,7 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
NULL,
ast_id,
return_id,
- GETTER_CALL_RETURN,
- source_position());
+ GETTER_CALL_RETURN);
}
@@ -7429,8 +7427,7 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
1,
implicit_return_value,
id, assignment_id,
- SETTER_CALL_RETURN,
- source_position());
+ SETTER_CALL_RETURN);
}
@@ -7442,8 +7439,7 @@ bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ NORMAL_RETURN);
}
@@ -7627,7 +7623,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
result = AddElementAccess(elements, reduced_length, NULL,
- bounds_check, elements_kind, LOAD);
+ bounds_check, elements_kind, false);
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
@@ -7637,7 +7633,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
elements_kind = FAST_HOLEY_ELEMENTS;
}
AddElementAccess(
- elements, reduced_length, hole, bounds_check, elements_kind, STORE);
+ elements, reduced_length, hole, bounds_check, elements_kind, true);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
@@ -7782,12 +7778,11 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
}
bool drop_extra = false;
- bool is_store = false;
switch (call_type) {
case kCallApiFunction:
case kCallApiMethod:
// Need to check that none of the receiver maps could have changed.
- Add<HCheckMaps>(receiver, receiver_maps, top_info());
+ Add<HCheckMaps>(receiver, receiver_maps);
// Need to ensure the chain between receiver and api_holder is intact.
if (holder_lookup == CallOptimization::kHolderFound) {
AddCheckPrototypeMaps(api_holder, receiver_maps->first());
@@ -7809,7 +7804,6 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
break;
case kCallApiSetter:
{
- is_store = true;
// Receiver and prototype chain cannot have changed.
ASSERT_EQ(1, argc);
ASSERT_EQ(NULL, receiver);
@@ -7855,7 +7849,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::ApiFunctionCall);
- CallApiFunctionStub stub(is_store, call_data_is_undefined, argc);
+ CallApiFunctionStub stub(true, call_data_is_undefined, argc);
Handle<Code> code = stub.GetCode(isolate());
HConstant* code_value = Add<HConstant>(code);
@@ -7993,8 +7987,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(PushLoad(prop, receiver, key));
HValue* function = Pop();
- if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
-
// Push the function under the receiver.
environment()->SetExpressionStackAt(0, function);
@@ -8057,7 +8049,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
@@ -8267,7 +8259,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -8810,7 +8802,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -9086,12 +9078,13 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
if (result->HasObservableSideEffects() || result->IsPhi()) {
- if (push_sim_result == PUSH_BEFORE_SIMULATE) {
- Push(result);
+ if (push_sim_result == NO_PUSH_BEFORE_SIMULATE) {
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- Drop(1);
} else {
+ ASSERT(push_sim_result == PUSH_BEFORE_SIMULATE);
+ Push(result);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Drop(1);
}
}
return result;
@@ -9476,11 +9469,9 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
+ if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
- zone(),
- ScriptPositionToSourcePosition(expr->left()->position()),
- ScriptPositionToSourcePosition(expr->right()->position()));
+ zone(), expr->left()->position(), expr->right()->position());
}
return ast_context()->ReturnValue(result);
}
@@ -9514,7 +9505,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -9549,7 +9540,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
@@ -9609,14 +9600,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnInstruction(result, expr->id());
}
- PushBeforeSimulateBehavior push_behavior =
- ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
- : PUSH_BEFORE_SIMULATE;
HControlInstruction* compare = BuildCompareInstruction(
op, left, right, left_type, right_type, combined_type,
- ScriptPositionToSourcePosition(expr->left()->position()),
- ScriptPositionToSourcePosition(expr->right()->position()),
- push_behavior, expr->id());
+ expr->left()->position(), expr->right()->position(), expr->id());
if (compare == NULL) return; // Bailed out.
return ast_context()->ReturnControl(compare, expr->id());
}
@@ -9629,9 +9615,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Type* left_type,
Type* right_type,
Type* combined_type,
- HSourcePosition left_position,
- HSourcePosition right_position,
- PushBeforeSimulateBehavior push_sim_result,
+ int left_position,
+ int right_position,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
@@ -9656,7 +9641,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- if (FLAG_hydrogen_track_positions) {
+ if (FLAG_emit_opt_code_positions) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
@@ -9696,13 +9681,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
- if (push_sim_result == PUSH_BEFORE_SIMULATE) {
- Push(result);
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- Drop(1);
- } else {
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- }
+ Push(result);
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ Drop(1);
}
// TODO(jkummerow): Can we make this more efficient?
HBranch* branch = New<HBranch>(result);
@@ -9711,7 +9692,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (FLAG_hydrogen_track_positions) {
+ if (FLAG_emit_opt_code_positions) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
@@ -9727,7 +9708,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -9922,9 +9903,11 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
Add<HConstant>(value));
value_instruction = double_box;
- } else if (representation.IsSmi() && value->IsUninitialized()) {
- value_instruction = graph()->GetConstant0();
- // Ensure that Constant0 is stored as smi.
+ } else if (representation.IsSmi()) {
+ value_instruction = value->IsUninitialized()
+ ? graph()->GetConstant0()
+ : Add<HConstant>(value);
+ // Ensure that value is stored as smi.
access = access.WithRepresentation(representation);
} else {
value_instruction = Add<HConstant>(value);
@@ -10358,13 +10341,12 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- // We need to follow the evaluation order of full codegen.
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
+ HValue* string = Pop();
Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -10375,13 +10357,12 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- // We need to follow the evaluation order of full codegen.
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
+ HValue* string = Pop();
Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
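Both seq-string hunks above reorder the Pop() calls to match the new left-to-right VisitForValue order; values leave the environment stack in reverse of how they were pushed. A minimal illustration:

#include <cassert>
#include <stack>
#include <string>

int main() {
  std::stack<std::string> env;
  env.push("string");  // VisitForValue(arguments[0])
  env.push("index");   // VisitForValue(arguments[1])
  env.push("value");   // VisitForValue(arguments[2])
  assert(env.top() == "value");  env.pop();
  assert(env.top() == "index");  env.pop();
  assert(env.top() == "string"); env.pop();
  return 0;
}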
@@ -10405,23 +10386,14 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
Add<HStoreNamedField>(object,
HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
value);
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.Else();
{
// Nothing to do in this case.
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.End();
- if (!ast_context()->IsEffect()) {
- Drop(1);
- }
return ast_context()->ReturnValue(value);
}
@@ -10964,10 +10936,7 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
PrintStringProperty("name", name->ToCString().get());
- PrintIndent();
- trace_.Add("method \"%s:%d\"\n",
- name->ToCString().get(),
- info->optimization_id());
+ PrintStringProperty("method", name->ToCString().get());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -11081,22 +11050,14 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
+ int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
+ instruction->position() : 0;
int uses = instruction->UseCount();
PrintIndent();
- trace_.Add("0 %d ", uses);
+ trace_.Add("%d %d ", bci, uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
- if (FLAG_hydrogen_track_positions &&
- instruction->has_position() &&
- instruction->position().raw() != 0) {
- const HSourcePosition pos = instruction->position();
- trace_.Add(" pos:");
- if (pos.inlining_id() != 0) {
- trace_.Add("%d_", pos.inlining_id());
- }
- trace_.Add("%d", pos.position());
- }
trace_.Add(" <|@\n");
}
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b9d53be94e..b8344ef9c4 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -110,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, HSourcePosition position);
+ void AddInstruction(HInstruction* instr, int position);
bool Dominates(HBasicBlock* other) const;
bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -137,7 +137,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
- HSourcePosition position,
+ int position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -174,8 +174,6 @@ class HBasicBlock V8_FINAL : public ZoneObject {
dominates_loop_successors_ = true;
}
- void MarkSuccEdgeUnreachable(int succ);
-
inline Zone* zone() const;
#ifdef DEBUG
@@ -186,13 +184,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, HSourcePosition position);
- void FinishExit(HControlInstruction* instruction, HSourcePosition position);
+ void Finish(HControlInstruction* last, int position);
+ void FinishExit(HControlInstruction* instruction, int position);
void Goto(HBasicBlock* block,
- HSourcePosition position,
+ int position,
FunctionState* state = NULL,
bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
+ void GotoNoSimulate(HBasicBlock* block, int position) {
Goto(block, position, NULL, false);
}
@@ -200,7 +198,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
FunctionState* state,
- HSourcePosition position);
+ int position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -471,16 +469,6 @@ class HGraph V8_FINAL : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
- // If we are tracking source positions then this function assigns a unique
- // identifier to each inlining and dumps function source if it was inlined
- // for the first time during the current optimization.
- int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- HSourcePosition position);
-
- // Converts given HSourcePosition to the absolute offset from the start of
- // the corresponding script.
- int SourcePositionToScriptPosition(HSourcePosition position);
-
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -526,23 +514,6 @@ class HGraph V8_FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
- class InlinedFunctionInfo {
- public:
- explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
- : shared_(shared), start_position_(shared->start_position()) {
- }
-
- Handle<SharedFunctionInfo> shared() const { return shared_; }
- int start_position() const { return start_position_; }
-
- private:
- Handle<SharedFunctionInfo> shared_;
- int start_position_;
- };
-
- int next_inline_id_;
- ZoneList<InlinedFunctionInfo> inlined_functions_;
-
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -909,8 +880,7 @@ class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind,
- int inlining_id);
+ InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -940,8 +910,6 @@ class FunctionState V8_FINAL {
bool arguments_pushed() { return arguments_elements() != NULL; }
- int inlining_id() const { return inlining_id_; }
-
private:
HOptimizedGraphBuilder* owner_;
@@ -971,9 +939,6 @@ class FunctionState V8_FINAL {
HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
- int inlining_id_;
- HSourcePosition outer_source_position_;
-
FunctionState* outer_;
};
@@ -1057,8 +1022,7 @@ class HGraphBuilder {
: info_(info),
graph_(NULL),
current_block_(NULL),
- position_(HSourcePosition::Unknown()),
- start_position_(0) {}
+ position_(RelocInfo::kNoPosition) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1088,7 +1052,7 @@ class HGraphBuilder {
HBasicBlock* target,
FunctionState* state = NULL,
bool add_simulate = true) {
- from->Goto(target, source_position(), state, add_simulate);
+ from->Goto(target, position_, state, add_simulate);
}
void Goto(HBasicBlock* target,
FunctionState* state = NULL,
@@ -1104,7 +1068,7 @@ class HGraphBuilder {
void AddLeaveInlined(HBasicBlock* block,
HValue* return_value,
FunctionState* state) {
- block->AddLeaveInlined(return_value, state, source_position());
+ block->AddLeaveInlined(return_value, state, position_);
}
void AddLeaveInlined(HValue* return_value, FunctionState* state) {
return AddLeaveInlined(current_block(), return_value, state);
@@ -1310,6 +1274,8 @@ class HGraphBuilder {
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
+ int position() const { return position_; }
+
protected:
virtual bool BuildGraph() = 0;
@@ -1328,7 +1294,7 @@ class HGraphBuilder {
HValue* length,
HValue* key,
bool is_js_array,
- PropertyAccessType access_type);
+ bool is_store);
HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
@@ -1385,7 +1351,7 @@ class HGraphBuilder {
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- PropertyAccessType access_type,
+ bool is_store,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
@@ -1395,9 +1361,11 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- PropertyAccessType access_type,
+ bool is_store,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
+ HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
HInstruction* AddLoadStringInstanceType(HValue* string);
HInstruction* AddLoadStringLength(HValue* string);
HStoreNamedField* AddStoreMapNoWriteBarrier(HValue* object, HValue* map) {
@@ -1436,7 +1404,8 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason);
+ void FinishExitWithHardDeoptimization(const char* reason,
+ HBasicBlock* continuation);
void AddIncrementCounter(StatsCounter* counter);
@@ -1808,27 +1777,6 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
ASSERT(position != RelocInfo::kNoPosition);
- position_.set_position(position - start_position_);
- }
-
- void EnterInlinedSource(int start_position, int id) {
- if (FLAG_hydrogen_track_positions) {
- start_position_ = start_position;
- position_.set_inlining_id(id);
- }
- }
-
- // Convert the given absolute offset from the start of the script to
- // the HSourcePosition assuming that this position corresponds to the
- // same function as current position_.
- HSourcePosition ScriptPositionToSourcePosition(int position) {
- HSourcePosition pos = position_;
- pos.set_position(position - start_position_);
- return pos;
- }
-
- HSourcePosition source_position() { return position_; }
- void set_source_position(HSourcePosition position) {
position_ = position;
}
@@ -1848,6 +1796,9 @@ class HGraphBuilder {
HValue* mask,
int current_probe);
+ void PadEnvironmentForContinuation(HBasicBlock* from,
+ HBasicBlock* continuation);
+
template <class I>
I* AddInstructionTyped(I* instr) {
return I::cast(AddInstruction(instr));
@@ -1856,8 +1807,7 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
- HSourcePosition position_;
- int start_position_;
+ int position_;
};
@@ -2237,6 +2187,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
private:
+ enum PropertyAccessType { LOAD, STORE };
+
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -2244,7 +2196,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
GlobalPropertyAccess LookupGlobalProperty(Variable* var,
LookupResult* lookup,
- PropertyAccessType access_type);
+ bool is_store);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2261,8 +2213,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind,
- HSourcePosition position);
+ InliningKind inlining_kind);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2465,27 +2416,23 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
+ HControlInstruction* BuildCompareInstruction(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ int left_position,
+ int right_position,
+ BailoutId bailout_id);
+
+ HInstruction* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
enum PushBeforeSimulateBehavior {
PUSH_BEFORE_SIMULATE,
NO_PUSH_BEFORE_SIMULATE
};
-
- HControlInstruction* BuildCompareInstruction(
- Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* combined_type,
- HSourcePosition left_position,
- HSourcePosition right_position,
- PushBeforeSimulateBehavior push_sim_result,
- BailoutId bailout_id);
-
- HInstruction* BuildStringCharCodeAt(HValue* string,
- HValue* index);
-
HValue* BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
@@ -2493,10 +2440,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- HValue* object,
- HValue* key,
- HValue* value);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object,
+ HValue* key);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2510,14 +2455,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
- PropertyAccessType access_type,
+ bool is_store,
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
- PropertyAccessType access_type,
+ bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2525,14 +2470,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- PropertyAccessType access_type,
+ bool is_store,
bool* has_side_effects);
- HInstruction* BuildNamedGeneric(PropertyAccessType access,
- HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized = false);
+ HInstruction* BuildLoadNamedGeneric(HValue* object,
+ Handle<String> name,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2556,11 +2499,16 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId return_id,
bool is_uninitialized = false);
- HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
- HValue* checked_object);
HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
HValue* checked_object,
HValue* value);
+ HInstruction* BuildStoreNamedGeneric(HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
+ HInstruction* BuildStoreKeyedGeneric(HValue* object,
+ HValue* key,
+ HValue* value);
HValue* BuildContextChainWalk(Variable* var);
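
The helper removed above converted an absolute script offset into a position relative to the enclosing function, tagged with an inlining id. A minimal standalone sketch of that conversion, with illustrative names rather than V8's actual types:

struct SourcePosition {
  int position;     // offset from the enclosing function's start position
  int inlining_id;  // identifies the inlined function this code came from
};

SourcePosition ScriptToSourcePosition(int script_offset, int start_position,
                                      int inlining_id) {
  SourcePosition pos;
  pos.position = script_offset - start_position;  // make it function-relative
  pos.inlining_id = inlining_id;
  return pos;
}
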
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 22709e41a0..e280c50e79 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -110,8 +110,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ebx, edx };
- descriptor->register_param_count_ = 2;
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -2322,74 +2322,66 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
+ // Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : Feedback vector
- // edx : slot in feedback vector (Smi)
+ // ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
- Label check_array, initialize_array, initialize_non_array, megamorphic, done;
+ Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done, Label::kFar);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
- __ j(equal, &done, Label::kFar);
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ j(equal, &done);
+
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
- // Load the global or builtins object from the current context and check
- // if we're dealing with the Array function or not.
+ // Load the global or builtins object from the current context
__ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
__ cmp(edi, Operand(ecx,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(equal, &check_array);
-
- // Non-array cache: Reload the cache state and check it.
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(ecx, Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
- __ j(equal, &initialize_non_array);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
__ j(not_equal, &megamorphic);
+ __ jmp(&done);
- // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
- // immortal immovable object (null) so no write-barrier is needed.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
- __ jmp(&done, Label::kFar);
-
- // Array cache: Reload the cache state and check to see if we're in a
- // monomorphic state where the state object is an AllocationSite object.
- __ bind(&check_array);
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(equal, &done, Label::kFar);
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
- __ j(equal, &initialize_array);
- __ cmp(ecx, Immediate(TypeFeedbackInfo::PremonomorphicSentinel(isolate)));
- __ j(equal, &initialize_array);
+ __ bind(&miss);
- // Both caches: Monomorphic -> megamorphic. The sentinel is an
- // immortal immovable object (undefined) so no write-barrier is needed.
+ // A monomorphic miss (i.e., the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kFar);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kNear);
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ bind(&initialize_array);
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // that indicates the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor: create an AllocationSite
+ // if we don't already have one, and store it in the cell.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2397,41 +2389,28 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ SmiTag(eax);
__ push(eax);
__ push(edi);
- __ push(edx);
__ push(ebx);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ pop(ebx);
- __ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
__ jmp(&done);
- // Non-array cache: Premonomorphic -> monomorphic.
- __ bind(&initialize_non_array);
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- edi);
- __ push(edi);
- __ push(ebx);
- __ push(edx);
- __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(edx);
- __ pop(ebx);
- __ pop(edi);
+ __ bind(&not_array_function);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
+ // No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : feedback vector
- // edx : (only if ebx is not undefined) slot in feedback vector (Smi)
+ // ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2490,9 +2469,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -2536,8 +2514,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
- // ebx : feedback vector
- // edx : (only if ebx is not undefined) slot in feedback vector (Smi)
+ // ebx : cache cell for call target
// edi : constructor function
Label slow, non_function_call;
@@ -5160,8 +5137,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
- // -- ebx : feedback vector (fixed array or undefined)
- // -- edx : slot index (if ebx is fixed array)
+ // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5182,27 +5158,22 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid fixed array.
+ // We should either have undefined in ebx or a valid cell.
Label okay_here;
- Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(fixed_array_map));
- __ Assert(equal, kExpectedFixedArrayInRegisterEbx);
-
- // edx should be a smi if we don't have undefined in ebx.
- __ AssertSmi(edx);
-
+ __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
+ __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
__ bind(&okay_here);
}
Label no_info;
- // If the feedback vector is undefined, or contains anything other than an
+ // If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
+ __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ cmp(FieldOperand(ebx, 0), Immediate(
masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
@@ -5258,6 +5229,7 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
+ // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5329,7 +5301,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = esi;
int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
+ bool restore_context = RestoreContextBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5410,20 +5382,15 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Operand context_restore_operand(ebp,
(2 + FCA::kContextSaveIndex) * kPointerSize);
- // Stores return the first js argument
- int return_value_offset = 0;
- if (is_store) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
- Operand return_value_operand(ebp, return_value_offset * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_address,
ApiParameterOperand(1),
argc + FCA::kArgsLength + 1,
return_value_operand,
- &context_restore_operand);
+ restore_context ?
+ &context_restore_operand : NULL);
}
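
The code-stubs changes above revert call-target feedback from a slot in a shared feedback vector back to a dedicated Cell per call site. A compact model of the state machine GenerateRecordCallTarget implements (simplified stand-in types, not V8's real ones):

enum CacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

struct CallTargetCell {
  CacheState state;
  const void* value;  // a JSFunction*, or an AllocationSite* for Array calls
};

void RecordCallTarget(CallTargetCell* cell, const void* function) {
  switch (cell->state) {
    case UNINITIALIZED:
      // First call seen: go monomorphic on this target. (For the Array
      // constructor the real stub stores an AllocationSite here instead,
      // so element-kind feedback can hang off the cell.)
      cell->state = MONOMORPHIC;
      cell->value = function;
      break;
    case MONOMORPHIC:
      if (cell->value != function) {
        // A different target at the same site: give up on precision.
        // The sentinel is immortal and immovable, so no write barrier.
        cell->state = MEGAMORPHIC;
        cell->value = nullptr;  // stands in for the megamorphic sentinel
      }
      break;
    case MEGAMORPHIC:
      break;  // stays megamorphic
  }
}
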
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 4c76f7dfe1..76a7003bfe 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -280,12 +280,10 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
- // -- ebx: feedback array
- // -- edx: slot in feedback array
+ // -- ebx: cache cell for call target
// -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
- 0, false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
}
@@ -308,13 +306,11 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- eax: number of arguments (not smi)
- // -- ebx: feedback array
- // -- edx: feedback slot (smi)
+ // -- ebx: cache cell for call target
// -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
- eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
}
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index fd4079cb4f..f3125666f8 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -118,9 +118,6 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
- InitializeFeedbackVector();
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -629,7 +626,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -980,7 +977,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, clause->CompareId());
+ CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1024,8 +1021,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
-
SetStatementPosition(stmt);
Label loop, exit;
@@ -1104,15 +1099,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
-
- // No need for a write barrier, we are storing a Smi in the feedback vector.
- __ LoadHeapObject(ebx, FeedbackVector());
- __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
- Immediate(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
+ __ LoadHeapObject(ebx, cell);
+ __ mov(FieldOperand(ebx, Cell::kValueOffset),
+ Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
@@ -1419,7 +1412,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
@@ -1432,8 +1425,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
- : "[ Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1495,12 +1489,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1639,7 +1633,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
- CallStoreIC(key->LiteralFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -2053,7 +2047,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ mov(edx, Operand(esp, kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, TypeFeedbackId::None());
+ CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2243,7 +2237,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2264,7 +2258,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2349,7 +2344,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -2387,7 +2383,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->value());
- CallStoreIC();
+ CallStoreIC(NOT_CONTEXTUAL);
break;
}
case KEYED_PROPERTY: {
@@ -2408,58 +2404,44 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, LanguageMode mode) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(name));
- __ push(Immediate(Smi::FromInt(mode)));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
-}
-
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- CallStoreIC();
-
+ CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
+ if (var->IsStackLocal()) {
+ Label skip;
+ __ mov(edx, StackOperand(var));
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip);
+ __ mov(StackOperand(var), eax);
+ __ bind(&skip);
+ } else {
+ ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are
+ // able to drill a hole to that function context, even from inside a
+ // 'with' context. We thus bypass the normal static scope lookup for
+ // var->IsContextSlot().
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- ASSERT(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip, Label::kNear);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2470,16 +2452,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2487,7 +2471,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ Check(equal, kLetBindingReInitialization);
}
- EmitStoreToStackLocalOrContextSlot(var, location);
+ // Perform the assignment.
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
+ } else {
+ ASSERT(var->IsLookupSlot());
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ push(Immediate(Smi::FromInt(language_mode())));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2507,7 +2504,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(expr->AssignmentFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2526,7 +2523,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->AssignmentFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2555,8 +2552,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
+ ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
+ ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2669,15 +2668,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ LoadHeapObject(ebx, FeedbackVector());
- __ mov(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ mov(ebx, cell);
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
@@ -2849,10 +2848,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
- __ LoadHeapObject(ebx, FeedbackVector());
- __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ mov(ebx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -4417,7 +4416,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ NOT_CONTEXTUAL,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4448,7 +4449,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(expr->CountStoreFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4465,7 +4466,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->CountStoreFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4487,7 +4488,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4496,7 +4497,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4655,7 +4655,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4691,7 +4691,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
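
Throughout this file the pattern is the same: allocate one cell per call or construct site, seed it with the uninitialized sentinel, and record the site's AST id against the cell. A sketch of that bookkeeping under hypothetical names (the real code goes through the isolate's factory and embeds the cell in the generated code):

#include <map>
#include <memory>

struct Cell { const void* value; };
const void* const kUninitializedSentinel = nullptr;

class FeedbackRecorder {
 public:
  // One cell per call/construct site, keyed by the site's AST id.
  Cell* NewCellForSite(int ast_id) {
    std::unique_ptr<Cell> cell(new Cell{kUninitializedSentinel});
    Cell* raw = cell.get();
    cells_[ast_id] = std::move(cell);
    return raw;  // the generated code embeds this pointer and patches *raw
  }

 private:
  std::map<int, std::unique_ptr<Cell>> cells_;
};
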
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 69f6e3a6bb..bd6dcefe15 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -947,7 +947,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -955,7 +956,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_state,
+ Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1061,14 +1064,17 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
+ Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 71946afe06..f9d1fc0d25 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -476,8 +476,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -1179,7 +1178,6 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1453,39 +1451,54 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
Register dividend = ToRegister(instr->left());
- HDiv* hdiv = instr->hydrogen();
- int32_t divisor = hdiv->right()->GetInteger32Constant();
- Register result = ToRegister(instr->result());
- ASSERT(!result.is(dividend));
+ int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
+ int32_t test_value = 0;
+ int32_t power = 0;
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
- hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
- hdiv->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, Operand(dividend));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
}
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- Abs(divisor) != 1) {
- __ test(dividend, Immediate(Abs(divisor) - 1));
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ Label done, negative;
+ __ cmp(dividend, 0);
+ __ j(less, &negative, Label::kNear);
+ __ sar(dividend, power);
+ if (divisor < 0) __ neg(dividend);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ neg(dividend);
+ __ sar(dividend, power);
+ if (divisor > 0) __ neg(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ neg" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ test(dividend, Immediate(test_value));
DeoptimizeIf(not_zero, instr->environment());
+ __ sar(dividend, power);
+ }
}
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2(Abs(divisor));
- if (shift > 0) {
- // The arithmetic shift is always OK, the 'if' is an optimization only.
- if (shift > 1) __ sar(result, 31);
- __ shr(result, 32 - shift);
- __ add(result, dividend);
- __ sar(result, shift);
- }
- if (divisor < 0) __ neg(result);
+
+ if (divisor < 0) __ neg(dividend);
+
return;
}
@@ -4276,9 +4289,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
@@ -4298,9 +4308,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
-
- // We know that value is a smi now, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -4338,6 +4345,10 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -5769,7 +5780,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
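
The restored DoDivI fast path earlier in this file divides by a power-of-two constant using a neg/sar/neg sequence when every use truncates to int32, because a bare arithmetic shift rounds toward minus infinity rather than toward zero. A standalone sketch of the rounding behavior, using 64-bit intermediates in place of the wrap-around the machine code tolerates:

#include <cstdint>

int32_t TruncatingDivByPowerOf2(int32_t dividend, int32_t divisor) {
  int64_t magnitude = dividend < 0 ? -static_cast<int64_t>(dividend)
                                   : static_cast<int64_t>(dividend);
  int64_t abs_divisor = divisor < 0 ? -static_cast<int64_t>(divisor)
                                    : static_cast<int64_t>(divisor);
  int power = 0;
  while ((abs_divisor >> power) != 1) power++;  // divisor is a power of two

  int64_t quotient = magnitude >> power;  // shifting a magnitude truncates
  bool negative = (dividend < 0) != (divisor < 0);
  return static_cast<int32_t>(negative ? -quotient : quotient);
}

On the non-truncating path the generated code instead deoptimizes when dividend & (|divisor| - 1) is non-zero, since any remainder would make the shifted quotient inexact.
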
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index bbbc7ec731..27a5672901 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1319,10 +1319,10 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegister(instr->left());
+ LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ return AssignEnvironment(DefineSameAsFirst(div));
}
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 30bc4adb58..faf768e11d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -2980,8 +2980,16 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::Abort(BailoutReason reason) {
-#ifdef DEBUG
+ // We want to pass the msg string as a smi to avoid GC
+ // problems, but msg is not guaranteed to be properly
+ // aligned. Instead, we pass an aligned pointer that is a
+ // proper v8 smi, and also pass the alignment difference
+ // from the real pointer as a smi.
const char* msg = GetBailoutReason(reason);
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2994,15 +3002,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(eax);
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
+ push(Immediate(p0));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
}
// will not return here
int3();
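
A worked example of the pointer-smuggling scheme Abort() now uses, assuming the ia32 smi encoding (zero tag in the low bit); constants are inlined here for illustration:

#include <cassert>
#include <cstdint>

void SplitForAbort(const char* msg) {
  const intptr_t kSmiTagMask = 1;  // ia32: smis have a zero low bit
  const intptr_t kSmiTag = 0;
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // smi-shaped, GC-safe
  intptr_t delta = p1 - p0;                     // 0 or 1
  // The runtime recovers the original pointer from the two smi-safe parts:
  assert(reinterpret_cast<const char*>(p0 + delta) == msg);
}
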
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 4bc428849c..a5b93b9b22 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -422,14 +422,13 @@ static void CompileCallLoadPropertyWithInterceptor(
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that will be removed
// when api call ICs are generated in hydrogen.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- bool is_store,
- int argc,
- Register* values) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ int argc,
+ Register* values) {
// Copy return value.
__ pop(scratch_in);
// receiver
@@ -494,7 +493,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ CallApiFunctionStub stub(true, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -1067,6 +1066,15 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization,
+ Handle<Map> receiver_map) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), 0, NULL);
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1255,6 +1263,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index bd06cb6f96..1e7997a80d 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -148,9 +148,9 @@ IC::IC(FrameDepth depth, Isolate* isolate)
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
state_ = target_->ic_state();
- extra_ic_state_ = target_->extra_ic_state();
- target()->FindAllTypes(&types_);
- target()->FindHandlers(&handlers_);
+ extra_ic_state_ = target_->needs_extended_extra_ic_state(target_->kind())
+ ? target_->extended_extra_ic_state()
+ : target_->extra_ic_state();
}
@@ -283,7 +283,7 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder == OWN_MAP) {
- Map* old_map = first_map();
+ Map* old_map = target()->FindFirstMap();
if (old_map == *map) return true;
if (old_map != NULL) {
if (old_map->is_deprecated()) return true;
@@ -308,8 +308,10 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
- for (int i = 0; i < handlers()->length(); i++) {
- Handle<Code> handler = handlers()->at(i);
+ CodeHandleList handlers;
+ target()->FindHandlers(&handlers);
+ for (int i = 0; i < handlers.length(); i++) {
+ Handle<Code> handler = handlers.at(i);
int index = map->IndexInCodeCache(*name, *handler);
if (index >= 0) {
map->RemoveFromCodeCache(*name, *handler, index);
@@ -605,14 +607,18 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code) {
if (!code->is_handler()) return false;
+ TypeHandleList types;
+ CodeHandleList handlers;
+
int number_of_valid_types;
int handler_to_overwrite = -1;
- int number_of_types = types()->length();
+ target()->FindAllTypes(&types);
+ int number_of_types = types.length();
number_of_valid_types = number_of_types;
for (int i = 0; i < number_of_types; i++) {
- Handle<HeapType> current_type = types()->at(i);
+ Handle<HeapType> current_type = types.at(i);
// Filter out deprecated maps to ensure their instances get migrated.
if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
number_of_valid_types--;
@@ -628,19 +634,18 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
if (number_of_valid_types >= 4) return false;
if (number_of_types == 0) return false;
- if (handlers()->length() < types()->length()) return false;
+ if (!target()->FindHandlers(&handlers, types.length())) return false;
number_of_valid_types++;
if (handler_to_overwrite >= 0) {
- handlers()->Set(handler_to_overwrite, code);
+ handlers.Set(handler_to_overwrite, code);
} else {
- types()->Add(type);
- handlers()->Add(code);
+ types.Add(type);
+ handlers.Add(code);
}
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- kind(), types(), handlers(), number_of_valid_types,
- name, extra_ic_state());
+ &types, &handlers, number_of_valid_types, name, extra_ic_state());
set_target(*ic);
return true;
}
@@ -692,29 +697,35 @@ void IC::UpdateMonomorphicIC(Handle<HeapType> type,
Handle<String> name) {
if (!handler->is_handler()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- kind(), name, type, handler, extra_ic_state());
+ name, type, handler, extra_ic_state());
set_target(*ic);
}
void IC::CopyICToMegamorphicCache(Handle<String> name) {
- if (handlers()->length() < types()->length()) return;
- for (int i = 0; i < types()->length(); i++) {
- UpdateMegamorphicCache(*types()->at(i), *name, *handlers()->at(i));
+ TypeHandleList types;
+ CodeHandleList handlers;
+ target()->FindAllTypes(&types);
+ if (!target()->FindHandlers(&handlers, types.length())) return;
+ for (int i = 0; i < types.length(); i++) {
+ UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
}
}
-bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
- if (source_map == NULL) return true;
- if (target_map == NULL) return false;
- ElementsKind target_elements_kind = target_map->elements_kind();
- bool more_general_transition = IsMoreGeneralElementsKindTransition(
- source_map->elements_kind(), target_elements_kind);
+bool IC::IsTransitionOfMonomorphicTarget(Handle<HeapType> type) {
+ if (!type->IsClass()) return false;
+ Map* receiver_map = *type->AsClass();
+ Map* current_map = target()->FindFirstMap();
+ ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+ bool more_general_transition =
+ IsMoreGeneralElementsKindTransition(
+ current_map->elements_kind(), receiver_elements_kind);
Map* transitioned_map = more_general_transition
- ? source_map->LookupElementsTransitionMap(target_elements_kind)
+ ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
: NULL;
- return transitioned_map == target_map;
+
+ return transitioned_map == receiver_map;
}
@@ -731,11 +742,8 @@ void IC::PatchCache(Handle<HeapType> type,
// For now, call stubs are allowed to rewrite to the same stub. This
// happens e.g., when the field does not contain a function.
ASSERT(!target().is_identical_to(code));
- Map* old_map = first_map();
- Code* old_handler = first_handler();
- Map* map = type->IsClass() ? *type->AsClass() : NULL;
- if (old_handler == *code &&
- IsTransitionOfMonomorphicTarget(old_map, map)) {
+ Code* old_handler = target()->FindFirstHandler();
+ if (old_handler == *code && IsTransitionOfMonomorphicTarget(type)) {
UpdateMonomorphicIC(type, code, name);
break;
}
@@ -1004,7 +1012,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
if (target().is_identical_to(string_stub())) {
target_receiver_maps.Add(isolate()->factory()->string_map());
} else {
- GetMapsFromTypes(&target_receiver_maps);
+ target()->FindAllMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
@@ -1055,15 +1063,15 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
MaybeObject* maybe_object = NULL;
Handle<Code> stub = generic_stub();
- // Check for non-string values that can be converted into an
- // internalized string directly or is representable as a smi.
+ // Check for values that can be converted into an internalized string
+ // directly or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
if (maybe_object->IsFailure()) return maybe_object;
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- ASSERT(!object->IsAccessCheckNeeded());
+ ASSERT(!object->IsJSGlobalProxy());
if (object->IsString() && key->IsNumber()) {
if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
@@ -1102,17 +1110,21 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- if (!lookup->IsFound()) return false;
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+
+ if (lookup->holder() == *receiver) {
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ return lookup->IsFound() &&
+ !lookup->IsReadOnly() &&
+ lookup->CanHoldValue(value) &&
+ lookup->IsCacheable();
+ }
+ return lookup->CanHoldValue(value);
}
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
- if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
if (lookup->IsPropertyCallbacks()) return true;
- // JSGlobalProxy either stores on the global object in the prototype, or
- // goes into the runtime if access checks are needed, so this is always
- // safe.
+ // JSGlobalProxy always goes via the runtime, so it's safe to cache.
if (receiver->IsJSGlobalProxy()) return true;
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
@@ -1298,7 +1310,7 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
Handle<String> name,
Handle<Object> value,
InlineCacheHolderFlag cache_holder) {
- if (object->IsAccessCheckNeeded()) return slow_stub();
+ if (object->IsJSGlobalProxy()) return slow_stub();
ASSERT(cache_holder == OWN_MAP);
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -1322,19 +1334,17 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case NORMAL:
if (kind() == Code::KEYED_STORE_IC) break;
- if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
+ if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
- ? handle(GlobalObject::cast(receiver->GetPrototype()))
- : Handle<GlobalObject>::cast(receiver);
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(
- union_type->IsConstant(), receiver->IsJSGlobalProxy());
+ StoreGlobalStub stub(union_type->IsConstant());
+
Handle<Code> code = stub.GetCodeCopyFromTemplate(
- isolate(), *global, *cell);
+ isolate(), receiver->map(), *cell);
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
HeapObject::UpdateMapCodeCache(receiver, name, code);
return code;
@@ -1375,7 +1385,7 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case INTERCEPTOR:
if (kind() == Code::KEYED_STORE_IC) break;
- ASSERT(HasInterceptorSetter(*holder));
+ ASSERT(HasInterceptorSetter(*receiver));
return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
@@ -1408,27 +1418,39 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
monomorphic_map, strict_mode(), store_mode);
}
+ MapHandleList target_receiver_maps;
+ target()->FindAllMaps(&target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ // If a non-map-specific IC is installed (e.g. keyed stores into properties
+ // in dictionary mode), there will be no receiver maps in the target.
+ return generic_stub();
+ }
+
// There are several special cases where an IC that is MONOMORPHIC can still
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
KeyedAccessStoreMode old_store_mode =
KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
- // If the "old" and "new" maps are in the same elements map family, stay
- // MONOMORPHIC and use the map for the most generic ElementsKind.
- Handle<Map> transitioned_map = receiver_map;
+ Handle<Map> transitioned_receiver_map = receiver_map;
if (IsTransitionStoreMode(store_mode)) {
- transitioned_map = ComputeTransitionedMap(receiver, store_mode);
+ transitioned_receiver_map = ComputeTransitionedMap(receiver, store_mode);
}
- if (IsTransitionOfMonomorphicTarget(first_map(), *transitioned_map)) {
- // Element family is the same, use the "worst" case map.
+ if (receiver_map.is_identical_to(previous_receiver_map) ||
+ IsTransitionOfMonomorphicTarget(
+ MapToType<HeapType>(transitioned_receiver_map, isolate()))) {
+ // If the "old" and "new" maps are in the same elements map family, or
+ // if they at least come from the same origin for a transitioning store,
+ // stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- transitioned_map, strict_mode(), store_mode);
- } else if (first_map() == receiver->map() &&
+ transitioned_receiver_map, strict_mode(), store_mode);
+ } else if (*previous_receiver_map == receiver->map() &&
old_store_mode == STANDARD_STORE &&
- (IsGrowStoreMode(store_mode) ||
+ (store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
// A "normal" IC that handles stores can switch to a version that can
@@ -1441,9 +1463,6 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
ASSERT(state() != GENERIC);
- MapHandleList target_receiver_maps;
- GetMapsFromTypes(&target_receiver_maps);
-
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
@@ -1636,8 +1655,8 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
return *result;
}
- // Check for non-string values that can be converted into an
- // internalized string directly or is representable as a smi.
+ // Check for values that can be converted into an internalized string
+ // directly or are representable as a smi.
key = TryConvertKey(key, isolate());
MaybeObject* maybe_object = NULL;
@@ -1662,7 +1681,7 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- ASSERT(!object->IsAccessCheckNeeded());
+ ASSERT(!object->IsJSGlobalProxy());
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -2347,7 +2366,7 @@ Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
Handle<Object> right) {
- State state(target()->extra_ic_state());
+ State state(target()->extended_extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
@@ -2663,7 +2682,7 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
void CompareNilIC::Clear(Address address, Code* target) {
if (IsCleared(target)) return;
- ExtraICState state = target->extra_ic_state();
+ ExtraICState state = target->extended_extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -2685,7 +2704,7 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extra_ic_state();
+ ExtraICState extra_ic_state = target()->extended_extra_ic_state();
CompareNilICStub stub(extra_ic_state);
@@ -2700,8 +2719,8 @@ MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
// Find or create the specialized stub to support the new set of types.
Handle<Code> code;
if (stub.IsMonomorphic()) {
- Handle<Map> monomorphic_map(already_monomorphic && (first_map() != NULL)
- ? first_map()
+ Handle<Map> monomorphic_map(already_monomorphic
+ ? target()->FindFirstMap()
: HeapObject::cast(*object)->map());
code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, stub);
} else {
@@ -2769,7 +2788,7 @@ Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(target()->extra_ic_state());
+ ToBooleanStub stub(target()->extended_extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index fce585f6d7..99309f4edf 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -150,19 +150,6 @@ class IC {
// Get the call-site target; used for determining the state.
Handle<Code> target() const { return target_; }
- TypeHandleList* types() { return &types_; }
- CodeHandleList* handlers() { return &handlers_; }
- Map* first_map() {
- return types_.length() == 0 ? NULL : *TypeToMap(*types_.at(0), isolate_);
- }
- Code* first_handler() {
- return handlers_.length() == 0 ? NULL : *handlers_.at(0);
- }
- void GetMapsFromTypes(MapHandleList* maps) {
- for (int i = 0; i < types_.length(); ++i) {
- maps->Add(TypeToMap(*types_.at(i), isolate_));
- }
- }
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
Isolate* isolate() const { return isolate_; }
@@ -222,7 +209,7 @@ class IC {
virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+ bool IsTransitionOfMonomorphicTarget(Handle<HeapType> type);
void PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
@@ -273,9 +260,6 @@ class IC {
ExtraICState extra_ic_state_;
- TypeHandleList types_;
- CodeHandleList handlers_;
-
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -336,7 +320,8 @@ class LoadIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_state);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
@@ -497,7 +482,8 @@ class StoreIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_ic_state);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index a4dd5f3314..d47c300ef3 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -100,7 +100,7 @@ class IncrementalMarking {
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 32768;
+ static const intptr_t kWriteBarriersInvokedThreshold = 65536;
// Start off by marking this many times more memory than has been allocated.
static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
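
For context on the constant change above: incremental marking batches its work, and a marking step fires once either counter crosses its threshold, so doubling kWriteBarriersInvokedThreshold halves how often barrier-heavy workloads force a step. A minimal sketch of the trigger, with invented names:

    #include <cstdint>

    // Mirrors the two thresholds above (post-change values).
    static const std::intptr_t kAllocatedThreshold = 65536;
    static const std::intptr_t kWriteBarriersInvokedThreshold = 65536;

    struct MarkingTrigger {
      std::intptr_t allocated_since_step = 0;
      std::intptr_t write_barriers_since_step = 0;

      // Called from allocation and write-barrier slow paths; true means
      // enough work has accumulated to pay for one incremental step.
      bool StepDue(std::intptr_t newly_allocated, std::intptr_t barriers) {
        allocated_since_step += newly_allocated;
        write_barriers_since_step += barriers;
        if (allocated_since_step < kAllocatedThreshold &&
            write_barriers_since_step < kWriteBarriersInvokedThreshold) {
          return false;
        }
        allocated_since_step = 0;
        write_barriers_since_step = 0;
        return true;
      }
    };
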
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index ca324603f7..8a2f4219c7 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -778,7 +778,7 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
v8::AccessType type) {
- ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
DisallowHeapAllocation no_gc;
@@ -829,7 +829,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
bool Isolate::MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type) {
- ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
ASSERT(context());
@@ -946,7 +946,6 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
- if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(heap_.illegal_access_string());
}
@@ -1123,6 +1122,8 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
if (!bootstrapping) {
+ Handle<String> stack_trace;
+ if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
@@ -1162,6 +1163,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
+ stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
@@ -1564,8 +1566,7 @@ Isolate::Isolate()
sweeper_thread_(NULL),
num_sweeper_threads_(0),
max_available_threads_(0),
- stress_deopt_count_(0),
- next_optimization_id_(0) {
+ stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1581,7 +1582,6 @@ Isolate::Isolate()
thread_manager_->isolate_ = this;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
@@ -1672,10 +1672,6 @@ void Isolate::Deinit() {
delete[] sweeper_thread_;
sweeper_thread_ = NULL;
- if (FLAG_job_based_sweeping &&
- heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
- heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
- }
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1971,7 +1967,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_A64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -2017,10 +2013,7 @@ bool Isolate::Init(Deserializer* des) {
max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
}
- if (!FLAG_job_based_sweeping) {
- num_sweeper_threads_ =
- SweeperThread::NumberOfThreads(max_available_threads_);
- }
+ num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index ef1dd30b22..d93a862294 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -102,7 +102,6 @@ class DebuggerAgent;
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@@ -208,11 +207,6 @@ class ThreadId {
};
-#define FIELD_ACCESSOR(type, name) \
- inline void set_##name(type v) { name##_ = v; } \
- inline type name() const { return name##_; }
-
-
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
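
Most of the isolate.h churn below is mechanical: FIELD_ACCESSOR and THREAD_LOCAL_TOP_ACCESSOR are deleted and each use is replaced by the getter/setter pair the macro generated. For reference, the expansions being inlined, in a compilable toy harness (IsolateLike and its fields are stand-ins):

    #include <cstdint>

    struct ThreadId { int id; };
    struct ThreadLocalTop { ThreadId thread_id_; };

    struct IsolateLike {
      uintptr_t stack_limit_;
      ThreadLocalTop thread_local_top_;

      // FIELD_ACCESSOR(uintptr_t, stack_limit) expanded to:
      inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
      inline uintptr_t stack_limit() const { return stack_limit_; }

      // THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id) expanded to:
      inline void set_thread_id(ThreadId v) { thread_local_top_.thread_id_ = v; }
      inline ThreadId thread_id() const { return thread_local_top_.thread_id_; }
    };
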
@@ -239,7 +233,14 @@ class ThreadLocalTop BASE_EMBEDDED {
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- FIELD_ACCESSOR(Address, try_catch_handler_address)
+ inline Address try_catch_handler_address() {
+ return try_catch_handler_address_;
+ }
+
+ // Set the address of the top C++ try catch handler.
+ inline void set_try_catch_handler_address(Address address) {
+ try_catch_handler_address_ = address;
+ }
void Free() {
ASSERT(!has_pending_message_);
@@ -359,18 +360,12 @@ typedef List<HeapObject*> DebugObjectCache;
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, microtask_pending, false) \
- V(bool, autorun_microtasks, true) \
+ V(bool, microtask_pending, false) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
V(CodeTracer*, code_tracer, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
-#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
- inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
- inline type name() const { return thread_local_top_.name##_; }
-
-
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
@@ -390,7 +385,6 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
@@ -398,14 +392,17 @@ class Isolate {
prev_(NULL) { }
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
-
- FIELD_ACCESSOR(uintptr_t, stack_limit)
- FIELD_ACCESSOR(ThreadState*, thread_state)
+ void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
+ uintptr_t stack_limit() const { return stack_limit_; }
+ ThreadState* thread_state() const { return thread_state_; }
+ void set_thread_state(ThreadState* value) { thread_state_ = value; }
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
- FIELD_ACCESSOR(Simulator*, simulator)
+ Simulator* simulator() const { return simulator_; }
+ void set_simulator(Simulator* simulator) {
+ simulator_ = simulator;
+ }
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -419,7 +416,6 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
- !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -545,35 +541,38 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
+ SaveContext* save_context() { return thread_local_top_.save_context_; }
+ void set_save_context(SaveContext* save) {
+ thread_local_top_.save_context_ = save;
+ }
// Access to current thread id.
- THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
+ ThreadId thread_id() { return thread_local_top_.thread_id_; }
+ void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
-
+ bool external_caught_exception() {
+ return thread_local_top_.external_caught_exception_;
+ }
+ void set_external_caught_exception(bool value) {
+ thread_local_top_.external_caught_exception_ = value;
+ }
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
-
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
-
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
-
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
-
- THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
-
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
@@ -588,8 +587,12 @@ class Isolate {
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
-
- THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
+ v8::TryCatch* catcher() {
+ return thread_local_top_.catcher_;
+ }
+ void set_catcher(v8::TryCatch* catcher) {
+ thread_local_top_.catcher_ = catcher;
+ }
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
@@ -705,8 +708,12 @@ class Isolate {
// Tells whether the current context has experienced an out of memory
// exception.
bool is_out_of_memory();
-
- THREAD_LOCAL_TOP_ACCESSOR(bool, ignore_out_of_memory)
+ bool ignore_out_of_memory() {
+ return thread_local_top_.ignore_out_of_memory_;
+ }
+ void set_ignore_out_of_memory(bool value) {
+ thread_local_top_.ignore_out_of_memory_ = value;
+ }
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator);
@@ -931,7 +938,11 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- FIELD_ACCESSOR(bool, fp_stubs_generated);
+ void set_fp_stubs_generated(bool value) {
+ fp_stubs_generated_ = value;
+ }
+
+ bool fp_stubs_generated() { return fp_stubs_generated_; }
Builtins* builtins() { return &builtins_; }
@@ -983,20 +994,43 @@ class Isolate {
#endif
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
- FIELD_ACCESSOR(bool, simulator_initialized)
- FIELD_ACCESSOR(HashMap*, simulator_i_cache)
- FIELD_ACCESSOR(Redirection*, simulator_redirection)
+ bool simulator_initialized() { return simulator_initialized_; }
+ void set_simulator_initialized(bool initialized) {
+ simulator_initialized_ = initialized;
+ }
+
+ HashMap* simulator_i_cache() { return simulator_i_cache_; }
+ void set_simulator_i_cache(HashMap* hash_map) {
+ simulator_i_cache_ = hash_map;
+ }
+
+ Redirection* simulator_redirection() {
+ return simulator_redirection_;
+ }
+ void set_simulator_redirection(Redirection* redirection) {
+ simulator_redirection_ = redirection;
+ }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
+ ExternalCallbackScope* external_callback_scope() {
+ return thread_local_top_.external_callback_scope_;
+ }
+ void set_external_callback_scope(ExternalCallbackScope* scope) {
+ thread_local_top_.external_callback_scope_ = scope;
+ }
+
+ StateTag current_vm_state() {
+ return thread_local_top_.current_vm_state_;
+ }
- THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
+ void set_current_vm_state(StateTag state) {
+ thread_local_top_.current_vm_state_ = state;
+ }
void SetData(uint32_t slot, void* data) {
ASSERT(slot < Internals::kNumIsolateDataSlots);
@@ -1007,7 +1041,12 @@ class Isolate {
return embedder_data_[slot];
}
- THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
+ LookupResult* top_lookup_result() {
+ return thread_local_top_.top_lookup_result_;
+ }
+ void SetTopLookupResult(LookupResult* top) {
+ thread_local_top_.top_lookup_result_ = top;
+ }
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
@@ -1057,7 +1096,13 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
- FIELD_ACCESSOR(int, max_available_threads);
+ int max_available_threads() const {
+ return max_available_threads_;
+ }
+
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
@@ -1108,14 +1153,6 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
- int NextOptimizationId() {
- int id = next_optimization_id_++;
- if (!Smi::IsValid(next_optimization_id_)) {
- next_optimization_id_ = 0;
- }
- return id;
- }
-
private:
Isolate();
@@ -1293,7 +1330,6 @@ class Isolate {
double time_millis_at_init_;
#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
V8_TARGET_ARCH_MIPS && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
@@ -1348,8 +1384,6 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
- int next_optimization_id_;
-
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
@@ -1369,10 +1403,6 @@ class Isolate {
};
-#undef FIELD_ACCESSOR
-#undef THREAD_LOCAL_TOP_ACCESSOR
-
-
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 0d17b356ab..4510c4b45b 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -360,6 +360,7 @@ Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
PropertyAttributes attr;
Handle<Object> fun =
Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
+ if (fun.is_null()) return Handle<Object>::null();
if (!fun->IsJSFunction()) return object;
// Call toJSON function.
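
The added is_null() check follows V8's handle convention: a Handle-returning call that throws (here, a toJSON getter raising inside GetProperty) yields a null handle, and the caller must return null in turn so the pending exception keeps unwinding instead of being dereferenced. A toy model of the pattern, with stand-in types:

    #include <cassert>

    struct Object {};

    // Stand-in for v8::internal::Handle<T>: null means "exception pending".
    template <typename T>
    struct Handle {
      T* location = nullptr;
      bool is_null() const { return location == nullptr; }
      static Handle null() { return Handle(); }
    };

    static Object to_json_fun;

    // Models Object::GetProperty: returns a null handle when a getter threw.
    static Handle<Object> GetProperty(bool getter_throws) {
      if (getter_throws) return Handle<Object>::null();
      Handle<Object> h;
      h.location = &to_json_fun;
      return h;
    }

    static Handle<Object> ApplyToJsonFunction(Handle<Object> object,
                                              bool getter_throws) {
      Handle<Object> fun = GetProperty(getter_throws);
      if (fun.is_null()) return Handle<Object>::null();  // propagate the throw
      return object;  // (the real code goes on to call fun)
    }

    int main() {
      Handle<Object> obj;
      obj.location = &to_json_fun;
      assert(ApplyToJsonFunction(obj, /*getter_throws=*/true).is_null());
      assert(!ApplyToJsonFunction(obj, /*getter_throws=*/false).is_null());
    }
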
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index 0799deadfe..c21e6351d4 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -210,21 +210,6 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
- if (IS_ARRAY(replacer)) {
- // Deduplicate replacer array items.
- var property_list = new InternalArray();
- var seen_properties = {};
- var length = replacer.length;
- for (var i = 0; i < length; i++) {
- var item = replacer[i];
- if (IS_NUMBER(item)) item = %_NumberToString(item);
- if (IS_STRING(item) && !(item in seen_properties)) {
- property_list.push(item);
- seen_properties[item] = true;
- }
- }
- replacer = property_list;
- }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index d9057dbf56..edd2eacd3d 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -49,8 +49,6 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -3599,12 +3597,9 @@ class AlternativeGenerationList {
// The '2' variant has an inclusive 'from' and an exclusive 'to'.
-// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
-// which include WhiteSpace (7.2) or LineTerminator (7.3) values.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
- 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
- 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
- 0xFEFF, 0xFF00, 0x10000 };
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
+ 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
+ 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
static const int kWordRanges[] = {
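
As the comment above the table says, these are inclusive-from/exclusive-to pairs, and the trailing lone 0x10000 entry is a terminator one past the BMP. Membership in such a table is a scan over consecutive pairs; a self-contained version using the same data:

    #include <cassert>
    #include <cstddef>

    static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
        0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
        0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
    static const size_t kSpaceRangeCount =
        sizeof(kSpaceRanges) / sizeof(kSpaceRanges[0]);

    // Each even index is an inclusive lower bound, the next entry an
    // exclusive upper bound; the final lone entry is only a terminator.
    static bool InRangeList(const int* ranges, size_t count, int c) {
      for (size_t i = 0; i + 1 < count; i += 2) {
        if (c >= ranges[i] && c < ranges[i + 1]) return true;
      }
      return false;
    }

    int main() {
      assert(InRangeList(kSpaceRanges, kSpaceRangeCount, ' '));     // U+0020
      assert(InRangeList(kSpaceRanges, kSpaceRangeCount, 0x2028));  // LS
      assert(!InRangeList(kSpaceRanges, kSpaceRangeCount, 'a'));
    }
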
@@ -6090,14 +6085,9 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
-#elif V8_TARGET_ARCH_A64
- RegExpMacroAssemblerA64 macro_assembler(mode, (data->capture_count + 1) * 2,
- zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
-#else
-#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index 1d43b269f9..deee98877d 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -34,8 +34,6 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index eae2995695..48fa862c90 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -35,8 +35,6 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 9eecedc2f0..2d71d13c69 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -38,9 +38,6 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
@@ -107,9 +104,11 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (!value->position().IsUnknown()) {
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ if (value->position() != RelocInfo::kNoPosition) {
+ ASSERT(!graph()->info()->IsOptimizing() ||
+ !FLAG_emit_opt_code_positions ||
+ value->position() != RelocInfo::kNoPosition);
+ RecordAndWritePosition(value->position());
}
instr->CompileToNative(codegen);
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index ab1e630c70..b4f96290c7 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -41,9 +41,6 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/lithium-a64.h"
-#include "a64/lithium-codegen-a64.h"
#else
#error "Unknown architecture."
#endif
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 230c68ab0e..9fdf2ee7d8 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -72,14 +72,6 @@ const int kInvalidProtoDepth = -1;
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/constants-a64.h"
-#include "assembler.h"
-#include "a64/assembler-a64.h"
-#include "a64/assembler-a64-inl.h"
-#include "code.h" // must be after assembler_*.h
-#include "a64/macro-assembler-a64.h"
-#include "a64/macro-assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 26a1a960c8..f38fa5ef1f 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -67,7 +67,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
compacting_(false),
was_marked_incrementally_(false),
sweeping_pending_(false),
- pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
@@ -570,27 +569,6 @@ void MarkCompactCollector::ClearMarkbits() {
}
-class MarkCompactCollector::SweeperTask : public v8::Task {
- public:
- SweeperTask(Heap* heap, PagedSpace* space)
- : heap_(heap), space_(space) {}
-
- virtual ~SweeperTask() {}
-
- private:
- // v8::Task overrides.
- virtual void Run() V8_OVERRIDE {
- heap_->mark_compact_collector()->SweepInParallel(space_);
- heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
- }
-
- Heap* heap_;
- PagedSpace* space_;
-
- DISALLOW_COPY_AND_ASSIGN(SweeperTask);
-};
-
-
void MarkCompactCollector::StartSweeperThreads() {
// TODO(hpayer): This check is just used for debugging purposes and
// should be removed or turned into an assert after investigating the
@@ -601,14 +579,6 @@ void MarkCompactCollector::StartSweeperThreads() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
- if (FLAG_job_based_sweeping) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->old_data_space()),
- v8::Platform::kShortRunningTask);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->old_pointer_space()),
- v8::Platform::kShortRunningTask);
- }
}
@@ -617,12 +587,6 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
- if (FLAG_job_based_sweeping) {
- // Wait twice for both jobs.
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
- }
- ParallelSweepSpacesComplete();
sweeping_pending_ = false;
RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
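
The removed semaphore waits pair with the SweeperTask deletion above: each job-based sweep signalled pending_sweeper_jobs_semaphore_ once, and the main thread waited once per job, hence "wait twice for both jobs". The latch pattern in portable C++, as a toy (V8 has its own Semaphore and task platform):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    // A counting latch, like the semaphore being removed: each background
    // job signals once; the coordinator waits once per job.
    class Semaphore {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    int main() {
      Semaphore done;
      std::thread job1([&] { /* SweepInParallel(old data space) */
                             done.Signal(); });
      std::thread job2([&] { /* SweepInParallel(old pointer space) */
                             done.Signal(); });
      done.Wait();  // wait twice, once per job
      done.Wait();
      job1.join();
      job2.join();
    }
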
@@ -652,7 +616,7 @@ intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
+ return isolate()->sweeper_threads() != NULL;
}
@@ -2654,6 +2618,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
Object* prototype = prototype_transitions->get(proto_offset + i * step);
Object* cached_map = prototype_transitions->get(map_offset + i * step);
if (IsMarked(prototype) && IsMarked(cached_map)) {
+ ASSERT(!prototype->IsUndefined());
int proto_index = proto_offset + new_number_of_transitions * step;
int map_index = map_offset + new_number_of_transitions * step;
if (new_number_of_transitions != i) {
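
Context for the new assert: this loop compacts the prototype-transitions table in place, where entry i keeps its prototype at proto_offset + i * step and its cached map at map_offset + i * step, and live pairs slide down over dead ones. The compaction shape in isolation, over a plain struct instead of the strided FixedArray layout:

    #include <cstddef>
    #include <vector>

    // A transition entry: in the real FixedArray these live at
    // proto_offset + i * step and map_offset + i * step.
    struct Transition { bool live; const void* prototype; const void* cached_map; };

    // Keep live entries, sliding them down over dead ones; returns the new
    // count so the caller can shrink the recorded number of transitions.
    static int CompactPrototypeTransitions(std::vector<Transition>& entries) {
      int new_count = 0;
      for (std::size_t i = 0; i < entries.size(); ++i) {
        if (!entries[i].live) continue;
        if (static_cast<std::size_t>(new_count) != i) {
          entries[new_count] = entries[i];
        }
        ++new_count;
      }
      return new_count;
    }
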
@@ -3421,6 +3386,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpace();
}
+ // We have to traverse our allocation sites scratchpad, which contains raw
+ // pointers, before we move objects. During new space evacuation we
+ // gathered pretenuring statistics; the allocation sites we found may not
+ // be valid after compacting old space.
+ heap()->ProcessPretenuringFeedback();
+
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
@@ -3947,11 +3919,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- // When parallel sweeping is active, the page will be marked after
- // sweeping by the main thread.
- if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
- p->MarkSweptConservatively();
- }
+ p->MarkSweptConservatively();
intptr_t freed_bytes = 0;
size_t size = 0;
@@ -4063,7 +4031,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
- ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+ ASSERT(p->parallel_sweeping() == 0);
ASSERT(!p->IsEvacuationCandidate());
// Clear sweeping flags indicating that marking bits are still intact.
@@ -4136,7 +4104,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
+ p->set_parallel_sweeping(1);
space->IncreaseUnsweptFreeBytes(p);
}
break;
@@ -4178,7 +4146,7 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (AreSweeperThreadsActivated()) {
+ if (isolate()->num_sweeper_threads() > 0) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
}
@@ -4228,24 +4196,6 @@ void MarkCompactCollector::SweepSpaces() {
}
-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) {
- p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
- p->MarkSweptConservatively();
- }
- }
-}
-
-
-void MarkCompactCollector::ParallelSweepSpacesComplete() {
- ParallelSweepSpaceComplete(heap()->old_pointer_space());
- ParallelSweepSpaceComplete(heap()->old_data_space());
-}
-
-
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate()->debug()->IsLoaded() ||
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index c966e2018e..0773d02666 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -744,8 +744,6 @@ class MarkCompactCollector {
void MarkAllocationSite(AllocationSite* site);
private:
- class SweeperTask;
-
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
@@ -793,8 +791,6 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
- Semaphore pending_sweeper_jobs_semaphore_;
-
bool sequential_sweeping_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -944,12 +940,6 @@ class MarkCompactCollector {
void SweepSpace(PagedSpace* space, SweeperType sweeper);
- // Finalizes the parallel sweeping phase. Marks all the pages that were
- // swept in parallel.
- void ParallelSweepSpacesComplete();
-
- void ParallelSweepSpaceComplete(PagedSpace* space);
-
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 0077d0309f..3f4484a098 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -61,6 +61,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
+ Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
Handle<String> type_handle = factory->InternalizeUtf8String(type);
@@ -81,6 +82,10 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
script_handle = GetScriptWrapper(loc->script());
}
+ Handle<Object> stack_trace_handle = stack_trace.is_null()
+ ? Handle<Object>::cast(factory->undefined_value())
+ : Handle<Object>::cast(stack_trace);
+
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
@@ -91,6 +96,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
start,
end,
script_handle,
+ stack_trace_handle,
stack_frames_handle);
return message;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 2f4be518b2..5d84e46caa 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -95,6 +95,7 @@ class MessageHandler {
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
+ Handle<String> stack_trace,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 733fe95e2f..e9f1ae46c2 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -120,7 +120,7 @@ var kMessages = {
invalid_string_length: ["Invalid string length"],
invalid_typed_array_offset: ["Start offset is too large:"],
invalid_typed_array_length: ["Invalid typed array length"],
- invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%2"],
+ invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
typed_array_set_source_too_large:
["Source is too large"],
typed_array_set_negative_offset:
@@ -939,10 +939,14 @@ function CallSiteToString() {
if (this.isNative()) {
fileLocation = "native";
} else {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName && this.isEval()) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
+ if (this.isEval()) {
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
+ }
+ } else {
+ fileName = this.getFileName();
}
if (fileName) {
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e83447ada0..e38f181911 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -106,8 +106,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2, a3 };
- descriptor->register_param_count_ = 2;
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -3152,85 +3152,67 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
+ // Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : Feedback vector
- // a3 : slot in feedback vector (Smi)
- Label check_array, initialize_array, initialize_non_array, megamorphic, done;
+ // a2 : cache cell for call target
+ Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
- Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
- ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->null_value());
- Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
- // Load the cache state into t0.
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ // Load the cache state into a3.
+ __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, t0, Operand(a1));
- __ LoadRoot(at, kMegamorphicRootIndex);
- __ Branch(&done, eq, t0, Operand(at));
-
- // Check if we're dealing with the Array function or not.
- __ LoadArrayFunction(t1);
- __ Branch(&check_array, eq, a1, Operand(t1));
-
- // Non-array cache: Check the cache state.
- __ LoadRoot(at, kPremonomorphicRootIndex);
- __ Branch(&initialize_non_array, eq, t0, Operand(at));
- __ LoadRoot(at, kUninitializedRootIndex);
- __ Branch(&megamorphic, ne, t0, Operand(at));
-
- // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
- // immortal immovable object (null) so no write-barrier is needed.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, at);
- __ LoadRoot(at, kPremonomorphicRootIndex);
- __ Branch(USE_DELAY_SLOT, &done);
- __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); // In delay slot.
-
- // Array cache: Check the cache state to see if we're in a monomorphic
- // state where the state object is an AllocationSite object.
- __ bind(&check_array);
- __ lw(t1, FieldMemOperand(t0, 0));
+ __ Branch(&done, eq, a3, Operand(a1));
+
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ lw(t1, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&done, eq, t1, Operand(at));
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&megamorphic, ne, a1, Operand(a3));
+ __ jmp(&done);
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ LoadRoot(at, kUninitializedRootIndex);
- __ Branch(&initialize_array, eq, t0, Operand(at));
- __ LoadRoot(at, kPremonomorphicRootIndex);
- __ Branch(&initialize_array, eq, t0, Operand(at));
+ __ bind(&miss);
- // Both caches: Monomorphic -> megamorphic. The sentinel is an
- // immortal immovable object (undefined) so no write-barrier is needed.
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&initialize, eq, a3, Operand(at));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
__ bind(&megamorphic);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ LoadRoot(at, kMegamorphicRootIndex);
- __ Branch(USE_DELAY_SLOT, &done);
- __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); // In delay slot.
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ bind(&initialize_array);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ jmp(&done);
+
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // indicating the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(a3);
+ __ Branch(&not_array_function, ne, a1, Operand(a3));
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have one, and store it in
+ // the cell.
{
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs =
1 << 4 | // a0
1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7; // a3
+ 1 << 6; // a2
// Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -3244,17 +3226,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
__ Branch(&done);
- // Non-array cache: Premonomorphic -> monomorphic.
- __ bind(&initialize_non_array);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sw(a1, MemOperand(t0, 0));
-
- __ Push(t0, a2, a1);
- __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Pop(t0, a2, a1);
+ __ bind(&not_array_function);
+ __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
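
Stripped of the MIPS assembly, GenerateRecordCallTarget is a three-state transition on the cell: uninitialized (the-hole) -> monomorphic (the JSFunction, or an AllocationSite when the callee is the Array function) -> megamorphic (undefined). A compact C++ model of the same machine, with pointers standing in for heap values and the AllocationSite leg reduced to a comment:

    enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

    struct TargetCell {
      CacheState state = CacheState::kUninitialized;
      const void* target = nullptr;  // JSFunction* (or AllocationSite*)
    };

    static void RecordCallTarget(TargetCell& cell, const void* function,
                                 const void* array_function) {
      // Monomorphic hit or already megamorphic: leave the state unchanged.
      if (cell.state == CacheState::kMegamorphic ||
          (cell.state == CacheState::kMonomorphic && cell.target == function)) {
        return;
      }
      if (cell.state == CacheState::kMonomorphic) {
        // Monomorphic miss: go megamorphic; undefined is an immortal
        // immovable object, so the real store needs no write barrier.
        cell.state = CacheState::kMegamorphic;
        cell.target = nullptr;
        return;
      }
      // Uninitialized: cache the callee. When the callee is the Array
      // function, the real code stores an AllocationSite here instead so
      // later array allocations can be specialized by ElementsKind.
      cell.state = CacheState::kMonomorphic;
      cell.target = function;
      (void)array_function;  // see comment above
    }
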
@@ -3262,8 +3236,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
- // a2 : feedback vector
- // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ // a2 : cache cell for call target
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3272,8 +3245,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_function);
// Goto slow case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
@@ -3318,15 +3291,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
- __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, a2, Operand(t1));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
}
// Check for function proxy.
- __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
__ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
__ li(a2, Operand(0, RelocInfo::NONE32));
@@ -3366,22 +3337,21 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : feedback vector
- // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ // a2 : cache cell for call target
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = t0;
+ Register jmp_reg = a3;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3390,10 +3360,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // t0: object type
+ // a3: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -5391,7 +5361,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ Addu(a3, a3, Operand(1));
if (FLAG_debug_code) {
@@ -5498,8 +5468,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : feedback vector (fixed array or undefined)
- // -- a3 : slot index (if a2 is fixed array)
+ // -- a2 : type info cell
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5508,27 +5477,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ SmiTst(t0, at);
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(t0, t0, t1);
+ __ GetObjectType(a3, a3, t0);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t1, Operand(MAP_TYPE));
+ t0, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid fixed array.
+ // We should either have undefined in a2 or a valid cell.
Label okay_here;
- Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(t0, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedFixedArrayInRegisterA2,
- t0, Operand(fixed_array_map));
-
- // a3 should be a smi if we don't have undefined in a2
- __ AssertSmi(a3);
-
+ __ lw(a3, FieldMemOperand(a2, 0));
+ __ Assert(eq, kExpectedPropertyCellInRegisterA2,
+ a3, Operand(cell_map));
__ bind(&okay_here);
}
@@ -5536,11 +5501,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a2, a2, Operand(t0));
- __ lw(a2, FieldMemOperand(a2, FixedArray::kHeaderSize));
+ __ lw(a2, FieldMemOperand(a2, Cell::kValueOffset));
- // If the feedback vector is undefined, or contains anything other than an
+ // If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ lw(t0, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
@@ -5652,7 +5615,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
+ bool restore_context = RestoreContextBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5719,20 +5682,15 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- // Stores return the first js argument.
- int return_value_offset = 0;
- if (is_store) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
- MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- &context_restore_operand);
+ restore_context ?
+ &context_restore_operand : NULL);
}
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index b9bf69db42..1535231dd8 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -274,10 +274,9 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
- // -- a2 : feedback array
- // -- a3 : slot in feedback array
+ // -- a2 : cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
}
@@ -296,10 +295,9 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : feedback array
- // -- a3 : feedback slot (smi)
+ // -- a2 : cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
}
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 41bc68e6d0..18ee02dc5c 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -138,9 +138,6 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
- InitializeFeedbackVector();
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -682,7 +679,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -1047,7 +1044,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, clause->CompareId());
+ CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1090,7 +1087,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1176,13 +1172,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
- __ li(a1, FeedbackVector());
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
+ __ li(a1, cell);
+ __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1490,7 +1486,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
@@ -1503,8 +1499,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
- : "[ Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1569,12 +1566,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1706,7 +1703,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- CallStoreIC(key->LiteralFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -2114,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ lw(a1, MemOperand(sp, kPointerSize));
__ lw(a0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, TypeFeedbackId::None());
+ CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
@@ -2312,7 +2309,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2340,7 +2337,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2419,7 +2417,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -2457,7 +2456,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC();
+ CallStoreIC(NOT_CONTEXTUAL);
break;
}
case KEYED_PROPERTY: {
@@ -2477,28 +2476,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ Move(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, LanguageMode mode) {
- __ li(a1, Operand(name));
- __ li(a0, Operand(Smi::FromInt(mode)));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
-}
-
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
@@ -2506,30 +2483,36 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- CallStoreIC();
-
+ CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
+ if (var->IsStackLocal()) {
+ Label skip;
+ __ lw(a1, StackOperand(var));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a1, Operand(t0));
+ __ sw(result_register(), StackOperand(var));
+ __ bind(&skip);
+ } else {
+ ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are
+ // able to drill a hole to that function context, even from inside a
+ // 'with' context. We thus bypass the normal static scope lookup for
+ // var->IsContextSlot().
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, a1);
- __ lw(a2, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(at));
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
+ __ li(a1, Operand(var->name()));
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2542,16 +2525,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
+ if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2559,10 +2546,23 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- EmitStoreToStackLocalOrContextSlot(var, location);
+ // Perform the assignment.
+ __ sw(v0, location);
+ if (var->IsContextSlot()) {
+ __ mov(a3, v0);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+ } else {
+ ASSERT(var->IsLookupSlot());
+ __ li(a1, Operand(var->name()));
+ __ li(a0, Operand(Smi::FromInt(language_mode())));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
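
The hunks above undo a refactoring: the shared EmitStoreToStackLocalOrContextSlot helper is deleted and its body re-inlined at each assignment path, which is why the same store-then-barrier sequence now recurs throughout EmitVariableAssignment. The recurring shape, isolated for reference (registers as used in the MIPS code above; illustrative, not a replacement helper):

    __ sw(result_register(), location);  // perform the store itself
    if (var->IsContextSlot()) {
      // RecordWrite may destroy all its register arguments, so stash the
      // value in a scratch register before emitting the write barrier.
      __ mov(a3, result_register());
      int offset = Context::SlotOffset(var->index());
      __ RecordWriteContextSlot(
          a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
    }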
@@ -2578,7 +2578,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- CallStoreIC(expr->AssignmentFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2601,7 +2601,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->AssignmentFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2628,8 +2628,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
+ ContextualMode mode,
TypeFeedbackId id) {
ic_total_count_++;
+ ASSERT(mode != CONTEXTUAL || id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, id);
}
@@ -2739,15 +2741,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ li(a2, FeedbackVector());
- __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ li(a2, Operand(cell));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2926,10 +2928,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
- __ li(a2, FeedbackVector());
- __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ li(a2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
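
Both EmitCallWithStub and VisitCallNew revert here to the cell-based feedback scheme: allocate a Cell seeded with the uninitialized sentinel, associate it with the call's feedback id, and pass the cell to the stub in a2 so it can be patched as type information accumulates. The sequence, as it appears above:

    // Cell-based call-target recording (sketch of the code above).
    Handle<Object> uninitialized =
        TypeFeedbackCells::UninitializedSentinel(isolate());
    Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
    RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
    __ li(a2, Operand(cell));  // the stub patches this cell at runtime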
@@ -4469,7 +4471,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ NOT_CONTEXTUAL,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4499,7 +4503,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- CallStoreIC(expr->CountStoreFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4516,7 +4520,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->CountStoreFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4536,7 +4540,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4545,7 +4549,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4702,7 +4705,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4736,7 +4739,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 4088ea4f4f..14d1cd6827 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -229,8 +229,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -339,7 +338,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -347,7 +347,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_state,
+ Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -647,7 +649,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -1178,7 +1180,8 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1187,7 +1190,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
+ Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index e4fbe1fcd6..d34344c83f 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -267,8 +267,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -860,7 +859,6 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -3969,18 +3967,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
-
- // We know that value is a smi now, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4010,6 +4002,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -5201,7 +5196,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size,
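
The DoAllocate hunk adds a size guard: the inline bump-pointer fast path emitted by __ Allocate only handles objects that fit in a regular page, so a constant size above Page::kMaxRegularHeapObjectSize now jumps straight to the deferred entry and lets the runtime allocator handle it. In sketch form:

    // Constant-size allocation policy from the hunk above (illustrative).
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      // Small enough for an inline bump-pointer allocation.
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      // Too large for a regular page; take the deferred runtime path.
      __ jmp(deferred->entry());
    }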
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index a194c29eab..bb8e7502d8 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -848,6 +848,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
+ if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 258502dc26..6cc13bf4c2 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -2554,6 +2554,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_block_(NULL),
next_block_(NULL),
allocator_(allocator),
+ position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
@@ -2689,6 +2690,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
+ int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c62b9f5322..69a2a3dc4b 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3440,8 +3440,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&is_nan);
// Load canonical NaN for storing into the double array.
LoadRoot(at, Heap::kNanValueRootIndex);
- lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
- lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kValueOffset + 4));
jmp(&have_double_value);
bind(&smi_value);
@@ -4346,8 +4346,16 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
+  // We want to pass the msg string like a smi to avoid GC
+  // problems; however, msg is not guaranteed to be properly
+  // aligned. Instead, we pass an aligned pointer that is a
+  // proper v8 smi, and also pass the alignment difference
+  // from the real pointer as a smi.
const char* msg = GetBailoutReason(reason);
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4359,16 +4367,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(Smi::FromInt(reason)));
+ li(a0, Operand(p0));
+ push(a0);
+ li(a0, Operand(Smi::FromInt(p1 - p0)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4376,8 +4386,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 10, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 10;
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 14;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
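
The Abort change passes the bailout message as two Smis because a raw char* is not guaranteed to be Smi-aligned, and an untagged pointer on the stack would confuse the GC. Stripped of the assembler plumbing, and assuming the usual kSmiTag == 0 / kSmiTagMask == 1 encoding, the arithmetic is:

    // Encode an arbitrary pointer as two GC-safe Smis (sketch).
    intptr_t p1 = reinterpret_cast<intptr_t>(msg);  // may be unaligned
    intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;    // rounded down: a valid Smi
    intptr_t delta = p1 - p0;                       // 0 or 1, also a valid Smi
    // The runtime recovers the real pointer as p0 + delta, so no untagged
    // pointer is ever live across the CallRuntime(Runtime::kAbort, 2).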
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 92a0a87d24..d9fd10f245 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -203,10 +203,6 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
- Address get_sp() {
- return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
- }
-
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 7e3c801399..d1b428a345 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -770,14 +770,13 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- bool is_store,
- int argc,
- Register* values) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
// Preparing to push, adjust sp.
__ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
@@ -844,7 +843,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ CallApiFunctionStub stub(true, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -1065,6 +1064,15 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization,
+ Handle<Map> receiver_map) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch3(), 0, NULL);
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1238,6 +1246,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -1296,6 +1322,21 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
+ Label miss;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1303,6 +1344,10 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
+ // Handle store cache miss.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1442,10 +1487,11 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
Label number_case;
+ Register match = scratch1();
Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
+ __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
- Register map_reg = scratch1();
+ Register map_reg = scratch2();
int receiver_count = types->length();
int number_of_handled_maps = 0;
@@ -1455,12 +1501,15 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
+      // Check the map and tail call if there's a match.
+      // Separate the compare from the branch to provide a landing path
+      // for the JumpIfSmi() above.
+ __ Subu(match, map_reg, Operand(map));
if (type->Is(HeapType::Number())) {
ASSERT(!number_case.is_unused());
__ bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(map));
+ eq, match, Operand(zero_reg));
}
}
ASSERT(number_of_handled_maps != 0);
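
The CompilePolymorphicIC rewrite is a register-sharing trick: JumpIfSmi is asked to clear the match register when the receiver is a Smi, and every map check computes map_reg - map into that same register, so the Smi path and a successful map comparison converge on the single condition match == 0. Reduced to its core:

    // Shared zero-test dispatch (sketch of the hunk above).
    Register match = scratch1();
    __ JumpIfSmi(receiver(), smi_target, match);   // sets match to 0 on Smi
    Register map_reg = scratch2();
    // ... map_reg holds the receiver's map by the time the loop runs ...
    __ Subu(match, map_reg, Operand(map));         // 0 iff the maps are equal
    __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
            eq, match, Operand(zero_reg));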
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 468da31ec2..499b27eca1 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -390,13 +390,11 @@ function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
}
var callbackInfo = CallbackInfoNormalize(callback);
- if (IS_NULL(observationState.pendingObservers)) {
+ if (!observationState.pendingObservers)
observationState.pendingObservers = nullProtoObject();
- GetMicrotaskQueue().push(ObserveMicrotaskRunner);
- %SetMicrotaskPending(true);
- }
observationState.pendingObservers[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
+ %SetMicrotaskPending(true);
}
function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
@@ -585,6 +583,7 @@ function ObserveMicrotaskRunner() {
}
}
}
+RunMicrotasks.runners.push(ObserveMicrotaskRunner);
function SetupObjectObserve() {
%CheckIsBootstrapping();
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 4f59a1a5a2..e33b46be79 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -367,7 +367,7 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(feedback_vector());
+ VerifyHeapPointer(type_feedback_cells());
}
@@ -490,6 +490,7 @@ void JSMessageObject::JSMessageObjectVerify() {
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
VerifyObjectField(kScriptOffset);
+ VerifyObjectField(kStackTraceOffset);
VerifyObjectField(kStackFramesOffset);
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index ffec178d42..65c46f0af3 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -59,7 +59,7 @@ PropertyDetails::PropertyDetails(Smi* smi) {
}
-Smi* PropertyDetails::AsSmi() const {
+Smi* PropertyDetails::AsSmi() {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
@@ -67,7 +67,7 @@ Smi* PropertyDetails::AsSmi() const {
}
-PropertyDetails PropertyDetails::AsDeleted() const {
+PropertyDetails PropertyDetails::AsDeleted() {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -760,6 +760,16 @@ bool Object::IsDependentCode() {
}
+bool Object::IsTypeFeedbackCells() {
+ if (!IsFixedArray()) return false;
+  // There's actually no way to tell the difference between a fixed array
+  // and a cache cells array. Since this is only used for asserts, we can
+  // at least check that the length is plausible.
+ if (FixedArray::cast(this)->length() % 2 != 0) return false;
+ return true;
+}
+
+
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
@@ -927,8 +937,7 @@ bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result ||
- HeapObject::cast(this)->map()->is_access_check_needed());
+ ASSERT(!result || IsAccessCheckNeeded());
return result;
}
@@ -953,14 +962,8 @@ bool Object::IsUndetectableObject() {
bool Object::IsAccessCheckNeeded() {
- if (!IsHeapObject()) return false;
- if (IsJSGlobalProxy()) {
- JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- GlobalObject* global =
- proxy->GetIsolate()->context()->global_object();
- return proxy->IsDetachedFrom(global);
- }
- return HeapObject::cast(this)->map()->is_access_check_needed();
+ return IsHeapObject()
+ && HeapObject::cast(this)->map()->is_access_check_needed();
}
@@ -1561,7 +1564,9 @@ inline bool AllocationSite::DigestPretenuringFeedback() {
set_pretenure_decision(result);
if (current_mode != GetPretenureMode()) {
decision_changed = true;
- set_deopt_dependent_code(true);
+ dependent_code()->MarkCodeForDeoptimization(
+ GetIsolate(),
+ DependentCode::kAllocationSiteTenuringChangedGroup);
}
}
@@ -2792,6 +2797,7 @@ CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
+CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
@@ -4205,16 +4211,30 @@ InlineCacheState Code::ic_state() {
ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
+ || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
+ExtraICState Code::extended_extra_ic_state() {
+ ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ ASSERT(needs_extended_extra_ic_state(kind()));
+ return ExtractExtendedExtraICStateFromFlags(flags());
+}
+
+
Code::StubType Code::type() {
return ExtractTypeFromFlags(flags());
}
+int Code::arguments_count() {
+ ASSERT(kind() == STUB || is_handler());
+ return ExtractArgumentsCountFromFlags(flags());
+}
+
+
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4418,7 +4438,7 @@ void Code::set_back_edges_patched_for_osr(bool value) {
byte Code::to_boolean_state() {
- return extra_ic_state();
+ return extended_extra_ic_state();
}
@@ -4489,13 +4509,18 @@ Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
+ int argc,
InlineCacheHolderFlag holder) {
+ ASSERT(argc <= Code::kMaxArguments);
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtraICStateField::encode(extra_ic_state)
+ | ExtendedExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
+ if (!Code::needs_extended_extra_ic_state(kind)) {
+ bits |= (argc << kArgumentsCountShift);
+ }
return static_cast<Flags>(bits);
}
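
The flag computations above are built from v8's BitField template. For readers without the BitField header at hand, a simplified stand-in that shows what encode/decode mean (a hypothetical reduction, not the real class):

    // Hypothetical, simplified BitField: pack a value of type T into
    // 'size' bits starting at bit 'shift' of an unsigned word.
    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned flags) {
        return static_cast<T>((flags & kMask) >> shift);
      }
    };

ComputeFlags above simply ORs such encodings together, and packs argc manually into the high bits only when the kind does not use the extended extra-IC-state field, since the two share storage.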
@@ -4503,15 +4528,9 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
- StubType type) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
-}
-
-
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
- StubType type,
- InlineCacheHolderFlag holder) {
- return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
+ StubType type,
+ int argc) {
+ return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@@ -4530,11 +4549,22 @@ ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
+ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
+ Flags flags) {
+ return ExtendedExtraICStateField::decode(flags);
+}
+
+
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
+int Code::ExtractArgumentsCountFromFlags(Flags flags) {
+ return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
+}
+
+
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
@@ -5656,6 +5686,7 @@ JSDate* JSDate::cast(Object* obj) {
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
+ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -6538,28 +6569,43 @@ MaybeObject* ConstantPoolArray::Copy() {
}
-Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
+void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
+ set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
-Handle<Object> TypeFeedbackInfo::PremonomorphicSentinel(Isolate* isolate) {
- return isolate->factory()->null_value();
+TypeFeedbackId TypeFeedbackCells::AstId(int index) {
+ return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
+}
+
+
+void TypeFeedbackCells::SetCell(int index, Cell* cell) {
+ set(index * 2, cell);
+}
+
+
+Cell* TypeFeedbackCells::GetCell(int index) {
+ return Cell::cast(get(index * 2));
+}
+
+
+Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->the_hole_value();
}
-Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
+Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->undefined_value();
}
-Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
+Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
}
-Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
+Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
return heap->the_hole_value();
}
@@ -6642,8 +6688,8 @@ bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
}
-ACCESSORS(TypeFeedbackInfo, feedback_vector, FixedArray,
- kFeedbackVectorOffset)
+ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
+ kTypeFeedbackCellsOffset)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
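
Together with Code::ClearTypeFeedbackCells later in this diff, the accessors above cover the whole life cycle of a feedback cell: even slots hold the Cells, odd slots the Smi-encoded AST ids, and a GC-time sweep resets every cell that is not caching an AllocationSite. A usage sketch (info and heap stand in for the enclosing TypeFeedbackInfo and Heap):

    // Reset stale feedback while keeping AllocationSite caches alive.
    TypeFeedbackCells* cells = info->type_feedback_cells();
    for (int i = 0; i < cells->CellCount(); i++) {
      Cell* cell = cells->GetCell(i);  // stored at slot 2 * i
      Object* value = cell->value();
      if (value == NULL || !value->IsAllocationSite()) {
        cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
      }
    }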
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index e9fb83258a..909d8f7421 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -555,8 +555,8 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, " - feedback_vector: ");
- feedback_vector()->FixedArrayPrint(out);
+ PrintF(out, " - type_feedback_cells: ");
+ type_feedback_cells()->FixedArrayPrint(out);
}
@@ -624,6 +624,8 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
PrintF(out, "\n - end_position: %d", end_position());
PrintF(out, "\n - script: ");
script()->ShortPrint(out);
+ PrintF(out, "\n - stack_trace: ");
+ stack_trace()->ShortPrint(out);
PrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
PrintF(out, "\n");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 9c3378357d..5201a7b318 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -427,7 +427,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackInfo(heap);
+ code->ClearTypeFeedbackCells(heap);
}
if (FLAG_age_code && !Serializer::enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 3156edc142..15c12db4e0 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -687,7 +687,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
-Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
+Object* JSObject::GetNormalizedProperty(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
@@ -699,7 +699,7 @@ Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result,
+ LookupResult* result,
Handle<Object> value) {
ASSERT(!object->HasFastProperties());
NameDictionary* property_dictionary = object->property_dictionary();
@@ -732,7 +732,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<NameDictionary> property_dictionary(object->property_dictionary());
if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizeString(
+ name = object->GetIsolate()->factory()->InternalizedStringFromString(
Handle<String>::cast(name));
}
@@ -2152,7 +2152,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizeString(
+ name = isolate->factory()->InternalizedStringFromString(
Handle<String>::cast(name));
}
@@ -3135,7 +3135,7 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->InternalizeString(
+ isolate->factory()->InternalizedStringFromString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -10631,18 +10631,18 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
}
-void Code::ClearTypeFeedbackInfo(Heap* heap) {
+void Code::ClearTypeFeedbackCells(Heap* heap) {
if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
- FixedArray* feedback_vector =
- TypeFeedbackInfo::cast(raw_info)->feedback_vector();
- for (int i = 0; i < feedback_vector->length(); i++) {
- Object* obj = feedback_vector->get(i);
- if (!obj->IsAllocationSite()) {
- // TODO(mvstanton): Can't I avoid a write barrier for this sentinel?
- feedback_vector->set(i,
- TypeFeedbackInfo::RawUninitializedSentinel(heap));
+ TypeFeedbackCells* type_feedback_cells =
+ TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+ for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+ Cell* cell = type_feedback_cells->GetCell(i);
+ // Don't clear AllocationSites
+ Object* value = cell->value();
+ if (value == NULL || !value->IsAllocationSite()) {
+ cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
}
}
}
@@ -11091,7 +11091,8 @@ void Code::Disassemble(const char* name, FILE* out) {
}
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), extra_ic_state());
+ PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
+ extended_extra_ic_state() : extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
@@ -11564,7 +11565,7 @@ Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
cache->set(entry + kProtoTransitionPrototypeOffset, *prototype);
cache->set(entry + kProtoTransitionMapOffset, *target_map);
- map->SetNumberOfProtoTransitions(transitions);
+ map->SetNumberOfProtoTransitions(last + 1);
return map;
}
@@ -11763,14 +11764,23 @@ bool DependentCode::MarkCodeForDeoptimization(
// Mark all the code that needs to be deoptimized.
bool marked = false;
for (int i = start; i < end; i++) {
- if (is_code_at(i)) {
- Code* code = code_at(i);
+ Object* object = object_at(i);
+    // TODO(hpayer): This is a temporary hack. Foreign objects move after
+    // new-space evacuation. Since pretenuring may mark these objects as
+    // aborted, we have to follow the forwarding pointer in that case.
+ MapWord map_word = HeapObject::cast(object)->map_word();
+ if (map_word.IsForwardingAddress()) {
+ object = map_word.ToForwardingAddress();
+ }
+ if (object->IsCode()) {
+ Code* code = Code::cast(object);
if (!code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
marked = true;
}
} else {
- CompilationInfo* info = compilation_info_at(i);
+ CompilationInfo* info = reinterpret_cast<CompilationInfo*>(
+ Foreign::cast(object)->foreign_address());
info->AbortDueToDependencyChange();
}
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 05ab695de8..1b40752507 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -37,9 +37,7 @@
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_A64
-#include "a64/constants-a64.h"
-#elif V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
@@ -1040,6 +1038,7 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
+ V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(ConstantPoolArray) \
@@ -1130,9 +1129,6 @@ class MaybeObject BASE_EMBEDDED {
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
- V(kCopyBuffersOverlap, "Copy buffers overlap") \
- V(kCouldNotGenerateZero, "Could not generate +0.0") \
- V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerIsActive, "Debugger is active") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
@@ -1145,32 +1141,18 @@ class MaybeObject BASE_EMBEDDED {
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
- V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "Expected alignment marker") \
- V(kExpectedAllocationSite, "Expected allocation site") \
- V(kExpectedFunctionObject, "Expected function object in register") \
- V(kExpectedHeapNumber, "Expected HeapNumber") \
- V(kExpectedNativeContext, "Expected native context") \
- V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
- V(kExpectedNonNullContext, "Expected non-null context") \
- V(kExpectedPositiveZero, "Expected +0.0") \
- V(kExpectedAllocationSiteInCell, \
- "Expected AllocationSite in property cell") \
- V(kExpectedFixedArrayInFeedbackVector, \
- "Expected fixed array in feedback vector") \
- V(kExpectedFixedArrayInRegisterA2, \
- "Expected fixed array in register a2") \
- V(kExpectedFixedArrayInRegisterEbx, \
- "Expected fixed array in register ebx") \
- V(kExpectedFixedArrayInRegisterR2, \
- "Expected fixed array in register r2") \
- V(kExpectedFixedArrayInRegisterRbx, \
- "Expected fixed array in register rbx") \
- V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
+ V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAllocationSite, "expected allocation site") \
+ V(kExpectedPropertyCellInRegisterA2, \
+ "Expected property cell in register a2") \
+ V(kExpectedPropertyCellInRegisterEbx, \
+ "Expected property cell in register ebx") \
+ V(kExpectedPropertyCellInRegisterRbx, \
+ "Expected property cell in register rbx") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
@@ -1215,7 +1197,6 @@ class MaybeObject BASE_EMBEDDED {
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
- V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
@@ -1229,7 +1210,6 @@ class MaybeObject BASE_EMBEDDED {
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
- V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
@@ -1242,10 +1222,7 @@ class MaybeObject BASE_EMBEDDED {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
- V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
- V(kLiveEditFrameDroppingIsNotSupportedOnA64, \
- "LiveEdit frame dropping is not supported on a64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
@@ -1281,7 +1258,6 @@ class MaybeObject BASE_EMBEDDED {
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
"Oddball in string table is not undefined or the hole") \
- V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
@@ -1297,7 +1273,6 @@ class MaybeObject BASE_EMBEDDED {
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
- V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
@@ -1307,37 +1282,24 @@ class MaybeObject BASE_EMBEDDED {
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
- V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
- V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
- V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
- V(kTheCurrentStackPointerIsBelowCsp, \
- "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
- V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
- "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
- V(kTheSourceAndDestinationAreTheSame, \
- "The source and destination are the same") \
- V(kTheStackWasCorruptedByMacroAssemblerCall, \
- "The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
- V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
@@ -1346,12 +1308,10 @@ class MaybeObject BASE_EMBEDDED {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
- V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
- V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
@@ -1378,20 +1338,16 @@ class MaybeObject BASE_EMBEDDED {
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
- V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
- V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
- V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
- V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
@@ -2284,12 +2240,12 @@ class JSObject: public JSReceiver {
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(const LookupResult* result);
+ Object* GetNormalizedProperty(LookupResult* result);
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
- const LookupResult* result,
+ LookupResult* result,
Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
@@ -2360,6 +2316,10 @@ class JSObject: public JSReceiver {
// been modified since it was created. May give false positives.
bool IsDirty();
+ // If the receiver is a JSGlobalProxy this method will return its prototype,
+ // otherwise the result is the receiver itself.
+ inline Object* BypassGlobalProxy();
+
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
@@ -4991,8 +4951,7 @@ class DeoptimizationInputData: public FixedArray {
static const int kLiteralArrayIndex = 2;
static const int kOsrAstIdIndex = 3;
static const int kOsrPcOffsetIndex = 4;
- static const int kOptimizationIdIndex = 5;
- static const int kFirstDeoptEntryIndex = 6;
+ static const int kFirstDeoptEntryIndex = 5;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -5015,7 +4974,6 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
- DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -5111,6 +5069,49 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
class Cell;
class PropertyCell;
+
+// TypeFeedbackCells is a fixed array used to hold the association between
+// cache cells and AST ids for code generated by the full compiler.
+// The format of these objects is:
+//   [i * 2]:     Global property cell of the ith cache cell.
+//   [i * 2 + 1]: AST id of the ith cache cell.
+class TypeFeedbackCells: public FixedArray {
+ public:
+ int CellCount() { return length() / 2; }
+ static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
+
+ // Accessors for AST ids associated with cache values.
+ inline TypeFeedbackId AstId(int index);
+ inline void SetAstId(int index, TypeFeedbackId id);
+
+ // Accessors for global property cells holding the cache values.
+ inline Cell* GetCell(int index);
+ inline void SetCell(int index, Cell* cell);
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+  // The object that indicates a monomorphic state of an Array with a
+  // particular ElementsKind.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ // Casting.
+ static inline TypeFeedbackCells* cast(Object* obj);
+
+ static const int kForInFastCaseMarker = 0;
+ static const int kForInSlowCaseMarker = 1;
+};
+
+
+// Forward declaration.
class SafepointEntry;
class TypeFeedbackInfo;
@@ -5229,10 +5230,24 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
+ inline Kind handler_kind() {
+ return static_cast<Kind>(arguments_count());
+ }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
+ inline ExtraICState extended_extra_ic_state(); // Only valid for
+ // non-call IC stubs.
+ static bool needs_extended_extra_ic_state(Kind kind) {
+ // TODO(danno): This is a bit of a hack right now since there are still
+ // clients of this API that pass "extra" values in for argc. These clients
+ // should be retrofitted to used ExtendedExtraICState.
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == BINARY_OP_IC;
+ }
+
inline StubType type(); // Only valid for monomorphic IC stubs.
+ inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -5371,24 +5386,23 @@ class Code: public HeapObject {
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
StubType type = NORMAL,
+ int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
- StubType type = NORMAL);
-
- static inline Flags ComputeHandlerFlags(
- Kind handler_kind,
StubType type = NORMAL,
- InlineCacheHolderFlag holder = OWN_MAP);
+ int argc = -1);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
+ static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
+ static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -5458,7 +5472,7 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearInlineCaches(Kind kind);
- void ClearTypeFeedbackInfo(Heap* heap);
+ void ClearTypeFeedbackCells(Heap* heap);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
@@ -5553,8 +5567,10 @@ class Code: public HeapObject {
class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
class KindField: public BitField<Kind, 6, 4> {};
// TODO(bmeurer): Bit 10 is available for free use. :-)
- class ExtraICStateField: public BitField<ExtraICState, 11,
+ class ExtraICStateField: public BitField<ExtraICState, 11, 6> {};
+ class ExtendedExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
+ STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -5608,9 +5624,20 @@ class Code: public HeapObject {
class BackEdgesPatchedForOSRField: public BitField<bool,
kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
- static const int kArgumentsBits = 16;
+ // Signed field cannot be encoded using the BitField class.
+ static const int kArgumentsCountShift = 17;
+ static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
+ static const int kArgumentsBits =
+ PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
+ // ICs can use either argument count or ExtendedExtraIC, since their storage
+ // overlaps.
+ STATIC_ASSERT(ExtraICStateField::kShift +
+ ExtraICStateField::kSize + kArgumentsBits ==
+ ExtendedExtraICStateField::kShift +
+ ExtendedExtraICStateField::kSize);
+
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
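
The STATIC_ASSERT pair above is easiest to verify with concrete numbers. On a 32-bit target where PlatformSmiTagging::kSmiValueSize is 31 (an assumption for this worked check):

    // ExtraICStateField:         shift 11, size 6            -> bits [11, 17)
    // argc:                      shift 17, 31 - 17 + 1 = 15  -> bits [17, 32)
    // ExtendedExtraICStateField: shift 11, 31 - 11 + 1 = 21  -> bits [11, 32)
    //
    // 11 + 6 + 15 == 32 == 11 + 21: the plain field plus the argument count
    // tile exactly the bits of the extended field, so the two encodings can
    // overlap without clobbering each other.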
@@ -7776,6 +7803,9 @@ class JSMessageObject: public JSObject {
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
+ // [stack_trace]: the stack trace for this error message.
+ DECL_ACCESSORS(stack_trace, Object)
+
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -7798,7 +7828,8 @@ class JSMessageObject: public JSObject {
static const int kTypeOffset = JSObject::kHeaderSize;
static const int kArgumentsOffset = kTypeOffset + kPointerSize;
static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackFramesOffset = kScriptOffset + kPointerSize;
+ static const int kStackTraceOffset = kScriptOffset + kPointerSize;
+ static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
static const int kSize = kEndPositionOffset + kPointerSize;
@@ -8157,7 +8188,7 @@ class TypeFeedbackInfo: public Struct {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECL_ACCESSORS(feedback_vector, FixedArray)
+ DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
static inline TypeFeedbackInfo* cast(Object* obj);
@@ -8167,30 +8198,8 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kFeedbackVectorOffset =
- kStorage2Offset + kPointerSize;
- static const int kSize = kFeedbackVectorOffset + kPointerSize;
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a cache in pre-monomorphic state.
- static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
+ static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
+ static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
private:
static const int kTypeChangeChecksumBits = 7;
@@ -8253,9 +8262,8 @@ class AllocationSite: public Struct {
class DoNotInlineBit: public BitField<bool, 29, 1> {};
// Bitfields for pretenure_data
- class MementoFoundCountBits: public BitField<int, 0, 27> {};
- class PretenureDecisionBits: public BitField<PretenureDecision, 27, 2> {};
- class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
+ class MementoFoundCountBits: public BitField<int, 0, 28> {};
+ class PretenureDecisionBits: public BitField<PretenureDecision, 28, 2> {};
STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
// Increments the mementos found counter and returns true when the first
@@ -8280,18 +8288,6 @@ class AllocationSite: public Struct {
SKIP_WRITE_BARRIER);
}
- bool deopt_dependent_code() {
- int value = pretenure_data()->value();
- return DeoptDependentCodeBit::decode(value);
- }
-
- void set_deopt_dependent_code(bool deopt) {
- int value = pretenure_data()->value();
- set_pretenure_data(
- Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
- SKIP_WRITE_BARRIER);
- }
-
int memento_found_count() {
int value = pretenure_data()->value();
return MementoFoundCountBits::decode(value);
@@ -8631,7 +8627,7 @@ class Name: public HeapObject {
// kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const unsigned int kContainsCachedArrayIndexMask =
+ static const int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
@@ -10650,7 +10646,6 @@ class BreakPointInfo: public Struct {
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSmiRootList, "smi_root_list", "(Smi roots)") \
V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 409059778a..5e7680e6c1 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -46,6 +46,49 @@
namespace v8 {
namespace internal {
+// PositionStack is used for on-stack allocation of token positions for
+// new expressions; see ParseNewExpression.
+
+class PositionStack {
+ public:
+ explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
+ ~PositionStack() {
+ ASSERT(!*ok_ || is_empty());
+ USE(ok_);
+ }
+
+ class Element {
+ public:
+ Element(PositionStack* stack, int value) {
+ previous_ = stack->top();
+ value_ = value;
+ stack->set_top(this);
+ }
+
+ private:
+ Element* previous() { return previous_; }
+ int value() { return value_; }
+ friend class PositionStack;
+ Element* previous_;
+ int value_;
+ };
+
+ bool is_empty() { return top_ == NULL; }
+ int pop() {
+ ASSERT(!is_empty());
+ int result = top_->value();
+ top_ = top_->previous();
+ return result;
+ }
+
+ private:
+ Element* top() { return top_; }
+ void set_top(Element* value) { top_ = value; }
+ Element* top_;
+ bool* ok_;
+};
+
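+// A minimal usage sketch (illustration only, not part of this patch):
+//
+//   bool ok = true;
+//   PositionStack stack(&ok);
+//   {
+//     PositionStack::Element outer(&stack, 10);  // pushes position 10
+//     PositionStack::Element inner(&stack, 42);  // pushes position 42
+//     stack.pop();  // returns 42
+//     stack.pop();  // returns 10
+//   }
+//
+// The destructor ASSERTs that every pushed position was popped unless
+// parsing failed (!*ok), so unbalanced pushes are caught in debug builds.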
+
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
@@ -213,12 +256,12 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
// if there is some preparser data.
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner()->is_literal_ascii()) {
+ if (scanner().is_literal_ascii()) {
return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner()->literal_ascii_string()));
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
return isolate()->factory()->InternalizeTwoByteString(
- scanner()->literal_utf16_string());
+ scanner().literal_utf16_string());
}
}
return LookupCachedSymbol(symbol_id);
@@ -234,12 +277,12 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
- if (scanner()->is_literal_ascii()) {
+ if (scanner().is_literal_ascii()) {
result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner()->literal_ascii_string()));
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
result = isolate()->factory()->InternalizeTwoByteString(
- scanner()->literal_utf16_string());
+ scanner().literal_utf16_string());
}
symbol_cache_.at(symbol_id) = result;
return result;
@@ -420,6 +463,54 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
+// FunctionState and BlockState together implement the parser's scope stack.
+// The parser's current scope is in top_scope_. The BlockState and
+// FunctionState constructors push on the scope stack and the destructors
+// pop. They are also used to hold the parser's per-function and per-block
+// state.
+
+class Parser::BlockState BASE_EMBEDDED {
+ public:
+ BlockState(Parser* parser, Scope* scope)
+ : parser_(parser),
+ outer_scope_(parser->top_scope_) {
+ parser->top_scope_ = scope;
+ }
+
+ ~BlockState() { parser_->top_scope_ = outer_scope_; }
+
+ private:
+ Parser* parser_;
+ Scope* outer_scope_;
+};
+
+
+Parser::FunctionState::FunctionState(Parser* parser, Scope* scope)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ generator_object_variable_(NULL),
+ parser_(parser),
+ outer_function_state_(parser->current_function_state_),
+ outer_scope_(parser->top_scope_),
+ saved_ast_node_id_(parser->zone()->isolate()->ast_node_id()),
+ factory_(parser->zone()) {
+ parser->top_scope_ = scope;
+ parser->current_function_state_ = this;
+ parser->zone()->isolate()->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+}
+
+
+Parser::FunctionState::~FunctionState() {
+ parser_->top_scope_ = outer_scope_;
+ parser_->current_function_state_ = outer_function_state_;
+ if (outer_function_state_ != NULL) {
+ parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
+ }
+}
+
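+// Both classes follow the same RAII pattern; a minimal sketch:
+//
+//   { BlockState block_state(this, block_scope);  // top_scope_ = block_scope
+//     // ... parse statements inside the block ...
+//   }                                             // outer scope restored
+//
+// FunctionState additionally saves and restores current_function_state_ and
+// the isolate's AST node id counter.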
+
+// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
//
@@ -442,168 +533,21 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of Parser
-bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
- return identifier.is_identical_to(
- parser_->isolate()->factory()->eval_string()) ||
- identifier.is_identical_to(
- parser_->isolate()->factory()->arguments_string());
-}
-
-
-void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) {
- MessageLocation location(parser_->script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = parser_->isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- parser_->isolate()->Throw(*result, &location);
-}
-
-
-void ParserTraits::ReportMessage(const char* message,
- Vector<Handle<String> > args) {
- Scanner::Location source_location = parser_->scanner()->location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args) {
- MessageLocation location(parser_->script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = parser_->isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- parser_->isolate()->Throw(*result, &location);
-}
-
-
-Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
- int symbol_id = -1;
- if (parser_->pre_parse_data() != NULL) {
- symbol_id = parser_->pre_parse_data()->GetSymbolIdentifier();
- }
- return parser_->LookupSymbol(symbol_id);
-}
-
-
-Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
- PretenureFlag tenured) {
- if (scanner->is_next_literal_ascii()) {
- return parser_->isolate_->factory()->NewStringFromAscii(
- scanner->next_literal_ascii_string(), tenured);
- } else {
- return parser_->isolate_->factory()->NewStringFromTwoByte(
- scanner->next_literal_utf16_string(), tenured);
- }
-}
-
-
-Expression* ParserTraits::ThisExpression(
- Scope* scope,
- AstNodeFactory<AstConstructionVisitor>* factory) {
- return factory->NewVariableProxy(scope->receiver());
-}
-
-
-Expression* ParserTraits::ExpressionFromLiteral(
- Token::Value token, int pos,
- Scanner* scanner,
- AstNodeFactory<AstConstructionVisitor>* factory) {
- Factory* isolate_factory = parser_->isolate()->factory();
- switch (token) {
- case Token::NULL_LITERAL:
- return factory->NewLiteral(isolate_factory->null_value(), pos);
- case Token::TRUE_LITERAL:
- return factory->NewLiteral(isolate_factory->true_value(), pos);
- case Token::FALSE_LITERAL:
- return factory->NewLiteral(isolate_factory->false_value(), pos);
- case Token::NUMBER: {
- ASSERT(scanner->is_literal_ascii());
- double value = StringToDouble(parser_->isolate()->unicode_cache(),
- scanner->literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- return factory->NewNumberLiteral(value, pos);
- }
- default:
- ASSERT(false);
- }
- return NULL;
-}
-
-
-Expression* ParserTraits::ExpressionFromIdentifier(
- Handle<String> name, int pos, Scope* scope,
- AstNodeFactory<AstConstructionVisitor>* factory) {
- if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(parser_->zone());
- return scope->NewUnresolved(factory, name, interface, pos);
-}
-
-
-Expression* ParserTraits::ExpressionFromString(
- int pos, Scanner* scanner,
- AstNodeFactory<AstConstructionVisitor>* factory) {
- Handle<String> symbol = GetSymbol(scanner);
- if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
- return factory->NewLiteral(symbol, pos);
-}
-
-
-Expression* ParserTraits::ParseArrayLiteral(bool* ok) {
- return parser_->ParseArrayLiteral(ok);
-}
-
-
-Expression* ParserTraits::ParseObjectLiteral(bool* ok) {
- return parser_->ParseObjectLiteral(ok);
-}
-
-
-Expression* ParserTraits::ParseExpression(bool accept_IN, bool* ok) {
- return parser_->ParseExpression(accept_IN, ok);
-}
-
-
-Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
- return parser_->ParseV8Intrinsic(ok);
-}
-
-
Parser::Parser(CompilationInfo* info)
- : ParserBase<ParserTraits>(&scanner_,
- info->isolate()->stack_guard()->real_climit(),
- info->extension(),
- this),
+ : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
+ top_scope_(NULL),
original_scope_(NULL),
+ current_function_state_(NULL),
target_stack_(NULL),
+ extension_(info->extension()),
pre_parse_data_(NULL),
fni_(NULL),
+ parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
ASSERT(!script_.is_null());
@@ -666,14 +610,14 @@ FunctionLiteral* Parser::ParseProgram() {
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
Handle<String> source) {
- ASSERT(scope_ == NULL);
+ ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
+ { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
@@ -699,19 +643,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(&function_state_, &scope_, scope, zone());
+ FunctionState function_state(this, scope);
- scope_->SetLanguageMode(info->language_mode());
+ top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_pos = scanner()->location().beg_pos;
+ int beg_pos = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+ if (ok && !top_scope_->is_classic_mode()) {
+ CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(scope_, &ok);
+ CheckConflictingVarDeclarations(top_scope_, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -727,7 +671,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
- scope_,
+ top_scope_,
body,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
@@ -740,7 +684,6 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotGenerator,
0);
result->set_ast_properties(factory()->visitor()->ast_properties());
- result->set_slot_processor(factory()->visitor()->slot_processor());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow()) {
@@ -793,7 +736,7 @@ FunctionLiteral* Parser::ParseLazy() {
FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
- ASSERT(scope_ == NULL);
+ ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -807,14 +750,14 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
- Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
+ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info()->SetGlobalScope(scope);
if (!info()->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
original_scope_ = scope;
- FunctionState function_state(&function_state_, &scope_, scope, zone());
+ FunctionState function_state(this, scope);
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
info()->is_extended_mode());
@@ -850,6 +793,62 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
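+// Returns the symbol for the literal just scanned. When preparse data is
+// available, its symbol stream supplies an id so LookupSymbol can reuse an
+// already internalized string from the symbol cache.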
+Handle<String> Parser::GetSymbol() {
+ int symbol_id = -1;
+ if (pre_parse_data() != NULL) {
+ symbol_id = pre_parse_data()->GetSymbolIdentifier();
+ }
+ return LookupSymbol(symbol_id);
+}
+
+
+void Parser::ReportMessage(const char* message, Vector<const char*> args) {
+ Scanner::Location source_location = scanner().location();
+ ReportMessageAt(source_location, message, args);
+}
+
+
+void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
+ Scanner::Location source_location = scanner().location();
+ ReportMessageAt(source_location, message, args);
+}
+
+
+void Parser::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args) {
+ MessageLocation location(script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
+ elements->set(i, *arg_string);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ isolate()->Throw(*result, &location);
+}
+
+
+void Parser::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args) {
+ MessageLocation location(script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ elements->set(i, *args[i]);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ isolate()->Throw(*result, &location);
+}
+
+
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -872,7 +871,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive_prologue = false;
}
- Scanner::Location token_loc = scanner()->peek_location();
+ Scanner::Location token_loc = scanner().peek_location();
Statement* stat;
if (is_global && !is_eval) {
stat = ParseModuleElement(NULL, CHECK_OK);
@@ -895,7 +894,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
- if (scope_->is_classic_mode() &&
+ if (top_scope_->is_classic_mode() &&
directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict_string()->length() + 2) {
@@ -904,16 +903,16 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
// In the same manner, we have to patch the parsing mode.
- if (is_eval && !scope_->is_eval_scope()) {
- ASSERT(scope_->is_global_scope());
- Scope* scope = NewScope(scope_, EVAL_SCOPE);
- scope->set_start_position(scope_->start_position());
- scope->set_end_position(scope_->end_position());
- scope_ = scope;
+ if (is_eval && !top_scope_->is_eval_scope()) {
+ ASSERT(top_scope_->is_global_scope());
+ Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
+ scope->set_start_position(top_scope_->start_position());
+ scope->set_end_position(top_scope_->end_position());
+ top_scope_ = scope;
mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
- scope_->SetLanguageMode(allow_harmony_scoping()
+ top_scope_->SetLanguageMode(allow_harmony_scoping()
? EXTENDED_MODE : STRICT_MODE);
// "use strict" is the only directive for now.
directive_prologue = false;
@@ -962,14 +961,14 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
- !scanner()->HasAnyLineTerminatorBeforeNext() &&
+ !scanner().HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) &&
- !scanner()->literal_contains_escapes()) {
+ !scanner().literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
}
@@ -994,7 +993,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, scope_, pos);
+ factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1052,14 +1051,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
- Scope* scope = NewScope(scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner()->location().beg_pos);
+ scope->set_start_position(scanner().location().beg_pos);
scope->SetLanguageMode(EXTENDED_MODE);
{
- BlockState block_state(&scope_, scope);
+ BlockState block_state(this, scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1073,7 +1072,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner()->location().end_pos);
+ scope->set_end_position(scanner().location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
@@ -1082,8 +1081,8 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
!it.done(); it.Advance()) {
if (scope->LocalLookup(it.name()) == NULL) {
Handle<String> name(it.name());
- ParserTraits::ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
+ ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -1122,8 +1121,7 @@ Module* Parser::ParseModulePath(bool* ok) {
member->interface()->Print();
}
#endif
- ParserTraits::ReportMessage("invalid_module_path",
- Vector<Handle<String> >(&name, 1));
+ ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
return NULL;
}
result = member;
@@ -1143,9 +1141,9 @@ Module* Parser::ParseModuleVariable(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
- VariableProxy* proxy = scope_->NewUnresolved(
+ VariableProxy* proxy = top_scope_->NewUnresolved(
factory(), name, Interface::NewModule(zone()),
- scanner()->location().beg_pos);
+ scanner().location().beg_pos);
return factory()->NewModuleVariable(proxy, pos);
}
@@ -1167,7 +1165,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
- Scope* scope = NewScope(scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
@@ -1233,13 +1231,12 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
module->interface()->Print();
}
#endif
- ParserTraits::ReportMessage("invalid_module_path",
- Vector<Handle<String> >(&name, 1));
+ ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
return NULL;
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, scope_, pos);
+ factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1294,12 +1291,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
default:
*ok = false;
- ReportUnexpectedToken(scanner()->current_token());
+ ReportUnexpectedToken(scanner().current_token());
return NULL;
}
// Extract declared names into export declarations and interface.
- Interface* interface = scope_->interface();
+ Interface* interface = top_scope_->interface();
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1314,8 +1311,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, scope_, position);
- // scope_->AddDeclaration(declaration);
+ // factory()->NewExportDeclaration(proxy, top_scope_, position);
+ // top_scope_->AddDeclaration(declaration);
}
ASSERT(result != NULL);
@@ -1441,8 +1438,9 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (!scope_->is_classic_mode()) {
- ReportMessageAt(scanner()->peek_location(), "strict_function");
+ if (!top_scope_->is_classic_mode()) {
+ ReportMessageAt(scanner().peek_location(), "strict_function",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -1621,8 +1619,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var->interface()->Print();
}
#endif
- ParserTraits::ReportMessage("module_type_error",
- Vector<Handle<String> >(&name, 1));
+ ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
}
}
}
@@ -1661,7 +1658,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
+ factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
@@ -1685,7 +1682,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
- scanner()->location(),
+ scanner().location(),
is_strict_reserved,
is_generator,
pos,
@@ -1697,10 +1694,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// In extended mode, a function behaves as a lexical binding, except in the
// global scope.
VariableMode mode =
- is_extended_mode() && !scope_->is_global_scope() ? LET : VAR;
+ is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1708,7 +1705,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+ if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
// Block ::
// '{' Statement* '}'
@@ -1741,12 +1738,12 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// Construct block expecting 16 statements.
Block* body =
factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner()->location().beg_pos);
- { BlockState block_state(&scope_, block_scope);
+ block_scope->set_start_position(scanner().location().beg_pos);
+ { BlockState block_state(this, block_scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1759,7 +1756,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner()->location().end_pos);
+ block_scope->set_end_position(scanner().location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_scope(block_scope);
return body;
@@ -1780,6 +1777,12 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
}
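+// Returns true if the string is either "eval" or "arguments", the two
+// identifiers that strict mode rejects as binding names and as assignment
+// targets (see, e.g., the CheckStrictModeLValue call sites below).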
+bool Parser::IsEvalOrArguments(Handle<String> string) {
+ return string.is_identical_to(isolate()->factory()->eval_string()) ||
+ string.is_identical_to(isolate()->factory()->arguments_string());
+}
+
+
// If the variable declaration declares exactly one non-const
// variable, then *out is set to that variable. In all other cases,
// *out is untouched; in particular, it is the caller's responsibility
@@ -1828,7 +1831,7 @@ Block* Parser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
Consume(Token::CONST);
- switch (scope_->language_mode()) {
+ switch (top_scope_->language_mode()) {
case CLASSIC_MODE:
mode = CONST;
init_op = Token::INIT_CONST;
@@ -1921,11 +1924,12 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner()->location(), "too_many_variables");
+ ReportMessageAt(scanner().location(), "too_many_variables",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -1940,7 +1944,7 @@ Block* Parser::ParseVariableDeclarations(
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in scope_, not
+ // In particular, we need to re-lookup 'v' (in top_scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
@@ -1958,7 +1962,7 @@ Block* Parser::ParseVariableDeclarations(
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = is_const ? declaration_scope : scope_;
+ Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
int pos = -1;
// Harmony consts have non-optional initializers.
@@ -2149,7 +2153,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- scope_->RemoveUnresolved(var);
+ top_scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
return ParseStatement(labels, ok);
}
@@ -2159,12 +2163,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// no line-terminator between the two words.
if (extension_ != NULL &&
peek() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorBeforeNext() &&
+ !scanner().HasAnyLineTerminatorBeforeNext() &&
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->native_string()) &&
- !scanner()->literal_contains_escapes()) {
+ !scanner().literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2172,11 +2176,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
- scanner()->HasAnyLineTerminatorBeforeNext() ||
+ scanner().HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) ||
- scanner()->literal_contains_escapes()) {
+ scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -2213,7 +2217,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2228,7 +2232,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ParserTraits::ReportMessageAt(scanner()->location(), message, args);
+ ReportMessageAt(scanner().location(), message, args);
*ok = false;
return NULL;
}
@@ -2245,7 +2249,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2266,7 +2270,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ParserTraits::ReportMessageAt(scanner()->location(), message, args);
+ ReportMessageAt(scanner().location(), message, args);
*ok = false;
return NULL;
}
@@ -2288,7 +2292,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Token::Value tok = peek();
Statement* result;
Expression* return_value;
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -2299,7 +2303,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
+ current_function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
generator, return_value, Yield::FINAL, pos);
result = factory()->NewExpressionStatement(yield, pos);
@@ -2312,7 +2316,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = scope_->DeclarationScope();
+ Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
Handle<String> message = isolate()->factory()->illegal_return_string();
@@ -2331,7 +2335,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (!scope_->is_classic_mode()) {
+ if (!top_scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2341,13 +2345,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(scope_, WITH_SCOPE);
+ top_scope_->DeclarationScope()->RecordWithStatement();
+ Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
Statement* stmt;
- { BlockState block_state(&scope_, with_scope);
- with_scope->set_start_position(scanner()->peek_location().beg_pos);
+ { BlockState block_state(this, with_scope);
+ with_scope->set_start_position(scanner().peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner()->location().end_pos);
+ with_scope->set_end_position(scanner().location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2421,7 +2425,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2476,8 +2480,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner()->location().beg_pos);
+ catch_scope = NewScope(top_scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner().location().beg_pos);
name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -2487,10 +2491,10 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
catch_variable =
catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- BlockState block_state(&scope_, catch_scope);
+ BlockState block_state(this, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
- catch_scope->set_end_position(scanner()->location().end_pos);
+ catch_scope->set_end_position(scanner().location().end_pos);
tok = peek();
}
@@ -2509,7 +2513,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = function_state_->NextHandlerIndex();
+ int index = current_function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
@@ -2523,12 +2527,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = function_state_->NextHandlerIndex();
+ int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
- int index = function_state_->NextHandlerIndex();
+ int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
@@ -2608,9 +2612,9 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Variable* iterator = scope_->DeclarationScope()->NewTemporary(
+ Variable* iterator = top_scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_iterator_string());
- Variable* result = scope_->DeclarationScope()->NewTemporary(
+ Variable* result = top_scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_result_string());
Expression* assign_iterator;
@@ -2677,13 +2681,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = scope_;
- Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
- scope_ = for_scope;
+ Scope* saved_scope = top_scope_;
+ Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ top_scope_ = for_scope;
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner()->location().beg_pos);
+ for_scope->set_start_position(scanner().location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
@@ -2706,15 +2710,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- scope_->NewUnresolved(factory(), name, interface);
+ top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
- scope_ = saved_scope;
- for_scope->set_end_position(scanner()->location().end_pos);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
@@ -2752,20 +2756,20 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> tempstr =
heap_factory->NewConsString(heap_factory->dot_for_string(), name);
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- scope_ = saved_scope;
+ top_scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- scope_ = for_scope;
+ top_scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- scope_->NewUnresolved(factory(), name, Interface::NewValue());
+ top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -2777,8 +2781,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
- scope_ = saved_scope;
- for_scope->set_end_position(scanner()->location().end_pos);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
body_block->set_scope(for_scope);
// Parsed for-in loop w/ let declaration.
@@ -2811,8 +2815,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
- scope_ = saved_scope;
- for_scope->set_end_position(scanner()->location().end_pos);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop.
@@ -2846,8 +2850,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- scope_ = saved_scope;
- for_scope->set_end_position(scanner()->location().end_pos);
+ top_scope_ = saved_scope;
+ for_scope->set_end_position(scanner().location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
if (for_scope != NULL) {
// Rewrite a for statement of the form
@@ -2922,7 +2926,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!scope_->is_classic_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, CHECK_OK);
}
@@ -2942,7 +2946,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
property != NULL &&
property->obj()->AsVariableProxy() != NULL &&
property->obj()->AsVariableProxy()->is_this()) {
- function_state_->AddProperty();
+ current_function_state_->AddProperty();
}
// If we assign a function literal to a property we pretenure the
@@ -2978,11 +2982,11 @@ Expression* Parser::ParseYieldExpression(bool* ok) {
Yield::Kind kind =
Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
Expression* generator_object = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
+ current_function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
if (kind == Yield::DELEGATING) {
- yield->set_index(function_state_->NextHandlerIndex());
+ yield->set_index(current_function_state_->NextHandlerIndex());
}
return yield;
}
@@ -3009,6 +3013,14 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
}
+int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
+ if (tok == Token::IN && !accept_IN)
+ return 0; // 0 precedence will terminate binary expression parsing
+
+ return Token::Precedence(tok);
+}
+
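+// With accept_IN == false an 'in' token gets precedence 0 and therefore
+// terminates ParseBinaryExpression(); this lets, for example, a for
+// statement header such as 'for (x in y)' keep the 'in' for itself instead
+// of having it consumed as a binary operator.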
+
// Precedence >= 4
Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
ASSERT(prec >= 4);
@@ -3146,7 +3158,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
// "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !scope_->is_classic_mode()) {
+ if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
VariableProxy* operand = expression->AsVariableProxy();
if (operand != NULL && !operand->is_this()) {
ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -3194,7 +3206,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!scope_->is_classic_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, CHECK_OK);
}
@@ -3216,7 +3228,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
@@ -3228,7 +3240,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
expression = NewThrowReferenceError(message);
}
- if (!scope_->is_classic_mode()) {
+ if (!top_scope_->is_classic_mode()) {
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, CHECK_OK);
}
@@ -3249,7 +3261,12 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- Expression* result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ Expression* result;
+ if (peek() == Token::NEW) {
+ result = ParseNewExpression(CHECK_OK);
+ } else {
+ result = ParseMemberExpression(CHECK_OK);
+ }
while (true) {
switch (peek()) {
@@ -3264,7 +3281,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
case Token::LPAREN: {
int pos;
- if (scanner()->current_token() == Token::IDENTIFIER) {
+ if (scanner().current_token() == Token::IDENTIFIER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
pos = position();
@@ -3294,7 +3311,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
callee->IsVariable(isolate()->factory()->eval_string())) {
- scope_->DeclarationScope()->RecordEvalCall();
+ top_scope_->DeclarationScope()->RecordEvalCall();
}
result = factory()->NewCall(result, args, pos);
if (fni_ != NULL) fni_->RemoveLastFunction();
@@ -3318,54 +3335,54 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
}
-Expression* Parser::ParseMemberWithNewPrefixesExpression(bool* ok) {
+Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
- // The grammar for new expressions is pretty warped. We can have several 'new'
- // keywords following each other, and then a MemberExpression. When we see '('
- // after the MemberExpression, it's associated with the rightmost unassociated
- // 'new' to create a NewExpression with arguments. However, a NewExpression
- // can also occur without arguments.
-
- // Examples of new expression:
- // new foo.bar().baz means (new (foo.bar)()).baz
- // new foo()() means (new foo())()
- // new new foo()() means (new (new foo())())
- // new new foo means new (new foo)
- // new new foo() means new (new foo())
- // new new foo().bar().baz means (new (new foo()).bar()).baz
-
+ // The grammar for new expressions is pretty warped. The keyword
+ // 'new' can either be a part of the new expression (where it isn't
+ // followed by an argument list) or a part of the member expression,
+ // where it must be followed by an argument list. To accommodate
+ // this, we parse the 'new' keywords greedily and keep track of how
+ // many we have parsed. This information is then passed on to the
+ // member expression parser, which is only allowed to match argument
+  // lists as long as it has 'new' prefixes left.
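+  //
+  // Examples:
+  //   new foo.bar().baz means (new (foo.bar)()).baz
+  //   new foo()() means (new foo())()
+  //   new new foo()() means (new (new foo())())
+  //   new new foo means new (new foo)
+  //   new new foo() means new (new foo())
+  //   new new foo().bar().baz means (new (new foo()).bar()).baz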
+ Expect(Token::NEW, CHECK_OK);
+ PositionStack::Element pos(stack, position());
+
+ Expression* result;
if (peek() == Token::NEW) {
- Consume(Token::NEW);
- int new_pos = position();
- Expression* result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
- if (peek() == Token::LPAREN) {
- // NewExpression with arguments.
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- result = factory()->NewCallNew(result, args, new_pos);
- // The expression can still continue with . or [ after the arguments.
- result = ParseMemberExpressionContinuation(result, CHECK_OK);
- return result;
- }
- // NewExpression without arguments.
- return factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), new_pos);
+ result = ParseNewPrefix(stack, CHECK_OK);
+ } else {
+ result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
+ }
+
+ if (!stack->is_empty()) {
+ int last = stack->pop();
+ result = factory()->NewCallNew(
+ result, new(zone()) ZoneList<Expression*>(0, zone()), last);
}
- // No 'new' keyword.
- return ParseMemberExpression(ok);
+ return result;
+}
+
+
+Expression* Parser::ParseNewExpression(bool* ok) {
+ PositionStack stack(ok);
+ return ParseNewPrefix(&stack, ok);
}
Expression* Parser::ParseMemberExpression(bool* ok) {
+ return ParseMemberWithNewPrefixesExpression(NULL, ok);
+}
+
+
+Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
+ bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
- // The '[' Expression ']' and '.' Identifier parts are parsed by
- // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
- // caller.
-
// Parse the initial primary or function expression.
Expression* result = NULL;
if (peek() == Token::FUNCTION) {
@@ -3378,7 +3395,7 @@ Expression* Parser::ParseMemberExpression(bool* ok) {
if (peek_any_identifier()) {
name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
CHECK_OK);
- function_name_location = scanner()->location();
+ function_name_location = scanner().location();
}
FunctionLiteral::FunctionType function_type = name.is_null()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -3394,22 +3411,13 @@ Expression* Parser::ParseMemberExpression(bool* ok) {
result = ParsePrimaryExpression(CHECK_OK);
}
- result = ParseMemberExpressionContinuation(result, CHECK_OK);
- return result;
-}
-
-
-Expression* Parser::ParseMemberExpressionContinuation(Expression* expression,
- bool* ok) {
- // Parses this part of MemberExpression:
- // ('[' Expression ']' | '.' Identifier)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
- expression = factory()->NewProperty(expression, index, pos);
+ result = factory()->NewProperty(result, index, pos);
if (fni_ != NULL) {
if (index->IsPropertyName()) {
fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
@@ -3425,17 +3433,23 @@ Expression* Parser::ParseMemberExpressionContinuation(Expression* expression,
Consume(Token::PERIOD);
int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- expression = factory()->NewProperty(
- expression, factory()->NewLiteral(name, pos), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
+ case Token::LPAREN: {
+ if ((stack == NULL) || stack->is_empty()) return result;
+ // Consume one of the new prefixes (already parsed).
+ ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
+ int pos = stack->pop();
+ result = factory()->NewCallNew(result, args, pos);
+ break;
+ }
default:
- return expression;
+ return result;
}
}
- ASSERT(false);
- return NULL;
}
@@ -3462,6 +3476,124 @@ void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
}
+Expression* Parser::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ int pos = peek_position();
+ Expression* result = NULL;
+ switch (peek()) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ result = factory()->NewVariableProxy(top_scope_->receiver());
+ break;
+ }
+
+ case Token::NULL_LITERAL:
+ Consume(Token::NULL_LITERAL);
+ result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
+ break;
+
+ case Token::TRUE_LITERAL:
+ Consume(Token::TRUE_LITERAL);
+ result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
+ break;
+
+ case Token::FALSE_LITERAL:
+ Consume(Token::FALSE_LITERAL);
+ result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
+ break;
+
+ case Token::IDENTIFIER:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ if (fni_ != NULL) fni_->PushVariableName(name);
+ // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+ if (FLAG_print_interface_details)
+ PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+ Interface* interface = Interface::NewUnknown(zone());
+ result = top_scope_->NewUnresolved(factory(), name, interface, pos);
+ break;
+ }
+
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ ASSERT(scanner().is_literal_ascii());
+ double value = StringToDouble(isolate()->unicode_cache(),
+ scanner().literal_ascii_string(),
+ ALLOW_HEX | ALLOW_OCTAL |
+ ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+ result = factory()->NewNumberLiteral(value, pos);
+ break;
+ }
+
+ case Token::STRING: {
+ Consume(Token::STRING);
+ Handle<String> symbol = GetSymbol();
+ result = factory()->NewLiteral(symbol, pos);
+ if (fni_ != NULL) fni_->PushLiteralName(symbol);
+ break;
+ }
+
+ case Token::ASSIGN_DIV:
+ result = ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ if (allow_natives_syntax() || extension_ != NULL) {
+ result = ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+      // If we're not allowing special syntax, we fall through to the
+      // default case.
+
+ default: {
+ Token::Value tok = Next();
+ ReportUnexpectedToken(tok);
+ *ok = false;
+ return NULL;
+ }
+ }
+
+ return result;
+}
+
+
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3484,7 +3616,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Expect(Token::RBRACK, CHECK_OK);
// Update the scope information before the pre-parsing bailout.
- int literal_index = function_state_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
return factory()->NewArrayLiteral(values, literal_index, pos);
}
@@ -3546,7 +3678,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralChecker checker(this, scope_->language_mode());
+ ObjectLiteralChecker checker(this, top_scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -3592,7 +3724,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
: GetSymbol();
FunctionLiteral* value =
ParseFunctionLiteral(name,
- scanner()->location(),
+ scanner().location(),
false, // reserved words are allowed here
false, // not a generator
RelocInfo::kNoPosition,
@@ -3633,9 +3765,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
case Token::NUMBER: {
Consume(Token::NUMBER);
- ASSERT(scanner()->is_literal_ascii());
+ ASSERT(scanner().is_literal_ascii());
double value = StringToDouble(isolate()->unicode_cache(),
- scanner()->literal_ascii_string(),
+ scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
key = factory()->NewNumberLiteral(value, next_pos);
@@ -3667,7 +3799,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Mark top-level object literals that contain function literals and
// pretenure the literal so it can be added as a constant function
// property.
- if (scope_->DeclarationScope()->is_global_scope() &&
+ if (top_scope_->DeclarationScope()->is_global_scope() &&
value->AsFunctionLiteral() != NULL) {
has_function = true;
value->AsFunctionLiteral()->set_pretenure();
@@ -3690,7 +3822,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre-parse bailout.
- int literal_index = function_state_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
return factory()->NewObjectLiteral(properties,
literal_index,
@@ -3700,6 +3832,26 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
+Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+ int pos = peek_position();
+ if (!scanner().ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
+
+ Handle<String> js_pattern = NextLiteralString(TENURED);
+ scanner().ScanRegExpFlags();
+ Handle<String> js_flags = NextLiteralString(TENURED);
+ Next();
+
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+}
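
A note on seen_equal: when a regular expression literal begins at a position where the scanner has just produced ASSIGN_DIV, the characters "/=" were consumed as a single token, so the rescanned pattern must start with the '=' character. Two illustrative inputs (an aside about the token stream, not code from this patch):

// var r = /abc/gi;  // scanner emits DIV -> ParseRegExpLiteral(false);
//                   // js_pattern is "abc", js_flags is "gi"
// var r = /=end/;   // scanner emits ASSIGN_DIV -> ParseRegExpLiteral(true);
//                   // js_pattern is "=end", js_flags is ""
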
+
+
ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
@@ -3711,7 +3863,8 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
result->Add(argument, zone());
if (result->length() > Code::kMaxArguments) {
- ReportMessageAt(scanner()->location(), "too_many_arguments");
+ ReportMessageAt(scanner().location(), "too_many_arguments",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -3868,14 +4021,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// one relative to the deserialized scope chain. Otherwise we must be
// compiling a function in an inner declaration scope in the eval, e.g. a
// nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = scope_->DeclarationScope();
+ Scope* declaration_scope = top_scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(scope_, FUNCTION_SCOPE);
+ : NewScope(top_scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -3888,23 +4041,23 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::IsGeneratorFlag generator = is_generator
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
- DeferredFeedbackSlotProcessor* slot_processor;
AstProperties ast_properties;
BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
- { FunctionState function_state(&function_state_, &scope_, scope, zone());
- scope_->SetScopeName(function_name);
+ { FunctionState function_state(this, scope);
+ top_scope_->SetScopeName(function_name);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
// activation.
- scope_->ForceContextAllocation();
+ top_scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
- // expressions. This also marks the FunctionState as a generator.
- Variable* temp = scope_->DeclarationScope()->NewTemporary(
+ // expressions. Presence of a variable for the generator object in the
+ // FunctionState indicates that this function is a generator.
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(
isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -3912,7 +4065,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner()->location().beg_pos);
+ scope->set_start_position(scanner().location().beg_pos);
// We don't yet know if the function will be strict, so we cannot yet
// produce errors for parameter names or duplicates. However, we remember
@@ -3929,20 +4082,21 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Store locations for possible future error reports.
if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
- eval_args_error_log = scanner()->location();
+ eval_args_error_log = scanner().location();
}
if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner()->location();
+ reserved_loc = scanner().location();
}
- if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
+ if (!dupe_error_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_error_loc = scanner()->location();
+ dupe_error_loc = scanner().location();
}
- scope_->DeclareParameter(param_name, VAR);
+ top_scope_->DeclareParameter(param_name, VAR);
num_parameters++;
if (num_parameters > Code::kMaxArguments) {
- ReportMessageAt(scanner()->location(), "too_many_parameters");
+ ReportMessageAt(scanner().location(), "too_many_parameters",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -3964,13 +4118,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(scope_,
+ fvar = new(zone()) Variable(top_scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
- scope_->DeclareFunctionVar(fvar_declaration);
+ proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
+ top_scope_->DeclareFunctionVar(fvar_declaration);
}
// Determine whether the function will be lazily compiled.
@@ -3985,7 +4139,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// These are all things we can know at this point, without looking at the
// function itself.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- scope_->AllowsLazyCompilation() &&
+ top_scope_->AllowsLazyCompilation() &&
!parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
@@ -4003,7 +4157,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// to check.
ReportInvalidPreparseData(function_name, CHECK_OK);
}
- scanner()->SeekForward(entry.end_pos() - 1);
+ scanner().SeekForward(entry.end_pos() - 1);
scope->set_end_position(entry.end_pos());
Expect(Token::RBRACE, CHECK_OK);
@@ -4011,7 +4165,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- scope_->SetLanguageMode(entry.language_mode());
+ top_scope_->SetLanguageMode(entry.language_mode());
} else {
is_lazily_compiled = false;
}
@@ -4033,10 +4187,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (arg != NULL) {
args = Vector<const char*>(&arg, 1);
}
- ParserTraits::ReportMessageAt(
- Scanner::Location(logger.start(), logger.end()),
- logger.message(),
- args);
+ ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
+ logger.message(), args);
*ok = false;
return NULL;
}
@@ -4046,7 +4198,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
- scope_->SetLanguageMode(logger.language_mode());
+ top_scope_->SetLanguageMode(logger.language_mode());
}
}
@@ -4054,7 +4206,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy = scope_->NewUnresolved(
+ VariableProxy* fproxy = top_scope_->NewUnresolved(
factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
@@ -4074,11 +4226,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
+ current_function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
+ current_function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
@@ -4089,7 +4241,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_generator) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
+ current_function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
@@ -4103,34 +4255,40 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
handler_count = function_state.handler_count();
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner()->location().end_pos);
+ scope->set_end_position(scanner().location().end_pos);
}
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (!scope_->is_classic_mode()) {
+ if (!top_scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
- ReportMessageAt(function_name_location, "strict_eval_arguments");
+ ReportMessageAt(function_name_location,
+ "strict_eval_arguments",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
if (name_is_strict_reserved) {
- ReportMessageAt(function_name_location, "unexpected_strict_reserved");
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
if (eval_args_error_log.IsValid()) {
- ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
+ ReportMessageAt(eval_args_error_log, "strict_eval_arguments",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe");
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
+ ReportMessageAt(reserved_loc, "unexpected_strict_reserved",
+ Vector<const char*>::empty());
*ok = false;
return NULL;
}
@@ -4139,7 +4297,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
- slot_processor = factory()->visitor()->slot_processor();
dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
@@ -4163,7 +4320,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
pos);
function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
- function_literal->set_slot_processor(slot_processor);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
@@ -4174,7 +4330,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner()->current_token());
+ ASSERT_EQ(Token::LBRACE, scanner().current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
@@ -4189,7 +4345,7 @@ PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
allow_harmony_numeric_literals());
}
PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(scope_->language_mode(),
+ reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
is_generator(),
logger);
return result;
@@ -4209,7 +4365,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- scope_->DeclarationScope()->ForceEagerCompilation();
+ top_scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name);
@@ -4241,8 +4397,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->Get(0) == '_') {
- ParserTraits::ReportMessage("not_defined",
- Vector<Handle<String> >(&name, 1));
+ ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -4252,6 +4407,88 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
+bool ParserBase::peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
+}
+
+
+bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+}
+
+
+void ParserBase::ExpectSemicolon(bool* ok) {
+ // Check for automatic semicolon insertion according to
+ // the rules given in ECMA-262, section 7.9, page 21.
+ Token::Value tok = peek();
+ if (tok == Token::SEMICOLON) {
+ Next();
+ return;
+ }
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ return;
+ }
+ Expect(Token::SEMICOLON, ok);
+}
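
ExpectSemicolon encodes the automatic semicolon insertion rules: an explicit ';' is consumed, and a missing one is tolerated when the next token is '}' or end-of-source, or when a line terminator precedes it. Illustrative statement sequences (assumed inputs, shown to make the three ASI cases concrete):

// a = b       <newline> c = d    // ok: line terminator before 'c'
// { a = b }                      // ok: next token is RBRACE
// a = b                          // ok: next token is EOS
// a = b c = d                    // error: Expect(Token::SEMICOLON) fails
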
+
+
+void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+ Expect(Token::IDENTIFIER, ok);
+ if (!*ok) return;
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
+ *ok = false;
+ }
+}
+
+
+void ParserBase::ReportUnexpectedToken(Token::Value token) {
+ // We don't report stack overflows here, to avoid increasing the
+ // stack depth even further. Instead we report it after parsing is
+ // over, in ParseProgram.
+ if (token == Token::ILLEGAL && stack_overflow()) {
+ return;
+ }
+ Scanner::Location source_location = scanner()->location();
+
+  // Several token types are treated specially
+ switch (token) {
+ case Token::EOS:
+ return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::NUMBER:
+ return ReportMessageAt(source_location, "unexpected_token_number");
+ case Token::STRING:
+ return ReportMessageAt(source_location, "unexpected_token_string");
+ case Token::IDENTIFIER:
+ return ReportMessageAt(source_location,
+ "unexpected_token_identifier");
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location, "unexpected_reserved");
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location,
+ is_classic_mode() ? "unexpected_token_identifier"
+ : "unexpected_strict_reserved");
+ default:
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ ReportMessageAt(
+ source_location, "unexpected_token", Vector<const char*>(&name, 1));
+ }
+}
+
+
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewLiteral(
isolate()->factory()->undefined_value(), position);
@@ -4264,6 +4501,68 @@ Literal* Parser::GetLiteralTheHole(int position) {
}
+// Parses an identifier that is valid for the current scope; in particular, it
+// fails on strict mode future reserved keywords in a strict scope. If
+// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+// "arguments" as an identifier even in strict mode (this is needed in cases
+// like "var foo = eval;").
+Handle<String> Parser::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ Handle<String> name = GetSymbol();
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ !top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
+ ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
+ *ok = false;
+ }
+ return name;
+ } else if (top_scope_->is_classic_mode() &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator()))) {
+ return GetSymbol();
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Handle<String>();
+ }
+}
+
+
+// Parses an identifier or a strict mode future reserved word, and indicates
+// whether it is strict mode future reserved.
+Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Handle<String>();
+ }
+ return GetSymbol();
+}
+
+
+Handle<String> Parser::ParseIdentifierName(bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER &&
+ next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Handle<String>();
+ }
+ return GetSymbol();
+}
+
+
void Parser::MarkAsLValue(Expression* expression) {
VariableProxy* proxy = expression != NULL
? expression->AsVariableProxy()
@@ -4277,7 +4576,7 @@ void Parser::MarkAsLValue(Expression* expression) {
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,
bool* ok) {
- ASSERT(!scope_->is_classic_mode());
+ ASSERT(!top_scope_->is_classic_mode());
VariableProxy* lhs = expression != NULL
? expression->AsVariableProxy()
: NULL;
@@ -4289,6 +4588,18 @@ void Parser::CheckStrictModeLValue(Expression* expression,
}
+// Checks whether the most recently scanned octal literal lies between beg_pos
+// and end_pos. If so, reports an error. Only called for strict mode.
+void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
+ *ok = false;
+ }
+}
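
The scanner remembers only the position of the most recent octal literal, so a single interval test against [beg_pos, end_pos] suffices: the check runs after a strict region (a function body or directive prologue) has been fully parsed. Illustrative inputs (assumptions about the calling convention, not code from this patch):

// "use strict"; var x = 010;      // octal_position() lies inside the checked
//                                 // range -> strict_octal_literal
// var x = 010;
// function f() { "use strict"; }  // octal precedes f's range -> no error
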
+
+
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
@@ -4302,12 +4613,28 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ParserTraits::ReportMessageAt(location, "redeclaration", args);
+ ReportMessageAt(location, "redeclaration", args);
*ok = false;
}
}
+// This function reads an identifier name and determines whether or not it
+// is 'get' or 'set'.
+Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Handle<String> result = ParseIdentifierName(ok);
+ if (!*ok) return Handle<String>();
+ if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
+ const char* token = scanner().literal_ascii_string().start();
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
+ return result;
+}
+
+
// ----------------------------------------------------------------------------
// Parser support
@@ -5356,7 +5683,7 @@ bool Parser::Parse() {
Scanner::Location loc = pre_parse_data->MessageLocation();
const char* message = pre_parse_data->BuildMessage();
Vector<const char*> args = pre_parse_data->BuildArgs();
- ParserTraits::ReportMessageAt(loc, message, args);
+ ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 85e34d9ae5..2b0995ace2 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -404,92 +404,10 @@ class RegExpParser BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
-class Parser;
+// Forward declaration.
class SingletonLogger;
-class ParserTraits {
- public:
- struct Type {
- typedef v8::internal::Parser* Parser;
-
- // Types used by FunctionState and BlockState.
- typedef v8::internal::Scope Scope;
- typedef AstNodeFactory<AstConstructionVisitor> Factory;
- typedef Variable GeneratorVariable;
- typedef v8::internal::Zone Zone;
-
- // Return types for traversing functions.
- typedef Handle<String> Identifier;
- typedef v8::internal::Expression* Expression;
- };
-
- explicit ParserTraits(Parser* parser) : parser_(parser) {}
-
- // Custom operations executed when FunctionStates are created and destructed.
- template<typename FunctionState>
- static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
- Isolate* isolate = zone->isolate();
- function_state->isolate_ = isolate;
- function_state->saved_ast_node_id_ = isolate->ast_node_id();
- isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
- }
-
- template<typename FunctionState>
- static void TearDownFunctionState(FunctionState* function_state) {
- if (function_state->outer_function_state_ != NULL) {
- function_state->isolate_->set_ast_node_id(
- function_state->saved_ast_node_id_);
- }
- }
-
- // Helper functions for recursive descent.
- bool IsEvalOrArguments(Handle<String> identifier) const;
-
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
- void ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args);
-
- // "null" return type creators.
- static Handle<String> EmptyIdentifier() {
- return Handle<String>();
- }
- static Expression* EmptyExpression() {
- return NULL;
- }
-
- // Producing data during the recursive descent.
- Handle<String> GetSymbol(Scanner* scanner = NULL);
- Handle<String> NextLiteralString(Scanner* scanner,
- PretenureFlag tenured);
- Expression* ThisExpression(Scope* scope,
- AstNodeFactory<AstConstructionVisitor>* factory);
- Expression* ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- AstNodeFactory<AstConstructionVisitor>* factory);
- Expression* ExpressionFromIdentifier(
- Handle<String> name, int pos, Scope* scope,
- AstNodeFactory<AstConstructionVisitor>* factory);
- Expression* ExpressionFromString(
- int pos, Scanner* scanner,
- AstNodeFactory<AstConstructionVisitor>* factory);
-
- // Temporary glue; these functions will move to ParserBase.
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseV8Intrinsic(bool* ok);
-
- private:
- Parser* parser_;
-};
-
-
-class Parser : public ParserBase<ParserTraits> {
+class Parser : public ParserBase {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -509,8 +427,6 @@ class Parser : public ParserBase<ParserTraits> {
bool Parse();
private:
- friend class ParserTraits;
-
static const int kMaxNumFunctionLocals = 131071; // 2^17-1
enum Mode {
@@ -531,6 +447,64 @@ class Parser : public ParserBase<ParserTraits> {
kHasNoInitializers
};
+ class BlockState;
+
+ class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(Parser* parser, Scope* scope);
+ ~FunctionState();
+
+ int NextMaterializedLiteralIndex() {
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
+
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_generator_object_variable(Variable *variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ }
+ Variable* generator_object_variable() const {
+ return generator_object_variable_;
+ }
+ bool is_generator() const {
+ return generator_object_variable_ != NULL;
+ }
+
+ AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
+
+ private:
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+  // Used to assign a per-function index to try/catch handlers.
+ int next_handler_index_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // For generators, the variable that holds the generator object. This
+ // variable is used by yield expressions and return statements. NULL
+ // indicates that this function is not a generator.
+ Variable* generator_object_variable_;
+
+ Parser* parser_;
+ FunctionState* outer_function_state_;
+ Scope* outer_scope_;
+ int saved_ast_node_id_;
+ AstNodeFactory<AstConstructionVisitor> factory_;
+ };
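
The constructor and destructor of FunctionState live in parser.cc and are not part of this hunk; the following is a simplified sketch of what they plausibly do given the fields above (member initialization details are assumptions; only the push/pop behavior is implied by the surrounding code):

Parser::FunctionState::FunctionState(Parser* parser, Scope* scope)
    : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
      next_handler_index_(0),
      expected_property_count_(0),
      generator_object_variable_(NULL),
      parser_(parser),
      outer_function_state_(parser->current_function_state_),
      outer_scope_(parser->top_scope_) {
  // Push: this function's scope and state become current for the parser.
  parser->top_scope_ = scope;
  parser->current_function_state_ = this;
}

Parser::FunctionState::~FunctionState() {
  // Pop: restore the enclosing function's scope and state.
  parser_->top_scope_ = outer_scope_;
  parser_->current_function_state_ = outer_function_state_;
}
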
+
class ParsingModeScope BASE_EMBEDDED {
public:
ParsingModeScope(Parser* parser, Mode mode)
@@ -547,6 +521,10 @@ class Parser : public ParserBase<ParserTraits> {
Mode old_mode_;
};
+ virtual bool is_classic_mode() {
+ return top_scope_->is_classic_mode();
+ }
+
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram();
@@ -563,24 +541,39 @@ class Parser : public ParserBase<ParserTraits> {
// Report syntax error
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
+ void ReportMessage(const char* message, Vector<const char*> args);
+ void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, Vector<const char*>::empty());
+ }
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<Handle<String> > args);
void set_pre_parse_data(ScriptDataImpl *data) {
pre_parse_data_ = data;
symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
}
- bool inside_with() const { return scope_->inside_with(); }
+ bool inside_with() const { return top_scope_->inside_with(); }
+ Scanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
bool is_extended_mode() {
- ASSERT(scope_ != NULL);
- return scope_->is_extended_mode();
+ ASSERT(top_scope_ != NULL);
+ return top_scope_->is_extended_mode();
}
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
- ? scope_ : scope_->DeclarationScope();
+ ? top_scope_ : top_scope_->DeclarationScope();
}
+ // Check if the given string is 'eval' or 'arguments'.
+ bool IsEvalOrArguments(Handle<String> string);
+
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -638,12 +631,15 @@ class Parser : public ParserBase<ParserTraits> {
Expression* ParseUnaryExpression(bool* ok);
Expression* ParsePostfixExpression(bool* ok);
Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(bool* ok);
+ Expression* ParseNewExpression(bool* ok);
Expression* ParseMemberExpression(bool* ok);
- Expression* ParseMemberExpressionContinuation(Expression* expression,
- bool* ok);
+ Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
+ Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
+ bool* ok);
+ Expression* ParsePrimaryExpression(bool* ok);
Expression* ParseArrayLiteral(bool* ok);
Expression* ParseObjectLiteral(bool* ok);
+ Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
@@ -664,22 +660,44 @@ class Parser : public ParserBase<ParserTraits> {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
+ bool is_generator() const { return current_function_state_->is_generator(); }
+
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner()->is_literal_ascii()) {
+ if (scanner().is_literal_ascii()) {
return isolate_->factory()->NewStringFromAscii(
- scanner()->literal_ascii_string(), tenured);
+ scanner().literal_ascii_string(), tenured);
} else {
return isolate_->factory()->NewStringFromTwoByte(
- scanner()->literal_utf16_string(), tenured);
+ scanner().literal_utf16_string(), tenured);
}
}
+ Handle<String> NextLiteralString(PretenureFlag tenured) {
+ if (scanner().is_next_literal_ascii()) {
+ return isolate_->factory()->NewStringFromAscii(
+ scanner().next_literal_ascii_string(), tenured);
+ } else {
+ return isolate_->factory()->NewStringFromTwoByte(
+ scanner().next_literal_utf16_string(), tenured);
+ }
+ }
+
+ Handle<String> GetSymbol();
+
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
Literal* GetLiteralTheHole(int position);
+ Handle<String> ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
+ Handle<String> ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok);
+ Handle<String> ParseIdentifierName(bool* ok);
+ Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with an increment/decrement operator. This is currently
// used only for statically checking assignments to harmony const bindings.
@@ -742,21 +760,35 @@ class Parser : public ParserBase<ParserTraits> {
PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
+ AstNodeFactory<AstConstructionVisitor>* factory() {
+ return current_function_state_->factory();
+ }
+
Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
+ Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
+ FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
+ v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
FuncNameInferrer* fni_;
Mode mode_;
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis.
+  // Heuristically, that means the function will be called immediately,
+ // so never lazily compile it.
+ bool parenthesized_function_;
Zone* zone_;
CompilationInfo* info_;
+ friend class BlockState;
+ friend class FunctionState;
};
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index b35cd28454..fbcad8f6d1 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -53,8 +53,7 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index b5ab0cfef1..402d411327 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -276,8 +276,6 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
-#elif V8_HOST_ARCH_A64
- asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index fc4481fd86..fa6f217993 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -55,103 +55,14 @@ int isfinite(double value);
namespace v8 {
namespace internal {
-void PreParserTraits::ReportMessageAt(Scanner::Location location,
- const char* message,
- Vector<const char*> args) {
- ReportMessageAt(location.beg_pos,
- location.end_pos,
- message,
- args.length() > 0 ? args[0] : NULL);
-}
-
-
-void PreParserTraits::ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt) {
- pre_parser_->log_
- ->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
-}
-
-
-void PreParserTraits::ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- pre_parser_->log_->LogMessage(start_pos, end_pos, type, name_opt);
-}
-
-
-PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
- pre_parser_->LogSymbol();
- if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
- return PreParserIdentifier::FutureReserved();
- } else if (scanner->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return PreParserIdentifier::FutureStrictReserved();
- } else if (scanner->current_token() == Token::YIELD) {
- return PreParserIdentifier::Yield();
- }
- if (scanner->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner->literal_length() == 4 &&
- !strncmp(scanner->literal_ascii_string().start(), "eval", 4)) {
- return PreParserIdentifier::Eval();
- }
- if (scanner->literal_length() == 9 &&
- !strncmp(scanner->literal_ascii_string().start(), "arguments", 9)) {
- return PreParserIdentifier::Arguments();
- }
- }
- return PreParserIdentifier::Default();
-}
-
-
-PreParserExpression PreParserTraits::ExpressionFromString(
- int pos, Scanner* scanner, PreParserFactory* factory) {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- pre_parser_->LogSymbol();
- if (scanner->is_literal_ascii() &&
- scanner->literal_length() == kUseStrictLength &&
- !scanner->literal_contains_escapes() &&
- !strncmp(scanner->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return PreParserExpression::UseStrictStringLiteral();
- }
- return PreParserExpression::StringLiteral();
-}
-
-
-PreParserExpression PreParserTraits::ParseArrayLiteral(bool* ok) {
- return pre_parser_->ParseArrayLiteral(ok);
-}
-
-
-PreParserExpression PreParserTraits::ParseObjectLiteral(bool* ok) {
- return pre_parser_->ParseObjectLiteral(ok);
-}
-
-
-PreParserExpression PreParserTraits::ParseExpression(bool accept_IN, bool* ok) {
- return pre_parser_->ParseExpression(accept_IN, ok);
-}
-
-
-PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
- return pre_parser_->ParseV8Intrinsic(ok);
-}
-
-
PreParser::PreParseResult PreParser::PreParseLazyFunction(
LanguageMode mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- PreParserScope top_scope(scope_, GLOBAL_SCOPE);
- FunctionState top_state(&function_state_, &scope_, &top_scope);
- scope_->SetLanguageMode(mode);
- PreParserScope function_scope(scope_, FUNCTION_SCOPE);
- FunctionState function_state(&function_state_, &scope_, &function_scope);
- function_state.set_is_generator(is_generator);
+ Scope top_scope(&scope_, kTopLevelScope);
+ set_language_mode(mode);
+ Scope function_scope(&scope_, kFunctionScope);
+ function_scope.set_is_generator(is_generator);
ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -228,8 +139,8 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- scope_->SetLanguageMode(allow_harmony_scoping() ?
- EXTENDED_MODE : STRICT_MODE);
+ set_language_mode(allow_harmony_scoping() ?
+ EXTENDED_MODE : STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
@@ -324,10 +235,8 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
if (!scope_->is_classic_mode()) {
- PreParserTraits::ReportMessageAt(start_location.beg_pos,
- end_location.end_pos,
- "strict_function",
- NULL);
+ ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+ "strict_function", NULL);
*ok = false;
return Statement::Default();
} else {
@@ -374,7 +283,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (scope_->is_extended_mode()) {
+ if (is_extended_mode()) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -438,19 +347,21 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
Consume(Token::CONST);
- switch (scope_->language_mode()) {
+ switch (language_mode()) {
case CLASSIC_MODE:
break;
case STRICT_MODE: {
Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const");
+ ReportMessageAt(location, "strict_const", NULL);
*ok = false;
return Statement::Default();
}
case EXTENDED_MODE:
if (var_context != kSourceElement &&
var_context != kForStatement) {
- ReportMessageAt(scanner()->peek_location(), "unprotected_const");
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "unprotected_const", NULL);
*ok = false;
return Statement::Default();
}
@@ -464,15 +375,19 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!scope_->is_extended_mode()) {
- ReportMessageAt(scanner()->peek_location(), "illegal_let");
+ if (!is_extended_mode()) {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "illegal_let", NULL);
*ok = false;
return Statement::Default();
}
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- ReportMessageAt(scanner()->peek_location(), "unprotected_let");
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "unprotected_let", NULL);
*ok = false;
return Statement::Default();
}
@@ -616,7 +531,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
if (!scope_->is_classic_mode()) {
- ReportMessageAt(scanner()->location(), "strict_mode_with");
+ Scanner::Location location = scanner()->location();
+ ReportMessageAt(location, "strict_mode_with", NULL);
*ok = false;
return Statement::Default();
}
@@ -624,8 +540,7 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- PreParserScope with_scope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, &with_scope);
+ Scope::InsideWith iw(scope_);
ParseStatement(CHECK_OK);
return Statement::Default();
}
@@ -761,7 +676,8 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- ReportMessageAt(scanner()->location(), "newline_after_throw");
+ Scanner::Location pos = scanner()->location();
+ ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
}
@@ -789,7 +705,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessageAt(scanner()->location(), "no_catch_or_finally");
+ ReportMessageAt(scanner()->location(), "no_catch_or_finally", NULL);
*ok = false;
return Statement::Default();
}
@@ -798,9 +714,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- {
- PreParserScope with_scope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, &with_scope);
+ { Scope::InsideWith iw(scope_);
ParseBlock(CHECK_OK);
}
tok = peek();
@@ -858,7 +772,7 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- if (function_state_->is_generator() && peek() == Token::YIELD) {
+ if (scope_->is_generator() && peek() == Token::YIELD) {
return ParseYieldExpression(ok);
}
@@ -874,8 +788,8 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
return Expression::Default();
}
@@ -884,7 +798,7 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
ParseAssignmentExpression(accept_IN, CHECK_OK);
if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
- function_state_->AddProperty();
+ scope_->AddProperty();
}
return Expression::Default();
@@ -968,8 +882,8 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
}
return Expression::Default();
@@ -991,8 +905,8 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
Scanner::Location after = scanner()->location();
- PreParserTraits::ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
+ ReportMessageAt(before.beg_pos, after.end_pos,
+ "strict_eval_arguments", NULL);
*ok = false;
return Expression::Default();
}
@@ -1007,7 +921,12 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- Expression result = ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ Expression result = Expression::Default();
+ if (peek() == Token::NEW) {
+ result = ParseNewExpression(CHECK_OK);
+ } else {
+ result = ParseMemberExpression(CHECK_OK);
+ }
while (true) {
switch (peek()) {
@@ -1047,38 +966,39 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
}
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- bool* ok) {
+PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
- // See Parser::ParseNewExpression.
-
- if (peek() == Token::NEW) {
+ // The grammar for new expressions is pretty warped. The keyword
+ // 'new' can either be a part of the new expression (where it isn't
+ // followed by an argument list) or a part of the member expression,
+ // where it must be followed by an argument list. To accommodate
+ // this, we parse the 'new' keywords greedily and keep track of how
+ // many we have parsed. This information is then passed on to the
+ // member expression parser, which is only allowed to match argument
+  // lists as long as it has 'new' prefixes left.
+ unsigned new_count = 0;
+ do {
Consume(Token::NEW);
- ParseMemberWithNewPrefixesExpression(CHECK_OK);
- if (peek() == Token::LPAREN) {
- // NewExpression with arguments.
- ParseArguments(CHECK_OK);
- // The expression can still continue with . or [ after the arguments.
- ParseMemberExpressionContinuation(Expression::Default(), CHECK_OK);
- }
- return Expression::Default();
- }
- // No 'new' keyword.
- return ParseMemberExpression(ok);
+ new_count++;
+ } while (peek() == Token::NEW);
+
+ return ParseMemberWithNewPrefixesExpression(new_count, ok);
}
PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
+ return ParseMemberWithNewPrefixesExpression(0, ok);
+}
+
+
+PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
+ unsigned new_count, bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments)*
- // The '[' Expression ']' and '.' Identifier parts are parsed by
- // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
- // caller.
-
// Parse the initial primary or function expression.
Expression result = Expression::Default();
if (peek() == Token::FUNCTION) {
@@ -1101,44 +1021,126 @@ PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
- result = ParseMemberExpressionContinuation(result, CHECK_OK);
- return result;
-}
-
-PreParser::Expression PreParser::ParseMemberExpressionContinuation(
- PreParserExpression expression, bool* ok) {
- // Parses this part of MemberExpression:
- // ('[' Expression ']' | '.' Identifier)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
Expect(Token::RBRACK, CHECK_OK);
- if (expression.IsThis()) {
- expression = Expression::ThisProperty();
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- expression = Expression::Default();
+ result = Expression::Default();
}
break;
}
case Token::PERIOD: {
Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
- if (expression.IsThis()) {
- expression = Expression::ThisProperty();
+ if (result.IsThis()) {
+ result = Expression::ThisProperty();
} else {
- expression = Expression::Default();
+ result = Expression::Default();
}
break;
}
+ case Token::LPAREN: {
+ if (new_count == 0) return result;
+ // Consume one of the new prefixes (already parsed).
+ ParseArguments(CHECK_OK);
+ new_count--;
+ result = Expression::Default();
+ break;
+ }
default:
- return expression;
+ return result;
+ }
+ }
+}
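
The LPAREN case above is where the 'new' prefixes counted by ParseNewExpression get consumed. A few illustrative inputs (an aside on standard 'new' binding, not code from this patch):

//   new f;           // new_count == 1, no '(' follows -> plain NewExpression
//   new f();         // new_count == 1, one argument list consumed
//   new new f()();   // new_count == 2, both argument lists consumed
//   new f()();       // new_count == 1: "new f()" uses it up; the second
//                    // '()' is left for ParseLeftHandSideExpression's
//                    // call handling
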
+
+
+PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ Expression result = Expression::Default();
+ switch (peek()) {
+ case Token::THIS: {
+ Next();
+ result = Expression::This();
+ break;
+ }
+
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::YIELD:
+ case Token::IDENTIFIER: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ Identifier id = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ result = Expression::FromIdentifier(id);
+ break;
+ }
+
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER: {
+ Next();
+ break;
+ }
+ case Token::STRING: {
+ Next();
+ result = GetStringSymbol();
+ break;
+ }
+
+ case Token::ASSIGN_DIV:
+ result = ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ result = ParseV8Intrinsic(CHECK_OK);
+ break;
+
+ default: {
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Expression::Default();
}
}
- ASSERT(false);
- return PreParserExpression::Default();
+
+ return result;
}
@@ -1156,7 +1158,7 @@ PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
}
Expect(Token::RBRACK, CHECK_OK);
- function_state_->NextMaterializedLiteralIndex();
+ scope_->NextMaterializedLiteralIndex();
return Expression::Default();
}
@@ -1168,7 +1170,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
- ObjectLiteralChecker checker(this, scope_->language_mode());
+ ObjectLiteralChecker checker(this, language_mode());
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1213,7 +1215,7 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
case Token::STRING:
Consume(next);
checker.CheckProperty(next, kValueProperty, CHECK_OK);
- LogSymbol();
+ GetStringSymbol();
break;
case Token::NUMBER:
Consume(next);
@@ -1238,7 +1240,29 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- function_state_->NextMaterializedLiteralIndex();
+ scope_->NextMaterializedLiteralIndex();
+ return Expression::Default();
+}
+
+
+PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
+ bool* ok) {
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+
+ scope_->NextMaterializedLiteralIndex();
+
+ if (!scanner()->ScanRegExpFlags()) {
+ Next();
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+ Next();
return Expression::Default();
}
@@ -1276,10 +1300,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Parse function body.
ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->inside_with();
- PreParserScope function_scope(scope_, FUNCTION_SCOPE);
- FunctionState function_state(&function_state_, &scope_, &function_scope);
- function_state.set_is_generator(is_generator);
+ bool inside_with = scope_->IsInsideWith();
+ Scope function_scope(&scope_, kFunctionScope);
+ function_scope.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -1326,7 +1349,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == GLOBAL_SCOPE &&
+ bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
!inside_with && allow_lazy() &&
!parenthesized_function_);
parenthesized_function_ = false;
@@ -1343,27 +1366,31 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// since the function can declare itself strict.
if (!scope_->is_classic_mode()) {
if (function_name.IsEvalOrArguments()) {
- ReportMessageAt(function_name_location, "strict_eval_arguments");
+ ReportMessageAt(function_name_location, "strict_eval_arguments", NULL);
*ok = false;
return Expression::Default();
}
if (name_is_strict_reserved) {
- ReportMessageAt(function_name_location, "unexpected_strict_reserved");
+ ReportMessageAt(
+ function_name_location, "unexpected_strict_reserved", NULL);
*ok = false;
return Expression::Default();
}
if (eval_args_error_loc.IsValid()) {
- ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
+ ReportMessageAt(eval_args_error_loc, "strict_eval_arguments",
+ Vector<const char*>::empty());
*ok = false;
return Expression::Default();
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe");
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe",
+ Vector<const char*>::empty());
*ok = false;
return Expression::Default();
}
if (reserved_error_loc.IsValid()) {
- ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
+ ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved",
+ Vector<const char*>::empty());
*ok = false;
return Expression::Default();
}
@@ -1388,9 +1415,9 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
- function_state_->materialized_literal_count(),
- function_state_->expected_property_count(),
- scope_->language_mode());
+ scope_->materialized_literal_count(),
+ scope_->expected_properties(),
+ language_mode());
}
@@ -1422,4 +1449,157 @@ void PreParser::LogSymbol() {
}
+PreParser::Expression PreParser::GetStringSymbol() {
+ const int kUseStrictLength = 10;
+ const char* kUseStrictChars = "use strict";
+ LogSymbol();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == kUseStrictLength &&
+ !scanner()->literal_contains_escapes() &&
+ !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
+ kUseStrictLength)) {
+ return Expression::UseStrictStringLiteral();
+ }
+ return Expression::StringLiteral();
+}
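
GetStringSymbol deliberately requires literal_contains_escapes() to be false: ES5 section 14.1 defines a Use Strict Directive as the exact code unit sequence "use strict", so a literal written with escapes is an ordinary string and must not flip the language mode. For example:

// "use strict";       // UseStrictStringLiteral -> strict mode
// "use\u0020strict";  // contains an escape -> plain StringLiteral,
//                     // language mode unchanged
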
+
+
+PreParser::Identifier PreParser::GetIdentifierSymbol() {
+ LogSymbol();
+ if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
+ return Identifier::FutureReserved();
+ } else if (scanner()->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
+ return Identifier::FutureStrictReserved();
+ } else if (scanner()->current_token() == Token::YIELD) {
+ return Identifier::Yield();
+ }
+ if (scanner()->is_literal_ascii()) {
+ // Detect strict-mode poison words.
+ if (scanner()->literal_length() == 4 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
+ return Identifier::Eval();
+ }
+ if (scanner()->literal_length() == 9 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
+ return Identifier::Arguments();
+ }
+ }
+ return Identifier::Default();
+}
+
+
+// Parses an identifier that is valid for the current scope; in particular, it
+// fails on strict mode future reserved keywords in a strict scope. If
+// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+// "arguments" as an identifier even in strict mode (this is needed in cases
+// like "var foo = eval;").
+PreParser::Identifier PreParser::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ PreParser::Identifier name = GetIdentifierSymbol();
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ !scope_->is_classic_mode() && name.IsEvalOrArguments()) {
+ ReportMessageAt(scanner()->location(), "strict_eval_arguments", NULL);
+ *ok = false;
+ }
+ return name;
+ } else if (scope_->is_classic_mode() &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !scope_->is_generator()))) {
+ return GetIdentifierSymbol();
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Identifier::Default();
+ }
+}
+
+
+// Parses an identifier or a strict mode future reserved word, and indicates
+// whether it is strict mode future reserved.
+PreParser::Identifier PreParser::ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !scope_->is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Identifier::Default();
+ }
+ return GetIdentifierSymbol();
+}
+
+
+PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER &&
+ next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Identifier::Default();
+ }
+ return GetIdentifierSymbol();
+}
+
+#undef CHECK_OK
+
+
+// This function reads an identifier and determines whether or not it
+// is 'get' or 'set'.
+PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Identifier result = ParseIdentifierName(ok);
+ if (!*ok) return Identifier::Default();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == 3) {
+ const char* token = scanner()->literal_ascii_string().start();
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
+ return result;
+}
+
+
+void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
+ } else if (scanner()->is_literal_ascii()) {
+ old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
+ } else {
+ old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
+ }
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (language_mode_ == CLASSIC_MODE) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
+ }
+ *ok = false;
+ }
+}
+
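CheckProperty() above enforces the ES5 object-literal rules: duplicate data properties are an error only in strict mode, while mixing a data property with an accessor, or repeating the same accessor kind, is always an error. A condensed sketch of the always-an-error cases (the kind values are illustrative, not the PropertyKind bits declared in preparser.h):

    enum Kind { kNone = 0, kGetter = 1, kSetter = 2, kData = 4 };

    // True when adding new_kind after old_kind must be rejected regardless of
    // language mode; the strict-only data/data case is handled separately.
    bool IsAlwaysConflict(int old_kind, int new_kind) {
      if (old_kind == kNone) return false;  // first occurrence of the name
      bool data_accessor = (old_kind == kData) != (new_kind == kData);
      bool same_accessor = old_kind == new_kind && old_kind != kData;
      return data_accessor || same_accessor;
    }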
} } // v8::internal
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index 89c522060f..bcaab743e5 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -29,33 +29,25 @@
#define V8_PREPARSER_H
#include "hashmap.h"
-#include "scopes.h"
#include "token.h"
#include "scanner.h"
-#include "v8.h"
namespace v8 {
namespace internal {
// Common base class shared between parser and pre-parser.
-template <typename Traits>
-class ParserBase : public Traits {
+class ParserBase {
public:
- ParserBase(Scanner* scanner, uintptr_t stack_limit,
- v8::Extension* extension,
- typename Traits::Type::Parser this_object)
- : Traits(this_object),
- parenthesized_function_(false),
- scope_(NULL),
- function_state_(NULL),
- extension_(extension),
- scanner_(scanner),
+ ParserBase(Scanner* scanner, uintptr_t stack_limit)
+ : scanner_(scanner),
stack_limit_(stack_limit),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
allow_for_of_(false) { }
+ // TODO(mstarzinger): Only virtual until message reporting has been unified.
+ virtual ~ParserBase() { }
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
@@ -89,103 +81,14 @@ class ParserBase : public Traits {
kDontAllowEvalOrArguments
};
- // ---------------------------------------------------------------------------
- // FunctionState and BlockState together implement the parser's scope stack.
- // The parser's current scope is in scope_. BlockState and FunctionState
- // constructors push on the scope stack and the destructors pop. They are also
- // used to hold the parser's per-function and per-block state.
- class BlockState BASE_EMBEDDED {
- public:
- BlockState(typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope)
- : scope_stack_(scope_stack),
- outer_scope_(*scope_stack),
- scope_(scope) {
- *scope_stack_ = scope_;
- }
- ~BlockState() { *scope_stack_ = outer_scope_; }
-
- private:
- typename Traits::Type::Scope** scope_stack_;
- typename Traits::Type::Scope* outer_scope_;
- typename Traits::Type::Scope* scope_;
- };
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(
- FunctionState** function_state_stack,
- typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
- typename Traits::Type::Zone* zone = NULL);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_generator() const { return is_generator_; }
-
- void set_generator_object_variable(
- typename Traits::Type::GeneratorVariable* variable) {
- ASSERT(variable != NULL);
- ASSERT(!is_generator());
- generator_object_variable_ = variable;
- is_generator_ = true;
- }
- typename Traits::Type::GeneratorVariable* generator_object_variable()
- const {
- return generator_object_variable_;
- }
-
- typename Traits::Type::Factory* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // Whether the function is a generator.
- bool is_generator_;
- // For generators, this variable may hold the generator object. This variable
- // is used by yield expressions and return statements. It is not necessary
- // for generator functions to have this variable set.
- Variable* generator_object_variable_;
-
- FunctionState** function_state_stack_;
- FunctionState* outer_function_state_;
- typename Traits::Type::Scope** scope_stack_;
- typename Traits::Type::Scope* outer_scope_;
- Isolate* isolate_; // Only used by ParserTraits.
- int saved_ast_node_id_; // Only used by ParserTraits.
- typename Traits::Type::Factory factory_;
-
- friend class ParserTraits;
- };
-
Scanner* scanner() const { return scanner_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
+ virtual bool is_classic_mode() = 0;
+
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
return scanner()->peek();
@@ -229,112 +132,25 @@ class ParserBase : public Traits {
}
}
- void ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
- }
-
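The removed ExpectSemicolon() above implements automatic semicolon insertion per ECMA-262 section 7.9: a missing semicolon is tolerated before '}', at end of input, or when a line terminator precedes the next token. The decision reduces to a predicate like this sketch (the token names are stand-ins for the real Token values):

    enum class Tok { kSemicolon, kRBrace, kEos, kOther };

    // True when ECMA-262 ASI permits omitting an explicit ';'.
    bool CanInsertSemicolon(Tok next, bool line_terminator_before) {
      return next == Tok::kRBrace || next == Tok::kEos ||
             line_terminator_before;
    }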
- bool peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::YIELD;
- }
+ bool peek_any_identifier();
+ void ExpectSemicolon(bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword) {
- if (peek() == Token::IDENTIFIER &&
- scanner()->is_next_contextual_keyword(keyword)) {
- Consume(Token::IDENTIFIER);
- return true;
- }
- return false;
- }
-
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- if (!scanner()->is_literal_contextual_keyword(keyword)) {
- ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
- }
- }
-
- // Checks whether an octal literal was last seen between beg_pos and end_pos.
- // If so, reports an error. Only called for strict mode.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner()->octal_position();
- if (octal.IsValid() && beg_pos <= octal.beg_pos &&
- octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal");
- scanner()->clear_octal_position();
- *ok = false;
- }
- }
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
// Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN) {
- if (token == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
- return Token::Precedence(token);
- }
-
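The removed Precedence() above encodes a small trick: with accept_IN false, 'in' reports precedence 0, below the minimum level the binary-expression loop accepts, so parsing stops before 'in' is consumed; that keeps the 'in' of a for-in header out of the initializer expression. A self-contained sketch with made-up precedence values (the real ones live in Token::Precedence):

    #include <cassert>

    enum class Tok { kIn, kAdd };

    int PrecedenceSketch(Tok token, bool accept_IN) {
      if (token == Tok::kIn && !accept_IN) return 0;  // halts binop parsing
      return token == Tok::kIn ? 7 : 12;              // illustrative values
    }

    int main() {
      assert(PrecedenceSketch(Tok::kIn, false) == 0);  // for-in header case
      assert(PrecedenceSketch(Tok::kIn, true) > 0);    // ordinary expression
      return 0;
    }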
- typename Traits::Type::Factory* factory() {
- return function_state_->factory();
- }
-
- bool is_classic_mode() const { return scope_->is_classic_mode(); }
-
- bool is_generator() const { return function_state_->is_generator(); }
+ static int Precedence(Token::Value token, bool accept_IN);
// Report syntax errors.
- void ReportMessage(const char* message, Vector<const char*> args) {
- Scanner::Location source_location = scanner()->location();
- Traits::ReportMessageAt(source_location, message, args);
- }
-
- void ReportMessageAt(Scanner::Location location, const char* message) {
- Traits::ReportMessageAt(location, message, Vector<const char*>::empty());
- }
-
void ReportUnexpectedToken(Token::Value token);
-
- // Recursive descent functions:
-
- // Parses an identifier that is valid for the current scope; in particular, it
- // fails on strict mode future reserved keywords in a strict scope. If
- // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
- // "arguments" as identifier even in strict mode (this is needed in cases like
- // "var foo = eval;").
- typename Traits::Type::Identifier ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier,
- bool* ok);
- // Parses an identifier or a strict mode future reserved word, and indicates
- // whether it is strict mode future reserved.
- typename Traits::Type::Identifier ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved,
- bool* ok);
- typename Traits::Type::Identifier ParseIdentifierName(bool* ok);
- // Parses an identifier and determines whether or not it is 'get' or 'set'.
- typename Traits::Type::Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- typename Traits::Type::Expression ParseRegExpLiteral(bool seen_equal,
- bool* ok);
-
- typename Traits::Type::Expression ParsePrimaryExpression(bool* ok);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, Vector<const char*>::empty());
+ }
+ virtual void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args) = 0;
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -390,16 +206,6 @@ class ParserBase : public Traits {
LanguageMode language_mode_;
};
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
-
- typename Traits::Type::Scope* scope_; // Scope stack.
- FunctionState* function_state_; // Function state stack.
- v8::Extension* extension_;
-
private:
Scanner* scanner_;
uintptr_t stack_limit_;
@@ -412,279 +218,6 @@ class ParserBase : public Traits {
};
-class PreParserIdentifier {
- public:
- static PreParserIdentifier Default() {
- return PreParserIdentifier(kUnknownIdentifier);
- }
- static PreParserIdentifier Eval() {
- return PreParserIdentifier(kEvalIdentifier);
- }
- static PreParserIdentifier Arguments() {
- return PreParserIdentifier(kArgumentsIdentifier);
- }
- static PreParserIdentifier FutureReserved() {
- return PreParserIdentifier(kFutureReservedIdentifier);
- }
- static PreParserIdentifier FutureStrictReserved() {
- return PreParserIdentifier(kFutureStrictReservedIdentifier);
- }
- static PreParserIdentifier Yield() {
- return PreParserIdentifier(kYieldIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsYield() { return type_ == kYieldIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit PreParserIdentifier(Type type) : type_(type) {}
- Type type_;
-
- friend class PreParserExpression;
-};
-
-
-// Bits 0 and 1 are used to identify the type of expression:
-// If bit 0 is set, it's an identifier.
-// If bit 1 is set, it's a string literal.
-// If neither is set, it's no particular type, and having both set isn't
-// used yet.
-class PreParserExpression {
- public:
- static PreParserExpression Default() {
- return PreParserExpression(kUnknownExpression);
- }
-
- static PreParserExpression FromIdentifier(PreParserIdentifier id) {
- return PreParserExpression(kIdentifierFlag |
- (id.type_ << kIdentifierShift));
- }
-
- static PreParserExpression StringLiteral() {
- return PreParserExpression(kUnknownStringLiteral);
- }
-
- static PreParserExpression UseStrictStringLiteral() {
- return PreParserExpression(kUseStrictString);
- }
-
- static PreParserExpression This() {
- return PreParserExpression(kThisExpression);
- }
-
- static PreParserExpression ThisProperty() {
- return PreParserExpression(kThisPropertyExpression);
- }
-
- static PreParserExpression StrictFunction() {
- return PreParserExpression(kStrictFunctionExpression);
- }
-
- bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
-
- // Only works correctly if it is actually an identifier expression.
- PreParserIdentifier AsIdentifier() {
- return PreParserIdentifier(
- static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
- }
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
-
- bool IsThis() { return code_ == kThisExpression; }
-
- bool IsThisProperty() { return code_ == kThisPropertyExpression; }
-
- bool IsStrictFunction() { return code_ == kStrictFunctionExpression; }
-
- private:
- // First two/three bits are used as flags.
- // Bits 0 and 1 represent identifiers or string literals, and are
- // mutually exclusive, but can both be absent.
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit PreParserExpression(int expression_code) : code_(expression_code) {}
-
- int code_;
-};
-
-
-class PreParserScope {
- public:
- explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
- : scope_type_(scope_type) {
- if (outer_scope) {
- scope_inside_with_ =
- outer_scope->scope_inside_with_ || is_with_scope();
- language_mode_ = outer_scope->language_mode();
- } else {
- scope_inside_with_ = is_with_scope();
- language_mode_ = CLASSIC_MODE;
- }
- }
-
- bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
- bool is_classic_mode() const {
- return language_mode() == CLASSIC_MODE;
- }
- bool is_extended_mode() {
- return language_mode() == EXTENDED_MODE;
- }
- bool inside_with() const {
- return scope_inside_with_;
- }
-
- ScopeType type() { return scope_type_; }
- LanguageMode language_mode() const { return language_mode_; }
- void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- private:
- ScopeType scope_type_;
- bool scope_inside_with_;
- LanguageMode language_mode_;
-};
-
-
-class PreParserFactory {
- public:
- explicit PreParserFactory(void* extra_param) {}
-
- PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
- PreParserIdentifier js_flags,
- int literal_index,
- int pos) {
- return PreParserExpression::Default();
- }
-};
-
-
-class PreParser;
-
-class PreParserTraits {
- public:
- struct Type {
- typedef PreParser* Parser;
-
- // Types used by FunctionState and BlockState.
- typedef PreParserScope Scope;
- typedef PreParserFactory Factory;
- // PreParser doesn't need to store generator variables.
- typedef void GeneratorVariable;
- // No interaction with Zones.
- typedef void Zone;
-
- // Return types for traversing functions.
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- };
-
- explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
-
- // Custom operations executed when FunctionStates are created and
- // destructed. (The PreParser doesn't need to do anything.)
- template<typename FunctionState>
- static void SetUpFunctionState(FunctionState* function_state, void*) {}
- template<typename FunctionState>
- static void TearDownFunctionState(FunctionState* function_state) {}
-
- // Helper functions for recursive descent.
- static bool IsEvalOrArguments(PreParserIdentifier identifier) {
- return identifier.IsEvalOrArguments();
- }
-
- // Reporting errors.
- void ReportMessageAt(Scanner::Location location,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt);
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt);
-
- // "null" return type creators.
- static PreParserIdentifier EmptyIdentifier() {
- return PreParserIdentifier::Default();
- }
- static PreParserExpression EmptyExpression() {
- return PreParserExpression::Default();
- }
-
- // Producing data during the recursive descent.
- PreParserIdentifier GetSymbol(Scanner* scanner);
- static PreParserIdentifier NextLiteralString(Scanner* scanner,
- PretenureFlag tenured) {
- return PreParserIdentifier::Default();
- }
-
- static PreParserExpression ThisExpression(PreParserScope* scope,
- PreParserFactory* factory) {
- return PreParserExpression::This();
- }
-
- static PreParserExpression ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int pos, PreParserScope* scope,
- PreParserFactory* factory) {
- return PreParserExpression::FromIdentifier(name);
- }
-
- PreParserExpression ExpressionFromString(int pos,
- Scanner* scanner,
- PreParserFactory* factory = NULL);
-
- // Temporary glue; these functions will move to ParserBase.
- PreParserExpression ParseArrayLiteral(bool* ok);
- PreParserExpression ParseObjectLiteral(bool* ok);
- PreParserExpression ParseExpression(bool accept_IN, bool* ok);
- PreParserExpression ParseV8Intrinsic(bool* ok);
-
- private:
- PreParser* pre_parser_;
-};
-
-
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
// See preparse-data-format.h for the data format.
@@ -697,11 +230,8 @@ class PreParserTraits {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase<PreParserTraits> {
+class PreParser : public ParserBase {
public:
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
-
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
@@ -710,16 +240,19 @@ class PreParser : public ParserBase<PreParserTraits> {
PreParser(Scanner* scanner,
ParserRecorder* log,
uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, this),
- log_(log) {}
+ : ParserBase(scanner, stack_limit),
+ log_(log),
+ scope_(NULL),
+ parenthesized_function_(false) { }
+
+ ~PreParser() {}
// Pre-parse the program from the character stream; returns kPreParseSuccess
// on success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and kPreParseStackOverflow if a stack overflow
// happened during parsing.
PreParseResult PreParseProgram() {
- PreParserScope scope(scope_, GLOBAL_SCOPE);
- FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
+ Scope top_scope(&scope_, kTopLevelScope);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
ParseSourceElements(Token::EOS, &ok);
@@ -745,13 +278,16 @@ class PreParser : public ParserBase<PreParserTraits> {
ParserRecorder* log);
private:
- friend class PreParserTraits;
-
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or are important
// for throwing the correct syntax error exceptions.
+ enum ScopeType {
+ kTopLevelScope,
+ kFunctionScope
+ };
+
enum VariableDeclarationContext {
kSourceElement,
kStatement,
@@ -764,6 +300,142 @@ class PreParser : public ParserBase<PreParserTraits> {
kHasNoInitializers
};
+ class Expression;
+
+ class Identifier {
+ public:
+ static Identifier Default() {
+ return Identifier(kUnknownIdentifier);
+ }
+ static Identifier Eval() {
+ return Identifier(kEvalIdentifier);
+ }
+ static Identifier Arguments() {
+ return Identifier(kArgumentsIdentifier);
+ }
+ static Identifier FutureReserved() {
+ return Identifier(kFutureReservedIdentifier);
+ }
+ static Identifier FutureStrictReserved() {
+ return Identifier(kFutureStrictReservedIdentifier);
+ }
+ static Identifier Yield() {
+ return Identifier(kYieldIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit Identifier(Type type) : type_(type) { }
+ Type type_;
+
+ friend class Expression;
+ };
+
+ // Bits 0 and 1 are used to identify the type of expression:
+ // If bit 0 is set, it's an identifier.
+ // If bit 1 is set, it's a string literal.
+ // If neither is set, it's no particular type, and having both set isn't
+ // used yet.
+ class Expression {
+ public:
+ static Expression Default() {
+ return Expression(kUnknownExpression);
+ }
+
+ static Expression FromIdentifier(Identifier id) {
+ return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
+ }
+
+ static Expression StringLiteral() {
+ return Expression(kUnknownStringLiteral);
+ }
+
+ static Expression UseStrictStringLiteral() {
+ return Expression(kUseStrictString);
+ }
+
+ static Expression This() {
+ return Expression(kThisExpression);
+ }
+
+ static Expression ThisProperty() {
+ return Expression(kThisPropertyExpression);
+ }
+
+ static Expression StrictFunction() {
+ return Expression(kStrictFunctionExpression);
+ }
+
+ bool IsIdentifier() {
+ return (code_ & kIdentifierFlag) != 0;
+ }
+
+ // Only works correctly if it is actually an identifier expression.
+ PreParser::Identifier AsIdentifier() {
+ return PreParser::Identifier(
+ static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() {
+ return code_ == kThisExpression;
+ }
+
+ bool IsThisProperty() {
+ return code_ == kThisPropertyExpression;
+ }
+
+ bool IsStrictFunction() {
+ return code_ == kStrictFunctionExpression;
+ }
+
+ private:
+ // First two/three bits are used as flags.
+ // Bits 0 and 1 represent identifiers or string literals, and are
+ // mutually exclusive, but can both be absent.
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ // Below here applies if neither identifier nor string literal.
+ kThisExpression = 4,
+ kThisPropertyExpression = 8,
+ kStrictFunctionExpression = 12
+ };
+
+ explicit Expression(int expression_code) : code_(expression_code) { }
+
+ int code_;
+ };
+
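A worked example of the Expression encoding above, using the constants exactly as declared (kIdentifierFlag == 1, kIdentifierShift == 3, and kEvalIdentifier == 4 from the Identifier enum):

    #include <cassert>

    int main() {
      // Expression::FromIdentifier(Identifier::Eval()) encodes as:
      int code = 1 | (4 << 3);   // kIdentifierFlag | (type << shift) == 33
      assert((code & 1) != 0);   // IsIdentifier()
      assert((code & 2) == 0);   // not a string literal; flags are exclusive
      assert((code >> 3) == 4);  // AsIdentifier() recovers kEvalIdentifier
      return 0;
    }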
class Statement {
public:
static Statement Default() {
@@ -817,6 +489,84 @@ class PreParser : public ParserBase<PreParserTraits> {
typedef int Arguments;
+ class Scope {
+ public:
+ Scope(Scope** variable, ScopeType type)
+ : variable_(variable),
+ prev_(*variable),
+ type_(type),
+ materialized_literal_count_(0),
+ expected_properties_(0),
+ with_nesting_count_(0),
+ language_mode_(
+ (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
+ is_generator_(false) {
+ *variable = this;
+ }
+ ~Scope() { *variable_ = prev_; }
+ void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
+ void AddProperty() { expected_properties_++; }
+ ScopeType type() { return type_; }
+ int expected_properties() { return expected_properties_; }
+ int materialized_literal_count() { return materialized_literal_count_; }
+ bool IsInsideWith() { return with_nesting_count_ != 0; }
+ bool is_generator() { return is_generator_; }
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
+ bool is_classic_mode() {
+ return language_mode_ == CLASSIC_MODE;
+ }
+ LanguageMode language_mode() {
+ return language_mode_;
+ }
+ void set_language_mode(LanguageMode language_mode) {
+ language_mode_ = language_mode;
+ }
+
+ class InsideWith {
+ public:
+ explicit InsideWith(Scope* scope) : scope_(scope) {
+ scope->with_nesting_count_++;
+ }
+
+ ~InsideWith() { scope_->with_nesting_count_--; }
+
+ private:
+ Scope* scope_;
+ DISALLOW_COPY_AND_ASSIGN(InsideWith);
+ };
+
+ private:
+ Scope** const variable_;
+ Scope* const prev_;
+ const ScopeType type_;
+ int materialized_literal_count_;
+ int expected_properties_;
+ int with_nesting_count_;
+ LanguageMode language_mode_;
+ bool is_generator_;
+ };
+
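PreParser::Scope above uses an RAII linked-stack idiom: the constructor writes itself into the parser's current-scope slot while remembering the previous value, and the destructor restores it, so nesting stays correct on every exit path (the InsideWith helper applies the same idea to a counter). The core pattern, reduced to a standalone sketch:

    struct ScopeChain {
      ScopeChain** slot;  // the parser's "current scope" variable
      ScopeChain* prev;   // whatever was current before us
      explicit ScopeChain(ScopeChain** s) : slot(s), prev(*s) { *s = this; }
      ~ScopeChain() { *slot = prev; }  // pop on any exit path
    };

    void Example() {
      static ScopeChain* current = nullptr;
      ScopeChain outer(&current);        // current == &outer
      { ScopeChain inner(&current); }    // current is &outer again here
    }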
+ // Report syntax errors.
+ void ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args) {
+ ReportMessageAt(location.beg_pos,
+ location.end_pos,
+ message,
+ args.length() > 0 ? args[0] : NULL);
+ }
+ void ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt) {
+ log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+ }
+ void ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt) {
+ log_->LogMessage(start_pos, end_pos, type, name_opt);
+ }
+
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -854,12 +604,13 @@ class PreParser : public ParserBase<PreParserTraits> {
Expression ParseUnaryExpression(bool* ok);
Expression ParsePostfixExpression(bool* ok);
Expression ParseLeftHandSideExpression(bool* ok);
+ Expression ParseNewExpression(bool* ok);
Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberExpressionContinuation(PreParserExpression expression,
- bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(bool* ok);
+ Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
+ Expression ParsePrimaryExpression(bool* ok);
Expression ParseArrayLiteral(bool* ok);
Expression ParseObjectLiteral(bool* ok);
+ Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
Expression ParseV8Intrinsic(bool* ok);
Arguments ParseArguments(bool* ok);
@@ -871,324 +622,41 @@ class PreParser : public ParserBase<PreParserTraits> {
bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
+ Identifier ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
+ Identifier ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok);
+ Identifier ParseIdentifierName(bool* ok);
+ Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
+ // Log the currently parsed identifier.
+ Identifier GetIdentifierSymbol();
// Log the currently parsed string literal.
Expression GetStringSymbol();
- bool CheckInOrOf(bool accept_OF);
-
- ParserRecorder* log_;
-};
-
-
-template<class Traits>
-ParserBase<Traits>::FunctionState::FunctionState(
- FunctionState** function_state_stack,
- typename Traits::Type::Scope** scope_stack,
- typename Traits::Type::Scope* scope,
- typename Traits::Type::Zone* extra_param)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- is_generator_(false),
- generator_object_variable_(NULL),
- function_state_stack_(function_state_stack),
- outer_function_state_(*function_state_stack),
- scope_stack_(scope_stack),
- outer_scope_(*scope_stack),
- isolate_(NULL),
- saved_ast_node_id_(0),
- factory_(extra_param) {
- *scope_stack_ = scope;
- *function_state_stack = this;
- Traits::SetUpFunctionState(this, extra_param);
-}
-
-
-template<class Traits>
-ParserBase<Traits>::FunctionState::~FunctionState() {
- *scope_stack_ = outer_scope_;
- *function_state_stack_ = outer_function_state_;
- Traits::TearDownFunctionState(this);
-}
-
-
-template<class Traits>
-void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == Token::ILLEGAL && stack_overflow()) {
- return;
- }
- Scanner::Location source_location = scanner()->location();
-
- // Several of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos");
- case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number");
- case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string");
- case Token::IDENTIFIER:
- return ReportMessageAt(source_location, "unexpected_token_identifier");
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved");
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- is_classic_mode() ? "unexpected_token_identifier"
- : "unexpected_strict_reserved");
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- Traits::ReportMessageAt(
- source_location, "unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
-template<class Traits>
-typename Traits::Type::Identifier ParserBase<Traits>::ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- typename Traits::Type::Identifier name = this->GetSymbol(scanner());
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- !is_classic_mode() && this->IsEvalOrArguments(name)) {
- ReportMessageAt(scanner()->location(), "strict_eval_arguments");
- *ok = false;
- }
- return name;
- } else if (is_classic_mode() && (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator()))) {
- return this->GetSymbol(scanner());
- } else {
- this->ReportUnexpectedToken(next);
- *ok = false;
- return Traits::EmptyIdentifier();
- }
-}
-
-
-template <class Traits>
-typename Traits::Type::Identifier ParserBase<
- Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !this->is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Traits::EmptyIdentifier();
- }
- return this->GetSymbol(scanner());
-}
-
-
-template <class Traits>
-typename Traits::Type::Identifier ParserBase<Traits>::ParseIdentifierName(
- bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
- this->ReportUnexpectedToken(next);
- *ok = false;
- return Traits::EmptyIdentifier();
- }
- return this->GetSymbol(scanner());
-}
-
-
-template <class Traits>
-typename Traits::Type::Identifier
-ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- typename Traits::Type::Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Traits::EmptyIdentifier();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == 3) {
- const char* token = scanner()->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ void set_language_mode(LanguageMode language_mode) {
+ scope_->set_language_mode(language_mode);
}
- return result;
-}
-
-
-template <class Traits>
-typename Traits::Type::Expression
-ParserBase<Traits>::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- int pos = peek_position();
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return Traits::EmptyExpression();
- }
-
- int literal_index = function_state_->NextMaterializedLiteralIndex();
- typename Traits::Type::Identifier js_pattern =
- this->NextLiteralString(scanner(), TENURED);
- if (!scanner()->ScanRegExpFlags()) {
- Next();
- ReportMessageAt(scanner()->location(), "invalid_regexp_flags");
- *ok = false;
- return Traits::EmptyExpression();
+ virtual bool is_classic_mode() {
+ return scope_->language_mode() == CLASSIC_MODE;
}
- typename Traits::Type::Identifier js_flags =
- this->NextLiteralString(scanner(), TENURED);
- Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
-}
-
-
-#define CHECK_OK ok); \
- if (!*ok) return this->EmptyExpression(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
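The removed CHECK_OK macro above is worth unpacking, since it relies on the call site supplying the closing parenthesis. A use such as

    ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);

expands to

    ParseIdentifier(kAllowEvalOrArguments, ok);
    if (!*ok) return this->EmptyExpression();
    ((void)0);

so every CHECK_OK call gets an early return on failure, and the DUMMY define/undef pair exists only to rebalance parentheses for editors.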
-template <class Traits>
-typename Traits::Type::Expression ParserBase<Traits>::ParsePrimaryExpression(
- bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- int pos = peek_position();
- typename Traits::Type::Expression result = this->EmptyExpression();
- Token::Value token = peek();
- switch (token) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = this->ThisExpression(scope_, factory());
- break;
- }
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::NUMBER:
- Next();
- result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
- break;
-
- case Token::IDENTIFIER:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- // Using eval or arguments in this context is OK even in strict mode.
- typename Traits::Type::Identifier name =
- ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- result =
- this->ExpressionFromIdentifier(name, pos, scope_, factory());
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- result = this->ExpressionFromString(pos, scanner(), factory());
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = this->ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = this->ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = this->ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = this->ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = this->ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax() || extension_ != NULL) {
- result = this->ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall-through to the
- // default case.
-
- default: {
- Next();
- ReportUnexpectedToken(token);
- *ok = false;
- }
+ bool is_extended_mode() {
+ return scope_->language_mode() == EXTENDED_MODE;
}
- return result;
-}
-
-#undef CHECK_OK
+ LanguageMode language_mode() { return scope_->language_mode(); }
+ bool CheckInOrOf(bool accept_OF);
-template <typename Traits>
-void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
- Token::Value property,
- PropertyKind type,
- bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
- } else if (scanner()->is_literal_ascii()) {
- old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
- } else {
- old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (language_mode_ == CLASSIC_MODE) return;
- parser()->ReportMessageAt(scanner()->location(),
- "strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_data_property");
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_get_set");
- }
- *ok = false;
- }
-}
-
+ ParserRecorder* log_;
+ Scope* scope_;
+ bool parenthesized_function_;
+};
} } // v8::internal
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index 82aa99027a..db7863f809 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -173,29 +173,37 @@ function PromiseCatch(onReject) {
}
function PromiseEnqueue(value, tasks) {
- GetMicrotaskQueue().push(function() {
- for (var i = 0; i < tasks.length; i += 2) {
- PromiseHandle(value, tasks[i], tasks[i + 1])
- }
- });
-
+ promiseEvents.push(value, tasks);
%SetMicrotaskPending(true);
}
-function PromiseHandle(value, handler, deferred) {
- try {
- var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError('promise_cyclic', [result]);
- else if (IsPromise(result))
- result.chain(deferred.resolve, deferred.reject);
- else
- deferred.resolve(result);
- } catch(e) {
- // TODO(rossberg): perhaps log uncaught exceptions below.
- try { deferred.reject(e) } catch(e) {}
+function PromiseMicrotaskRunner() {
+ var events = promiseEvents;
+ if (events.length > 0) {
+ promiseEvents = new InternalArray;
+ for (var i = 0; i < events.length; i += 2) {
+ var value = events[i];
+ var tasks = events[i + 1];
+ for (var j = 0; j < tasks.length; j += 2) {
+ var handler = tasks[j];
+ var deferred = tasks[j + 1];
+ try {
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ result.chain(deferred.resolve, deferred.reject);
+ else
+ deferred.resolve(result);
+ } catch(e) {
+ // TODO(rossberg): perhaps log uncaught exceptions below.
+ try { deferred.reject(e) } catch(e) {}
+ }
+ }
+ }
}
}
+RunMicrotasks.runners.push(PromiseMicrotaskRunner);
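The rewrite above replaces per-enqueue closures with a single runner that detaches the pending event list before walking it, so reactions enqueued while handlers run land in a fresh list for a later microtask checkpoint rather than the current batch. The detach-then-drain shape, sketched in C++ for clarity:

    #include <functional>
    #include <vector>

    static std::vector<std::function<void()>> pending;

    void RunPendingSketch() {
      // Detach first: handlers may push new work, which must not be
      // processed as part of this batch.
      std::vector<std::function<void()>> batch;
      batch.swap(pending);
      for (auto& task : batch) task();
    }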
// Multi-unwrapped chaining with thenable coercion.
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 7bc553a46b..b8baff2c26 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -138,7 +138,7 @@ class Representation {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
- if (IsHeapObject()) return other.IsDouble() || other.IsNone();
+ if (IsHeapObject()) return other.IsNone();
if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
return kind_ > other.kind_;
@@ -233,11 +233,11 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
- int pointer() const { return DescriptorPointer::decode(value_); }
+ int pointer() { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
- PropertyDetails CopyWithRepresentation(Representation representation) const {
+ PropertyDetails CopyWithRepresentation(Representation representation) {
return PropertyDetails(value_, representation);
}
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
@@ -248,7 +248,7 @@ class PropertyDetails BASE_EMBEDDED {
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi() const;
+ inline Smi* AsSmi();
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -258,26 +258,26 @@ class PropertyDetails BASE_EMBEDDED {
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
- PropertyType type() const { return TypeField::decode(value_); }
+ PropertyType type() { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
- int dictionary_index() const {
+ int dictionary_index() {
return DictionaryStorageField::decode(value_);
}
- Representation representation() const {
+ Representation representation() {
ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
- int field_index() const {
+ int field_index() {
return FieldIndexField::decode(value_);
}
- inline PropertyDetails AsDeleted() const;
+ inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index baa5a0f993..da772dc86c 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -187,12 +187,12 @@ class LookupResult BASE_EMBEDDED {
transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
- isolate->set_top_lookup_result(this);
+ isolate->SetTopLookupResult(this);
}
~LookupResult() {
ASSERT(isolate()->top_lookup_result() == this);
- isolate()->set_top_lookup_result(next_);
+ isolate()->SetTopLookupResult(next_);
}
Isolate* isolate() const { return isolate_; }
@@ -200,9 +200,9 @@ class LookupResult BASE_EMBEDDED {
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
- transition_ = NULL;
details_ = details;
number_ = number;
+ transition_ = NULL;
}
bool CanHoldValue(Handle<Object> value) {
@@ -246,93 +246,92 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = NOT_FOUND;
details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
- transition_ = NULL;
}
- JSObject* holder() const {
+ JSObject* holder() {
ASSERT(IsFound());
return JSObject::cast(holder_);
}
- JSProxy* proxy() const {
+ JSProxy* proxy() {
ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
- PropertyType type() const {
+ PropertyType type() {
ASSERT(IsFound());
return details_.type();
}
- Representation representation() const {
+ Representation representation() {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() const {
+ PropertyAttributes GetAttributes() {
ASSERT(!IsTransition());
ASSERT(IsFound());
ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
- PropertyDetails GetPropertyDetails() const {
+ PropertyDetails GetPropertyDetails() {
ASSERT(!IsTransition());
return details_;
}
- bool IsFastPropertyType() const {
+ bool IsFastPropertyType() {
ASSERT(IsFound());
return IsTransition() || type() != NORMAL;
}
// Property callbacks do not include transitions to callbacks.
- bool IsPropertyCallbacks() const {
+ bool IsPropertyCallbacks() {
ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
return details_.type() == CALLBACKS;
}
- bool IsReadOnly() const {
+ bool IsReadOnly() {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
- bool IsField() const {
+ bool IsField() {
ASSERT(!(details_.type() == FIELD && !IsFound()));
return details_.type() == FIELD;
}
- bool IsNormal() const {
+ bool IsNormal() {
ASSERT(!(details_.type() == NORMAL && !IsFound()));
return details_.type() == NORMAL;
}
- bool IsConstant() const {
+ bool IsConstant() {
ASSERT(!(details_.type() == CONSTANT && !IsFound()));
return details_.type() == CONSTANT;
}
- bool IsConstantFunction() const {
+ bool IsConstantFunction() {
return IsConstant() && GetValue()->IsJSFunction();
}
- bool IsDontDelete() const { return details_.IsDontDelete(); }
- bool IsDontEnum() const { return details_.IsDontEnum(); }
- bool IsFound() const { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
+ bool IsDontDelete() { return details_.IsDontDelete(); }
+ bool IsDontEnum() { return details_.IsDontEnum(); }
+ bool IsFound() { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result a property, excluding transitions and the null descriptor?
- bool IsProperty() const {
+ bool IsProperty() {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() const {
+ bool IsDataProperty() {
switch (type()) {
case FIELD:
case NORMAL:
@@ -352,10 +351,10 @@ class LookupResult BASE_EMBEDDED {
return false;
}
- bool IsCacheable() const { return cacheable_; }
+ bool IsCacheable() { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
- Object* GetLazyValue() const {
+ Object* GetLazyValue() {
switch (type()) {
case FIELD:
return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
@@ -380,62 +379,66 @@ class LookupResult BASE_EMBEDDED {
return NULL;
}
- Map* GetTransitionTarget() const {
+ Map* GetTransitionTarget() {
return transition_;
}
- PropertyDetails GetTransitionDetails() const {
- ASSERT(IsTransition());
+ PropertyDetails GetTransitionDetails() {
return transition_->GetLastDescriptorDetails();
}
- bool IsTransitionToField() const {
+ bool IsTransitionToField() {
return IsTransition() && GetTransitionDetails().type() == FIELD;
}
- bool IsTransitionToConstant() const {
+ bool IsTransitionToConstant() {
return IsTransition() && GetTransitionDetails().type() == CONSTANT;
}
- int GetDescriptorIndex() const {
+ int GetTransitionIndex() {
+ ASSERT(IsTransition());
+ return number_;
+ }
+
+ int GetDescriptorIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return number_;
}
- PropertyIndex GetFieldIndex() const {
+ PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
- int GetLocalFieldIndexFromMap(Map* map) const {
+ int GetLocalFieldIndexFromMap(Map* map) {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() const {
+ int GetDictionaryEntry() {
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return number_;
}
- JSFunction* GetConstantFunction() const {
+ JSFunction* GetConstantFunction() {
ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
- Object* GetConstantFromMap(Map* map) const {
+ Object* GetConstantFromMap(Map* map) {
ASSERT(type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) const {
+ JSFunction* GetConstantFunctionFromMap(Map* map) {
return JSFunction::cast(GetConstantFromMap(map));
}
- Object* GetConstant() const {
+ Object* GetConstant() {
ASSERT(type() == CONSTANT);
return GetValue();
}
- Object* GetCallbackObject() const {
+ Object* GetCallbackObject() {
ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -444,7 +447,7 @@ class LookupResult BASE_EMBEDDED {
void Print(FILE* out);
#endif
- Object* GetValue() const {
+ Object* GetValue() {
if (lookup_type_ == DESCRIPTOR_TYPE) {
return GetValueFromMap(holder()->map());
}
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index bf289186e2..c446b4b49f 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -38,8 +38,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 6);
- const char* impl_names[] = {"IA32", "ARM", "A64", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 5);
+ const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 2ac9c86d82..1ff8bd9797 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -53,7 +53,6 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
- kA64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 50621a997d..ac9a2c0c9e 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -957,6 +957,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ if (source->IsJSTypedArray() &&
+ JSTypedArray::cast(*source)->type() == array_type) {
+ length_obj = Handle<Object>(JSTypedArray::cast(*source)->length(), isolate);
+ }
size_t length = NumberToSize(isolate, *length_obj);
if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
@@ -6541,6 +6545,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
}
+static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
+ return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
+}
+
+
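IsTrimWhiteSpace() above widens the trim set beyond unibrow::WhiteSpace to include ZERO WIDTH SPACE (U+200B) and the byte order mark (U+FEFF). A standalone approximation, where the first clause stands in for unibrow::WhiteSpace::Is():

    bool IsTrimWhiteSpaceSketch(unsigned c) {
      bool basic = c == ' ' || c == '\t' || c == '\n' || c == '\r';
      return basic || c == 0x200b || c == 0xfeff;
    }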
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -6553,19 +6562,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
int length = string->length();
int left = 0;
- UnicodeCache* unicode_cache = isolate->unicode_cache();
if (trimLeft) {
- while (left < length &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+ while (left < length && IsTrimWhiteSpace(string->Get(left))) {
left++;
}
}
int right = length;
if (trimRight) {
- while (right > left &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(
- string->Get(right - 1))) {
+ while (right > left && IsTrimWhiteSpace(string->Get(right - 1))) {
right--;
}
}
@@ -7829,16 +7834,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_fround) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- float xf = static_cast<float>(x);
- return isolate->heap()->AllocateHeapNumber(xf);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -8484,7 +8479,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackInfo(isolate->heap());
+ unoptimized->ClearTypeFeedbackCells(isolate->heap());
}
return isolate->heap()->undefined_value();
}
@@ -14275,11 +14270,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- const char* message = GetBailoutReason(
- static_cast<BailoutReason>(message_id));
- OS::PrintError("abort: %s\n", message);
+ ASSERT(args.length() == 2);
+ OS::PrintError("abort: %s\n",
+ reinterpret_cast<char*>(args[0]) + args.smi_at(1));
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14608,21 +14601,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RunMicrotasks) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 0);
- Execution::RunMicrotasks(isolate);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetMicrotaskState) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 0);
- return isolate->heap()->microtask_state();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -14661,7 +14639,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- ASSERT(object->map()->is_access_check_needed());
+ ASSERT(object->IsAccessCheckNeeded());
Handle<Object> key = args.at<Object>(2);
SaveContext save(isolate);
isolate->set_context(observer->context());
@@ -14774,7 +14752,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
Handle<AllocationSite> site;
if (!type_info.is_null() &&
- *type_info != isolate->heap()->null_value() &&
*type_info != isolate->heap()->undefined_value()) {
site = Handle<AllocationSite>::cast(type_info);
ASSERT(!site->SitePointsToLiteral());
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 6e79dbed30..61c019c068 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -185,7 +185,6 @@ namespace internal {
F(Math_pow_cfunction, 2, 1) \
F(RoundNumber, 1, 1) \
F(Math_sqrt, 1, 1) \
- F(Math_fround, 1, 1) \
\
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
@@ -309,9 +308,6 @@ namespace internal {
/* ES5 */ \
F(ObjectFreeze, 1, 1) \
\
- /* Harmony Microtasks */ \
- F(GetMicrotaskState, 0, 1) \
- \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
@@ -355,7 +351,6 @@ namespace internal {
\
/* Harmony events */ \
F(SetMicrotaskPending, 1, 1) \
- F(RunMicrotasks, 0, 1) \
\
/* Harmony observe */ \
F(IsObserved, 1, 1) \
@@ -442,7 +437,7 @@ namespace internal {
F(DebugTrace, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(Abort, 1, 1) \
+ F(Abort, 2, 1) \
F(AbortJS, 1, 1) \
/* Logging */ \
F(Log, 2, 1) \
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index 33f46c7701..cb98b6fdcf 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -54,8 +54,7 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -98,18 +97,6 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
-#elif defined(__aarch64__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint64_t uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
@@ -239,27 +226,13 @@ class SimulatorHelper {
}
inline void FillRegisters(RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
+#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
-#elif V8_TARGET_ARCH_A64
- if (simulator_->sp() == 0 || simulator_->fp() == 0) {
- // It is possible that the simulator is interrupted while it is updating
- // the sp or fp register. The A64 simulator does this in two steps:
- // first setting it to zero and then setting it to the new value.
- // Bail out if sp/fp doesn't contain the new value.
- return;
- }
- state->pc = reinterpret_cast<Address>(simulator_->pc());
- state->sp = reinterpret_cast<Address>(simulator_->sp());
- state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
@@ -356,11 +329,6 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
-    // It is possible that the simulator is interrupted while it is updating
-    // the sp or fp register. The A64 simulator does this in two steps:
-    // first setting it to zero and then setting it to the new value.
-    // Bail out if sp/fp doesn't contain the new value.
- if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -390,11 +358,6 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-#elif V8_HOST_ARCH_A64
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.sp);
- // FP is an alias for x29.
- state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 27768547fb..26f840b23a 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -246,8 +246,7 @@ Token::Value Scanner::Next() {
}
-// TODO(yangguo): check whether this is actually necessary.
-static inline bool IsLittleEndianByteOrderMark(uc32 c) {
+static inline bool IsByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
// 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
@@ -255,7 +254,7 @@ static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// not be a U+FFFE character expressed in big-endian byte
// order). Nevertheless, we check for it to be compatible with
// Spidermonkey.
- return c == 0xFFFE;
+ return c == 0xFEFF || c == 0xFFFE;
}
@@ -263,14 +262,14 @@ bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
- while (true) {
- // Advance as long as character is a WhiteSpace or LineTerminator.
- // Remember if the latter is the case.
+ // We treat byte-order marks (BOMs) as whitespace for better
+ // compatibility with Spidermonkey and other JavaScript engines.
+ while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
+ // IsWhiteSpace() includes line terminators!
if (unicode_cache_->IsLineTerminator(c0_)) {
+ // Ignore line terminators, but remember them. This is necessary
+ // for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
- } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
- !IsLittleEndianByteOrderMark(c0_)) {
- break;
}
Advance();
}
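
For reference, here is a minimal standalone sketch (not part of the patch) of the loop shape above. IsWhiteSpaceStandIn() is a hypothetical ASCII-only stand-in for the table-driven predicate behind unicode_cache_; the point is simply that BOMs are consumed exactly like whitespace, matching Spidermonkey.

#include <cstddef>
#include <cstdint>

typedef uint32_t uc32;

static inline bool IsByteOrderMark(uc32 c) {
  // U+FEFF is the BOM proper; U+FFFE can only be a byte-swapped BOM, since
  // it is guaranteed never to be assigned as a character.
  return c == 0xFEFF || c == 0xFFFE;
}

static inline bool IsWhiteSpaceStandIn(uc32 c) {
  // Hypothetical ASCII-only subset; V8's real predicate is table-driven.
  return c == ' ' || c == '\t' || c == '\n' || c == '\r';
}

// Returns the index of the first character that is neither whitespace nor a
// BOM, mirroring the shape of the simplified SkipWhiteSpace() loop.
static size_t SkipWhiteSpaceAndBoms(const uc32* chars, size_t length) {
  size_t i = 0;
  while (i < length &&
         (IsWhiteSpaceStandIn(chars[i]) || IsByteOrderMark(chars[i]))) {
    i++;
  }
  return i;
}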
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index b08692b3ae..3cefc833ac 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -139,17 +139,12 @@ class UnicodeCache {
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
- bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
- return kIsWhiteSpaceOrLineTerminator.get(c);
- }
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
- unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
- kIsWhiteSpaceOrLineTerminator;
+ unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 650f57c616..97b67bd5a4 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -307,7 +307,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->PrepareForCompilation(scope);
+ info->SetScope(scope);
return true;
}
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 14b1b9cc96..5adc2b8995 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -789,7 +789,6 @@ void Deserializer::Deserialize(Isolate* isolate) {
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
- isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -1254,6 +1253,7 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
+ current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
@@ -1279,7 +1279,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
- isolate->heap()->IterateSmiRoots(this);
+
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 2ad9bb17ed..ee9df39ad8 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -579,6 +579,7 @@ class Serializer : public SerializerDeserializer {
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
+ int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index c16e7fffdc..485e930645 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -32,8 +32,6 @@
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/simulator-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 6c03daa75a..a80341bd7f 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -483,7 +483,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
+ chunk->parallel_sweeping_ = 0;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -560,12 +560,21 @@ bool MemoryChunk::CommitArea(size_t requested) {
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- MemoryChunk* other_next = other->next_chunk();
+ next_chunk_ = other->next_chunk_;
+ prev_chunk_ = other;
- set_next_chunk(other_next);
- set_prev_chunk(other);
- other_next->set_prev_chunk(this);
- other->set_next_chunk(this);
+ // This memory barrier is needed since concurrent sweeper threads may iterate
+ // over the list of pages while a new page is inserted.
+  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
+  // expanded concurrently.
+  MemoryBarrier();
+
+  // The following two write operations can take effect in arbitrary order
+  // since pages are always iterated by the sweeper threads in LIFO order,
+  // i.e., the inserted page becomes visible to the sweeper threads only
+  // after other->next_chunk_ = this;
+ other->next_chunk_->prev_chunk_ = this;
+ other->next_chunk_ = this;
}
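
The ordering argument above is worth seeing in isolation. Below is a sketch (not V8's actual code) of the same publication pattern on a minimal doubly linked list; std::atomic_thread_fence stands in for V8's MemoryBarrier(), and the sketch glosses over the formal data-race rules of the C++11 model, just as the original relies on V8's own barrier semantics.

#include <atomic>

struct Node {
  Node* next;
  Node* prev;
};

void InsertAfter(Node* node, Node* other) {
  // Fully initialize the new node's links first.
  node->next = other->next;
  node->prev = other;
  // Fence so a concurrent reader can never observe the node in the list
  // before its link fields are valid.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  // Publish. Readers start from other and walk forward, so the node only
  // becomes reachable once other->next points at it; the order of these two
  // stores therefore does not matter, as the comment above argues.
  other->next->prev = node;
  other->next = node;
}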
@@ -574,12 +583,10 @@ void MemoryChunk::Unlink() {
heap_->decrement_scan_on_scavenge_pages();
ClearFlag(SCAN_ON_SCAVENGE);
}
- MemoryChunk* next_element = next_chunk();
- MemoryChunk* prev_element = prev_chunk();
- next_element->set_prev_chunk(prev_element);
- prev_element->set_next_chunk(next_element);
- set_prev_chunk(NULL);
- set_next_chunk(NULL);
+ next_chunk_->prev_chunk_ = prev_chunk_;
+ prev_chunk_->next_chunk_ = next_chunk_;
+ prev_chunk_ = NULL;
+ next_chunk_ = NULL;
}
@@ -2075,21 +2082,20 @@ void FreeListNode::set_next(FreeListNode* next) {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top() != NULL) {
+ if (category->top_ != NULL) {
+ ASSERT(category->end_ != NULL);
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
- ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top());
+ category->end()->set_next(top_);
}
- set_top(category->top());
- NoBarrier_Store(&top_, category->top_);
+ top_ = category->top();
available_ += category->available();
category->Reset();
}
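
The deadlock-freedom claim above leans on a caller-side invariant: the same pair of free lists is never concatenated in both directions at once. For comparison, a self-contained sketch (hypothetical Category type, not V8 code) of the standard alternative that needs no such invariant, address-ordered locking:

#include <functional>
#include <mutex>

struct Category {
  std::mutex mutex;
  long available;
};

// Moves all free bytes from source to target. Taking both locks in address
// order makes the pair of acquisitions deadlock-free regardless of how
// callers pair up the categories.
long Concatenate(Category* target, Category* source) {
  if (target == source) return 0;  // avoid locking the same mutex twice
  Category* first = std::less<Category*>()(target, source) ? target : source;
  Category* second = (first == target) ? source : target;
  std::lock_guard<std::mutex> first_guard(first->mutex);
  std::lock_guard<std::mutex> second_guard(second->mutex);
  long moved = source->available;
  target->available += moved;
  source->available = 0;
  return moved;
}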
@@ -2098,16 +2104,15 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
void FreeListCategory::Reset() {
- set_top(NULL);
- set_end(NULL);
- set_available(0);
+ top_ = NULL;
+ end_ = NULL;
+ available_ = 0;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode* t = top();
- FreeListNode** n = &t;
+ FreeListNode** n = &top_;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2117,9 +2122,8 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
n = (*n)->next_address();
}
}
- set_top(t);
- if (top() == NULL) {
- set_end(NULL);
+ if (top_ == NULL) {
+ end_ = NULL;
}
available_ -= sum;
return sum;
@@ -2127,17 +2131,17 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode* node = top();
- while (node != NULL) {
- if (Page::FromAddress(node->address()) == p) return true;
- node = node->next();
+ FreeListNode** n = &top_;
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) return true;
+ n = (*n)->next_address();
}
return false;
}
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top();
+ FreeListNode* node = top_;
if (node == NULL) return NULL;
@@ -2176,8 +2180,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top());
- set_top(node);
+ node->set_next(top_);
+ top_ = node;
if (end_ == NULL) {
end_ = node;
}
@@ -2186,7 +2190,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top();
+ FreeListNode* n = top_;
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2295,8 +2299,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- FreeListNode* top_node = huge_list_.top();
- for (FreeListNode** cur = &top_node;
+ for (FreeListNode** cur = huge_list_.GetTopAddress();
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
@@ -2330,7 +2333,6 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
- huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
@@ -2484,7 +2486,7 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top();
+ FreeListNode* cur = top_;
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
@@ -2500,7 +2502,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top();
+ FreeListNode* cur = top_;
while (cur != NULL) {
length++;
cur = cur->next();
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 770b88a9fb..9d47f81ac6 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -313,21 +313,11 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const {
- return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
- }
-
- MemoryChunk* prev_chunk() const {
- return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
- }
+ MemoryChunk* next_chunk() const { return next_chunk_; }
+ MemoryChunk* prev_chunk() const { return prev_chunk_; }
- void set_next_chunk(MemoryChunk* next) {
- Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
- }
-
- void set_prev_chunk(MemoryChunk* prev) {
- Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
- }
+ void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
@@ -467,32 +457,16 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
-
- // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
- // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept or was
- // swept by a sweeper thread.
- // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
- // sweeping must not be performed on that page.
- enum ParallelSweepingState {
- PARALLEL_SWEEPING_DONE,
- PARALLEL_SWEEPING_IN_PROGRESS,
- PARALLEL_SWEEPING_PENDING
- };
-
- ParallelSweepingState parallel_sweeping() {
- return static_cast<ParallelSweepingState>(
- NoBarrier_Load(&parallel_sweeping_));
+ intptr_t parallel_sweeping() const {
+ return parallel_sweeping_;
}
- void set_parallel_sweeping(ParallelSweepingState state) {
- NoBarrier_Store(&parallel_sweeping_, state);
+ void set_parallel_sweeping(intptr_t state) {
+ parallel_sweeping_ = state;
}
bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_,
- PARALLEL_SWEEPING_PENDING,
- PARALLEL_SWEEPING_IN_PROGRESS) ==
- PARALLEL_SWEEPING_PENDING;
+ return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
}
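
A sketch of the claim-a-page handshake this boils down to, with std::atomic standing in for V8's NoBarrier_CompareAndSwap and the simplified 1/0 encoding (1 = sweeping pending, 0 = claimed) used above:

#include <atomic>
#include <cstdint>

// One flag per page: 1 = sweeping pending, 0 = claimed or done.
std::atomic<intptr_t> parallel_sweeping(1);

bool TryParallelSweeping() {
  intptr_t expected = 1;
  // Exactly one sweeper thread wins the 1 -> 0 exchange; the rest observe
  // a value that is no longer 1 and skip the page.
  return parallel_sweeping.compare_exchange_strong(expected, 0);
}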
// Manage live byte count (count of bytes known to be live,
@@ -562,7 +536,7 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- static const intptr_t kSizeOffset = 0;
+ static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
@@ -576,8 +550,7 @@ class MemoryChunk {
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize +
- kPointerSize + kPointerSize;
+ 5 * kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -649,7 +622,7 @@ class MemoryChunk {
inline Heap* heap() { return heap_; }
- static const int kFlagsOffset = kPointerSize;
+ static const int kFlagsOffset = kPointerSize * 3;
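
These constants follow from moving next_chunk_ and prev_chunk_ to the front of the header (see the protected members added below): every field that used to start the chunk shifts by two words. A compile-time sketch of the arithmetic, using a hypothetical struct with just the first four fields:

#include <cstddef>
#include <cstdint>

const intptr_t kPointerSizeSketch = sizeof(void*);

struct ChunkHeaderSketch {
  ChunkHeaderSketch* next_chunk_;
  ChunkHeaderSketch* prev_chunk_;
  size_t size_;
  intptr_t flags_;
};

// On common ABIs, where pointers, size_t, and intptr_t are all word-sized:
static_assert(offsetof(ChunkHeaderSketch, size_) == 2 * kPointerSizeSketch,
              "size_ sits after the two list links");
static_assert(offsetof(ChunkHeaderSketch, flags_) == 3 * kPointerSizeSketch,
              "flags_ follows size_");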
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
@@ -698,6 +671,8 @@ class MemoryChunk {
static inline void UpdateHighWaterMark(Address mark);
protected:
+ MemoryChunk* next_chunk_;
+ MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
@@ -727,7 +702,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- AtomicWord parallel_sweeping_;
+ intptr_t parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@@ -744,12 +719,6 @@ class MemoryChunk {
Executability executable,
Space* owner);
- private:
- // next_chunk_ holds a pointer of type MemoryChunk
- AtomicWord next_chunk_;
- // prev_chunk_ holds a pointer of type MemoryChunk
- AtomicWord prev_chunk_;
-
friend class MemoryAllocator;
};
@@ -1534,7 +1503,7 @@ class FreeListNode: public HeapObject {
class FreeListCategory {
public:
FreeListCategory() :
- top_(0),
+ top_(NULL),
end_(NULL),
available_(0) {}
@@ -1552,13 +1521,9 @@ class FreeListCategory {
void RepairFreeList(Heap* heap);
- FreeListNode* top() const {
- return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
- }
-
- void set_top(FreeListNode* top) {
- NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
- }
+ FreeListNode** GetTopAddress() { return &top_; }
+ FreeListNode* top() const { return top_; }
+ void set_top(FreeListNode* top) { top_ = top; }
FreeListNode** GetEndAddress() { return &end_; }
FreeListNode* end() const { return end_; }
@@ -1571,7 +1536,7 @@ class FreeListCategory {
Mutex* mutex() { return &mutex_; }
bool IsEmpty() {
- return top() == 0;
+ return top_ == NULL;
}
#ifdef DEBUG
@@ -1580,8 +1545,7 @@ class FreeListCategory {
#endif
private:
- // top_ points to the top FreeListNode* in the free list category.
- AtomicWord top_;
+ FreeListNode* top_;
FreeListNode* end_;
Mutex mutex_;
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 132ed711aa..5dfce55fb9 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -118,7 +118,7 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
Code::Kind kind,
InlineCacheHolderFlag cache_holder) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL);
+ Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL, kind);
Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -127,11 +127,11 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Code> StubCache::ComputeMonomorphicIC(
- Code::Kind kind,
Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state) {
+ Code::Kind kind = handler->handler_kind();
InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
Handle<Map> stub_holder;
@@ -369,13 +369,14 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Code> StubCache::ComputePolymorphicIC(
- Code::Kind kind,
TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_types,
Handle<Name> name,
ExtraICState extra_ic_state) {
+
Handle<Code> handler = handlers->at(0);
+ Code::Kind kind = handler->handler_kind();
Code::StubType type = number_of_valid_types == 1 ? handler->type()
: Code::NORMAL;
if (kind == Code::LOAD_IC) {
@@ -689,7 +690,9 @@ Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- LoadIC::GenerateMegamorphic(masm());
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ ContextualMode mode = LoadIC::GetContextualMode(extra_state);
+ LoadIC::GenerateMegamorphic(masm(), mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
@@ -731,7 +734,8 @@ Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- StoreIC::GenerateMegamorphic(masm());
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ StoreIC::GenerateMegamorphic(masm(), extra_state);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
@@ -947,10 +951,8 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
ASSERT(call_optimization.is_simple_api_call());
Handle<JSFunction> callback = call_optimization.constant_function();
CallbackHandlerFrontend(type, receiver(), holder, name, callback);
- Handle<Map>receiver_map = IC::TypeToMap(*type, isolate());
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), false, 0, NULL);
+ GenerateLoadCallback(call_optimization, IC::TypeToMap(*type, isolate()));
+
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1129,22 +1131,6 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), true, 1, values);
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
Handle<Map> receiver_map) {
ElementsKind elements_kind = receiver_map->elements_kind();
@@ -1250,8 +1236,8 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- ASSERT_EQ(kNoExtraICState, extra_state());
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_state(), type, kind, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 7eca6bb1d8..f55c440ea4 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -91,8 +91,7 @@ class StubCache {
Code::Kind kind,
InlineCacheHolderFlag cache_holder = OWN_MAP);
- Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
- Handle<Name> name,
+ Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state);
@@ -123,8 +122,7 @@ class StubCache {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- Handle<Code> ComputePolymorphicIC(Code::Kind kind,
- TypeHandleList* types,
+ Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
@@ -406,15 +404,6 @@ class StubCompiler BASE_EMBEDDED {
void GenerateBooleanCheck(Register object, Label* miss);
- static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch,
- bool is_store,
- int argc,
- Register* values);
-
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index 7e8305abe8..097b594a74 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -45,7 +45,6 @@ SweeperThread::SweeperThread(Isolate* isolate)
start_sweeping_semaphore_(0),
end_sweeping_semaphore_(0),
stop_semaphore_(0) {
- ASSERT(!FLAG_job_based_sweeping);
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
}
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 7372693dfa..2ca04b88fc 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -47,12 +47,6 @@ TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Zone* zone)
: native_context_(native_context),
zone_(zone) {
- Object* raw_info = code->type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- feedback_vector_ = Handle<FixedArray>(TypeFeedbackInfo::cast(raw_info)->
- feedback_vector());
- }
-
BuildDictionary(code);
ASSERT(dictionary_->IsDictionary());
}
@@ -78,17 +72,6 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
}
-Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
- ASSERT(slot >= 0 && slot < feedback_vector_->length());
- Object* obj = feedback_vector_->get(slot);
- if (!obj->IsJSFunction() ||
- !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
- return Handle<Object>(obj, isolate());
- }
- return Handle<Object>::cast(isolate()->factory()->undefined_value());
-}
-
-
bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
Handle<Object> maybe_code = GetInfo(id);
if (maybe_code->IsCode()) {
@@ -118,22 +101,22 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
- Handle<Object> value = GetInfo(slot);
+bool TypeFeedbackOracle::CallIsMonomorphic(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
return value->IsAllocationSite() || value->IsJSFunction();
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
- Handle<Object> info = GetInfo(slot);
+bool TypeFeedbackOracle::CallNewIsMonomorphic(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
return info->IsAllocationSite() || info->IsJSFunction();
}
-byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
- Handle<Object> value = GetInfo(feedback_vector_slot);
+byte TypeFeedbackOracle::ForInType(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackInfo::kForInFastCaseMarker
+ Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -151,8 +134,8 @@ KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
- Handle<Object> info = GetInfo(slot);
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate()->global_context()->array_function());
} else {
@@ -161,8 +144,8 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
- Handle<Object> info = GetInfo(slot);
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate()->global_context()->array_function());
} else {
@@ -171,8 +154,9 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
}
-Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) {
- Handle<Object> info = GetInfo(slot);
+Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
+ TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
}
@@ -222,7 +206,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
CompareIC::StubInfoToType(
stub_minor_key, left_type, right_type, combined_type, map, zone());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(code->extra_ic_state());
+ CompareNilICStub stub(code->extended_extra_ic_state());
*combined_type = stub.GetType(zone(), map);
*left_type = *right_type = stub.GetInputType(zone(), map);
}
@@ -249,7 +233,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extra_ic_state());
+ BinaryOpIC::State state(code->extended_extra_ic_state());
ASSERT_EQ(op, state.op());
*left = state.GetLeftType(zone());
@@ -271,7 +255,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
if (!object->IsCode()) return Type::None(zone());
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extra_ic_state());
+ BinaryOpIC::State state(code->extended_extra_ic_state());
return state.GetLeftType(zone());
}
@@ -283,7 +267,9 @@ void TypeFeedbackOracle::PropertyReceiverTypes(
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
*is_prototype = LoadIsStub(id, &proto_stub);
if (!*is_prototype) {
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
}
@@ -304,7 +290,9 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
void TypeFeedbackOracle::AssignmentReceiverTypes(
TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
receiver_types->Clear();
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
+ Code::NORMAL, Code::STORE_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
@@ -421,6 +409,7 @@ void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
+ ProcessTypeFeedbackCells(code);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
@@ -438,9 +427,13 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
AllowHeapAllocation allocation_allowed;
+ int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
+ ? TypeFeedbackInfo::cast(code->type_feedback_info())->
+ type_feedback_cells()->CellCount()
+ : 0;
+ int length = infos->length() + cell_count;
byte* old_start = code->instruction_start();
- dictionary_ =
- isolate()->factory()->NewUnseededNumberDictionary(infos->length());
+ dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
byte* new_start = code->instruction_start();
RelocateRelocInfos(infos, old_start, new_start);
}
@@ -482,6 +475,26 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
+void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
+ Object* raw_info = code->type_feedback_info();
+ if (!raw_info->IsTypeFeedbackInfo()) return;
+ Handle<TypeFeedbackCells> cache(
+ TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
+ for (int i = 0; i < cache->CellCount(); i++) {
+ TypeFeedbackId ast_id = cache->AstId(i);
+ Cell* cell = cache->GetCell(i);
+ Object* value = cell->value();
+ if (value->IsSmi() ||
+ value->IsAllocationSite() ||
+ (value->IsJSFunction() &&
+ !CanRetainOtherContext(JSFunction::cast(value),
+ *native_context_))) {
+ SetInfo(ast_id, cell);
+ }
+ }
+}
+
+
void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
UnseededNumberDictionary::kNotFound);
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 6de92cec0e..8661d5057b 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -50,16 +50,14 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsUninitialized(TypeFeedbackId id);
bool StoreIsUninitialized(TypeFeedbackId id);
bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
- bool CallIsMonomorphic(int slot);
bool CallIsMonomorphic(TypeFeedbackId aid);
- bool KeyedArrayCallIsHoley(TypeFeedbackId id);
- bool CallNewIsMonomorphic(int slot);
+ bool CallNewIsMonomorphic(TypeFeedbackId id);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
// TODO(rossberg): once all oracle access is removed from ast.cc, it should
// be possible.
- byte ForInType(int feedback_vector_slot);
+ byte ForInType(TypeFeedbackId id);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
@@ -86,9 +84,9 @@ class TypeFeedbackOracle: public ZoneObject {
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- Handle<JSFunction> GetCallTarget(int slot);
- Handle<JSFunction> GetCallNewTarget(int slot);
- Handle<AllocationSite> GetCallNewAllocationSite(int slot);
+ Handle<JSFunction> GetCallTarget(TypeFeedbackId id);
+ Handle<JSFunction> GetCallNewTarget(TypeFeedbackId id);
+ Handle<AllocationSite> GetCallNewAllocationSite(TypeFeedbackId id);
bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
@@ -132,20 +130,16 @@ class TypeFeedbackOracle: public ZoneObject {
byte* old_start,
byte* new_start);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
+ void ProcessTypeFeedbackCells(Handle<Code> code);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(TypeFeedbackId id);
- // Returns an element from the type feedback vector. Returns undefined
- // if there is no information.
- Handle<Object> GetInfo(int slot);
-
private:
Handle<Context> native_context_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
- Handle<FixedArray> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 0c0cb71b2a..81c52961c0 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -49,7 +49,7 @@ endmacro
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var bufferByteLength = buffer.byteLength;
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
var offset;
if (IS_UNDEFINED(byteOffset)) {
offset = 0;
@@ -58,7 +58,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (offset % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- ["start offset", "NAME", ELEMENT_SIZE]);
+ "start offset", "NAME", ELEMENT_SIZE);
}
if (offset > bufferByteLength) {
throw MakeRangeError("invalid_typed_array_offset");
@@ -70,7 +70,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (IS_UNDEFINED(length)) {
if (bufferByteLength % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- ["byte length", "NAME", ELEMENT_SIZE]);
+ "byte length", "NAME", ELEMENT_SIZE);
}
newByteLength = bufferByteLength - offset;
newLength = newByteLength / ELEMENT_SIZE;
@@ -317,7 +317,7 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (!IS_ARRAYBUFFER(buffer)) {
throw MakeTypeError('data_view_not_array_buffer', []);
}
- var bufferByteLength = buffer.byteLength;
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
var offset = IS_UNDEFINED(byteOffset) ?
0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
if (offset > bufferByteLength) {
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 3840e6fd22..7867899d71 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -164,7 +164,6 @@ int TypeImpl<Config>::LubBitset(i::Object* value) {
if (value->IsNull()) return kNull;
if (value->IsBoolean()) return kBoolean;
if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
- if (value->IsUninitialized()) return kNone;
UNREACHABLE();
}
return LubBitset(map);
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index b925dc610f..c7bea40ac6 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -323,7 +323,7 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
- oracle()->ForInType(stmt->ForInFeedbackSlot())));
+ oracle()->ForInType(stmt->ForInFeedbackId())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
@@ -530,9 +530,8 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
if (!expr->expression()->IsProperty() &&
- expr->HasCallFeedbackSlot() &&
- oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
- expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
+ oracle()->CallIsMonomorphic(expr->CallFeedbackId())) {
+ expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackId()));
}
ZoneList<Expression*>* args = expr->arguments();
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 2bef7ab20b..bd32467786 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// This file was generated at 2014-02-07 15:31:16.733174
+// This file was generated at 2012-03-06 09:55:58.934483
#include "unicode-inl.h"
#include <stdlib.h>
@@ -710,6 +710,28 @@ bool Letter::Is(uchar c) {
}
+// Space: point.category == 'Zs'
+
+static const uint16_t kSpaceTable0Size = 4;
+static const int32_t kSpaceTable0[4] = {
+ 32, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kSpaceTable1Size = 5;
+static const int32_t kSpaceTable1[5] = {
+ 1073741824, 10, 47, 95, 4096 }; // NOLINT
+bool Space::Is(uchar c) {
+ int chunk_index = c >> 13;
+ switch (chunk_index) {
+ case 0: return LookupPredicate(kSpaceTable0,
+ kSpaceTable0Size,
+ c);
+ case 1: return LookupPredicate(kSpaceTable1,
+ kSpaceTable1Size,
+ c);
+ default: return false;
+ }
+}
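
The table encoding is compact and easy to misread. As far as can be inferred from the data (an assumption, since LookupPredicate itself is not shown in this diff), code points are split into 2^13-wide chunks: c >> 13 picks a table and the low 13 bits form the offset; a plain entry matches a single offset, while an entry with bit 30 set opens a range that the next entry closes, so {1073741824, 10, ...} in kSpaceTable1 covers offsets 0..10, i.e. U+2000..U+200A. A hypothetical linear re-implementation:

#include <cstdint>

const int32_t kStartBitSketch = 1 << 30;

// Linear version of the lookup (the real LookupPredicate binary-searches).
// 'offset' is the low 13 bits of the code point; 'table' is one chunk table.
bool LookupPredicateSketch(const int32_t* table, int size, int32_t offset) {
  for (int i = 0; i < size; i++) {
    if (table[i] & kStartBitSketch) {
      // Range entry: [table[i] without the flag, table[i + 1]] inclusive.
      int32_t lo = table[i] & ~kStartBitSketch;
      int32_t hi = table[i + 1];
      if (offset >= lo && offset <= hi) return true;
      i++;  // skip the range-end entry
    } else if (table[i] == offset) {
      return true;  // singleton entry
    }
  }
  return false;
}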
+
+
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -745,14 +767,14 @@ bool Number::Is(uchar c) {
}
-// WhiteSpace: point.category == 'Zs'
+// WhiteSpace: 'Ws' in point.properties
-static const uint16_t kWhiteSpaceTable0Size = 4;
-static const int32_t kWhiteSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 5;
-static const int32_t kWhiteSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
+static const uint16_t kWhiteSpaceTable0Size = 7;
+static const int32_t kWhiteSpaceTable0[7] = {
+ 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kWhiteSpaceTable1Size = 7;
+static const int32_t kWhiteSpaceTable1[7] = {
+ 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -1811,6 +1833,8 @@ int UnicodeData::GetByteCount() {
+ kLetterTable5Size * sizeof(int32_t) // NOLINT
+ kLetterTable6Size * sizeof(int32_t) // NOLINT
+ kLetterTable7Size * sizeof(int32_t) // NOLINT
+ + kSpaceTable0Size * sizeof(int32_t) // NOLINT
+ + kSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kNumberTable0Size * sizeof(int32_t) // NOLINT
+ kNumberTable5Size * sizeof(int32_t) // NOLINT
+ kNumberTable7Size * sizeof(int32_t) // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 65a9af58fc..bb5506d38e 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -226,6 +226,9 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
+struct Space {
+ static bool Is(uchar c);
+};
struct Number {
static bool Is(uchar c);
};
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index c86fcba782..2e7c494d63 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -172,17 +172,6 @@ inline T RoundUp(T x, intptr_t m) {
}
-// Increment a pointer until it has the specified alignment.
-// This works like RoundUp, but it works correctly on pointer types where
-// sizeof(*pointer) might not be 1.
-template<class T>
-T AlignUp(T pointer, size_t alignment) {
- ASSERT(sizeof(pointer) == sizeof(uintptr_t));
- uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
- return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
-}
-
-
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
@@ -1100,66 +1089,6 @@ class EnumSet {
T bits_;
};
-// Bit field extraction.
-inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
- return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
-}
-
-inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
- return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
-}
-
-inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
- return (x << (31 - msb)) >> (lsb + 31 - msb);
-}
-
-inline int signed_bitextract_64(int msb, int lsb, int x) {
- // TODO(jbramley): This is broken for big bitfields.
- return (x << (63 - msb)) >> (lsb + 63 - msb);
-}
-
-// Check number width.
-inline bool is_intn(int64_t x, unsigned n) {
- ASSERT((0 < n) && (n < 64));
- int64_t limit = static_cast<int64_t>(1) << (n - 1);
- return (-limit <= x) && (x < limit);
-}
-
-inline bool is_uintn(int64_t x, unsigned n) {
- ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
- return !(x >> n);
-}
-
-template <class T>
-inline T truncate_to_intn(T x, unsigned n) {
- ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
- return (x & ((static_cast<T>(1) << n) - 1));
-}
-
-#define INT_1_TO_63_LIST(V) \
-V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
-V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
-V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
-V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
-V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
-V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
-V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
-V(57) V(58) V(59) V(60) V(61) V(62) V(63)
-
-#define DECLARE_IS_INT_N(N) \
-inline bool is_int##N(int64_t x) { return is_intn(x, N); }
-#define DECLARE_IS_UINT_N(N) \
-template <class T> \
-inline bool is_uint##N(T x) { return is_uintn(x, N); }
-#define DECLARE_TRUNCATE_TO_INT_N(N) \
-template <class T> \
-inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
-INT_1_TO_63_LIST(DECLARE_IS_INT_N)
-INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
-INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
-#undef DECLARE_IS_INT_N
-#undef DECLARE_IS_UINT_N
-#undef DECLARE_TRUNCATE_TO_INT_N
class TypeFeedbackId {
public:
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 28454b437e..b89bb7a69b 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -148,16 +148,15 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool run_microtasks = isolate->autorun_microtasks() &&
- isolate->microtask_pending();
- if (!has_call_completed_callbacks && !run_microtasks) return;
+ bool microtask_pending = isolate->microtask_pending();
+ if (!has_call_completed_callbacks && !microtask_pending) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (run_microtasks) Execution::RunMicrotasks(isolate);
+ if (microtask_pending) Execution::RunMicrotasks(isolate);
if (has_call_completed_callbacks) {
for (int i = 0; i < call_completed_callbacks_->length(); i++) {
call_completed_callbacks_->at(i)();
@@ -167,21 +166,6 @@ void V8::FireCallCompletedCallback(Isolate* isolate) {
}
-void V8::RunMicrotasks(Isolate* isolate) {
- if (!isolate->microtask_pending())
- return;
-
- HandleScopeImplementer* handle_scope_implementer =
- isolate->handle_scope_implementer();
- ASSERT(handle_scope_implementer->CallDepthIsZero());
-
- // Increase call depth to prevent recursive callbacks.
- handle_scope_implementer->IncrementCallDepth();
- Execution::RunMicrotasks(isolate);
- handle_scope_implementer->DecrementCallDepth();
-}
-
-
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index d3f5a9c839..8069e8adda 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -101,8 +101,6 @@ class V8 : public AllStatic {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
- static void RunMicrotasks(Isolate* isolate);
-
static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
return array_buffer_allocator_;
}
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index e4f0a3b860..df663c025e 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1889,30 +1889,10 @@ SetUpFunction();
// Eventually, we should move to a real event queue that allows us to
// maintain the relative ordering of different kinds of tasks.
-function GetMicrotaskQueue() {
- var microtaskState = %GetMicrotaskState();
- if (IS_UNDEFINED(microtaskState.queue)) {
- microtaskState.queue = new InternalArray;
- }
- return microtaskState.queue;
-}
+RunMicrotasks.runners = new InternalArray;
function RunMicrotasks() {
while (%SetMicrotaskPending(false)) {
- var microtaskState = %GetMicrotaskState();
- if (IS_UNDEFINED(microtaskState.queue))
- return;
-
- var microtasks = microtaskState.queue;
- microtaskState.queue = new InternalArray;
-
- for (var i = 0; i < microtasks.length; i++) {
- microtasks[i]();
- }
+ for (var i in RunMicrotasks.runners) RunMicrotasks.runners[i]();
}
}
-
-function EnqueueExternalMicrotask(fn) {
- GetMicrotaskQueue().push(fn);
- %SetMicrotaskPending(true);
-}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index b8ddaf0fe5..4f6ca83e89 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 24
-#define BUILD_NUMBER 40
-#define PATCH_LEVEL 0
+#define BUILD_NUMBER 35
+#define PATCH_LEVEL 17
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 5bee438b65..658773e6d6 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -85,7 +85,8 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- scope_address_ = Simulator::current(isolate)->get_sp();
+ int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
+ scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
#endif
isolate_->set_external_callback_scope(this);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index ba3dbd7613..ef513d1e53 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -44,6 +44,27 @@ namespace internal {
// Utility functions
+// Test whether a 64-bit value is in a specific range.
+inline bool is_uint32(int64_t x) {
+ static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+ return static_cast<uint64_t>(x) <= kMaxUInt32;
+}
+
+inline bool is_int32(int64_t x) {
+ static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
+ return is_uint32(x - kMinInt32);
+}
+
+inline bool uint_is_int32(uint64_t x) {
+ static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
+ return x <= kMaxInt32;
+}
+
+inline bool is_uint32(uint64_t x) {
+ static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+ return x <= kMaxUInt32;
+}
+
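
The is_int32() formulation above is a classic range-shift: subtracting kMinInt32 maps the signed interval [-2^31, 2^31) onto [0, 2^32), so one unsigned comparison decides membership. A small self-checking sketch, written in unsigned arithmetic so the shift cannot overflow at the extremes:

#include <cassert>
#include <cstdint>

// Equivalent formulation of is_int32(), done entirely in uint64_t so the
// range shift wraps instead of overflowing.
inline bool is_int32_sketch(int64_t x) {
  return static_cast<uint64_t>(x) + UINT64_C(0x80000000) <=
         UINT64_C(0xffffffff);
}

int main() {
  assert(is_int32_sketch(-2147483648LL));   // kMinInt32 is in range
  assert(is_int32_sketch(2147483647LL));    // kMaxInt32 is in range
  assert(!is_int32_sketch(2147483648LL));   // one past the top
  assert(!is_int32_sketch(-2147483649LL));  // one past the bottom
  return 0;
}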
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -1239,6 +1260,9 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
+ // Call near indirect
+ void call(const Operand& operand);
+
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1250,6 +1274,9 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1472,13 +1499,6 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- protected:
- // Call near indirect
- void call(const Operand& operand);
-
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 92af1f0455..075964bcee 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -106,8 +106,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx, rdx };
- descriptor->register_param_count_ = 2;
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -2161,71 +2161,63 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
+ // Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : Feedback vector
- // rdx : slot in feedback vector (Smi)
+ // rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label check_array, initialize_array, initialize_non_array, megamorphic, done;
+ Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into rcx.
- __ SmiToInteger32(rdx, rdx);
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmpq(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // Check if we're dealing with the Array function or not.
+  // If we reach here, we need to check whether this is the Array function.
+  // We didn't have a matching function and we didn't find the megamorphic
+  // sentinel, so the cell holds either some other function or an
+  // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
__ LoadArrayFunction(rcx);
__ cmpq(rdi, rcx);
- __ j(equal, &check_array);
-
- // Non-array cache: Reload the cache state and check it.
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ Cmp(rcx, TypeFeedbackInfo::PremonomorphicSentinel(isolate));
- __ j(equal, &initialize_non_array);
- __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(not_equal, &megamorphic);
+ __ jmp(&done);
- // Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
- // immortal immovable object (null) so no write-barrier is needed.
- __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- TypeFeedbackInfo::PremonomorphicSentinel(isolate));
- __ jmp(&done, Label::kFar);
-
- // Array cache: Reload the cache state and check to see if we're in a
- // monomorphic state where the state object is an AllocationSite object.
- __ bind(&check_array);
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map = isolate->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(equal, &done);
-
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
- __ j(equal, &initialize_array);
- __ Cmp(rcx, TypeFeedbackInfo::PremonomorphicSentinel(isolate));
- __ j(equal, &initialize_array);
+ __ bind(&miss);
- // Both caches: Monomorphic -> megamorphic. The sentinel is an
- // immortal immovable object (undefined) so no write-barrier is needed.
+  // A monomorphic miss (i.e., the cache is not uninitialized) goes
+  // megamorphic.
+ __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
+ TypeFeedbackCells::MegamorphicSentinel(isolate));
__ jmp(&done);
- // Array cache: Uninitialized or premonomorphic -> monomorphic.
- __ bind(&initialize_array);
+  // An uninitialized cache is patched with the function, or with an
+  // AllocationSite to track the ElementsKind if the function is the Array
+  // constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &not_array_function);
+
+  // The target function is the Array constructor. Create an AllocationSite
+  // if we don't already have one, and store it in the cell.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2233,45 +2225,28 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
- __ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
__ push(rbx);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ pop(rbx);
- __ pop(rdx);
__ pop(rdi);
__ pop(rax);
__ SmiToInteger32(rax, rax);
}
- Label done_no_smi_convert;
- __ jmp(&done_no_smi_convert);
+ __ jmp(&done);
- // Non-array cache: Premonomorphic -> monomorphic.
- __ bind(&initialize_non_array);
- __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- rdi);
- __ push(rdi);
- __ push(rbx);
- __ push(rdx);
- __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(rdx);
- __ pop(rbx);
- __ pop(rdi);
+ __ bind(&not_array_function);
+ __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
+  // No need for a write barrier here; cells are rescanned.
__ bind(&done);
- __ Integer32ToSmi(rdx, rdx);
-
- __ bind(&done_no_smi_convert);
}
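
Written out as plain C++, the state machine this stub implements in assembly looks roughly like the sketch below. Cell, Object, and the sentinel values are hypothetical stand-ins, and, as the comments above note, the real stub stores an AllocationSite rather than the function itself when the target is the Array constructor.

struct Object;  // stand-in for a heap value (function, sentinel, site)

struct Cell { Object* value; };

// Hypothetical sentinels; V8 uses 'undefined' for megamorphic and a
// distinguished uninitialized marker.
Object* const kUninitializedSentinel = reinterpret_cast<Object*>(1);
Object* const kMegamorphicSentinel = reinterpret_cast<Object*>(2);

void RecordCallTarget(Cell* cell, Object* target) {
  Object* state = cell->value;
  // Monomorphic hit or already megamorphic: leave the state unchanged.
  if (state == target || state == kMegamorphicSentinel) return;
  if (state == kUninitializedSentinel) {
    // First observed target: go monomorphic. (For the Array function the
    // stub stores an AllocationSite here instead, so ElementsKind feedback
    // can be tracked.)
    cell->value = target;
  } else {
    // Monomorphic miss: collapse to megamorphic and stop tracking.
    cell->value = kMegamorphicSentinel;
  }
}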
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : feedback vector
- // rdx : (only if rbx is not undefined) slot in feedback vector (Smi)
+ // rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2308,7 +2283,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cont);
}
-
// Load the receiver from the stack.
__ movp(rax, args.GetReceiverOperand());
@@ -2332,11 +2306,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
- __ SmiToInteger32(rdx, rdx);
- __ Move(FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize),
- TypeFeedbackInfo::MegamorphicSentinel(isolate));
- __ Integer32ToSmi(rdx, rdx);
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
+ TypeFeedbackCells::MegamorphicSentinel(isolate));
}
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
@@ -2382,8 +2353,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : feedback vector
- // rdx : (only if rbx is not undefined) slot in feedback vector (Smi)
+ // rbx : cache cell for call target
// rdi : constructor function
Label slow, non_function_call;
@@ -4897,7 +4867,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the slot).
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ incl(rdx);
if (FLAG_debug_code) {
@@ -5007,8 +4977,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : feedback vector (fixed array or undefined)
- // -- rdx : slot index (if ebx is fixed array)
+ // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5030,29 +4999,22 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid fixed array.
+ // We should either have undefined in rbx or a valid cell.
Label okay_here;
- Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
+ Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), fixed_array_map);
- __ Assert(equal, kExpectedFixedArrayInRegisterRbx);
-
- // rdx should be a smi if we don't have undefined in rbx.
- __ AssertSmi(rdx);
-
+ __ Cmp(FieldOperand(rbx, 0), cell_map);
+ __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
__ bind(&okay_here);
}
Label no_info;
- // If the feedback slot is undefined, or contains anything other than an
+ // If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
- __ SmiToInteger32(rdx, rdx);
- __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ Integer32ToSmi(rdx, rdx);
+ __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ Cmp(FieldOperand(rbx, 0),
masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
@@ -5109,6 +5071,7 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
+ // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5181,7 +5144,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = rsi;
int argc = ArgumentBits::decode(bit_field_);
- bool is_store = IsStoreBits::decode(bit_field_);
+ bool restore_context = RestoreContextBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5257,21 +5220,19 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- // Accessor for FunctionCallbackInfo and first js arg.
- StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - FCA::kContextSaveIndex);
- // Stores return the first js argument
+ FCA::kArgsLength - 1 - FCA::kContextSaveIndex);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
+ FCA::kArgsLength - 1 - FCA::kReturnValueOffset);
__ CallApiFunctionAndReturn(
api_function_address,
thunk_address,
callback_arg,
argc + FCA::kArgsLength + 1,
return_value_operand,
- &context_restore_operand);
+ restore_context ? &context_restore_operand : NULL);
}
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 938703ef3e..8ae03deae3 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -173,7 +173,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ Jump(Operand(kScratchRegister, 0));
+ __ jmp(Operand(kScratchRegister, 0));
}
@@ -261,11 +261,9 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
- // -- rbx: feedback array
- // -- rdx: slot in feedback array
+ // -- rbx: cache cell for call target
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
- 0, false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
}
@@ -287,12 +285,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: feedback array
- // -- rdx: feedback slot (smi)
+ // -- rbx: cache cell for call target
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
- rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 476eab2b42..2d659cf0e7 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1451,7 +1451,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ addr =
+ reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
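
The signed-to-unsigned change above matters because the 32-bit immediate is widened
to a 64-bit pointer: reading it as int32_t sign-extends, while uint32_t zero-extends,
and only the latter reproduces the intended address when the immediate's top bit is
set. A standalone illustration (not V8 code; memcpy sidesteps the aliasing question,
and a little-endian layout is assumed):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    unsigned char data[4] = {0x00, 0x00, 0x00, 0x80};  // immediate 0x80000000
    int32_t s;
    uint32_t u;
    std::memcpy(&s, data, 4);
    std::memcpy(&u, data, 4);
    // Sign extension gives 0xffffffff80000000; zero extension gives 0x80000000.
    std::printf("signed:   %p\n", reinterpret_cast<void*>(static_cast<intptr_t>(s)));
    std::printf("unsigned: %p\n", reinterpret_cast<void*>(static_cast<uintptr_t>(u)));
  }
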
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index badf18ed67..621eacc708 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -118,9 +118,6 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-
- InitializeFeedbackVector();
-
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -641,7 +638,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
__ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -994,7 +991,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, clause->CompareId());
+ CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1038,7 +1035,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1127,15 +1123,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Object> feedback = Handle<Object>(
- Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
- isolate());
- StoreFeedbackVectorSlot(slot, feedback);
+ Handle<Cell> cell = isolate()->factory()->NewCell(
+ Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
+ RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
+ __ Move(rbx, cell);
+ __ Move(FieldOperand(rbx, Cell::kValueOffset),
+ Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
- // No need for a write barrier, we are storing a Smi in the feedback vector.
- __ Move(rbx, FeedbackVector());
- __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
- Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1444,7 +1439,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
@@ -1457,8 +1452,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
- : "[ Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1520,12 +1514,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
+ Comment cmnt(masm_, "Lookup slot");
__ push(rsi); // Context.
__ Push(var->name());
__ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1664,7 +1658,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
__ movp(rdx, Operand(rsp, 0));
- CallStoreIC(key->LiteralFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -2079,7 +2073,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rdx, Operand(rsp, kPointerSize));
__ movp(rax, Operand(rsp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, TypeFeedbackId::None());
+ CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2270,7 +2264,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, prop->PropertyFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
@@ -2292,7 +2286,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2341,7 +2336,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
+ expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -2379,7 +2375,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ movp(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- CallStoreIC();
+ CallStoreIC(NOT_CONTEXTUAL);
break;
}
case KEYED_PROPERTY: {
@@ -2400,58 +2396,44 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::EmitCallStoreContextSlot(
- Handle<String> name, LanguageMode mode) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(name);
- __ Push(Smi::FromInt(mode));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
-}
-
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
__ movp(rdx, GlobalObjectOperand());
- CallStoreIC();
-
+ CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ if (var->IsStackLocal()) {
Label skip;
- MemOperand location = VarOperand(var, rcx);
- __ movp(rdx, location);
+ __ movp(rdx, StackOperand(var));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
+ __ movp(StackOperand(var), rax);
__ bind(&skip);
+ } else {
+ ASSERT(var->IsContextSlot() || var->IsLookupSlot());
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are
+ // able to drill a hole to that function context, even from inside a
+ // 'with' context. We thus bypass the normal static scope lookup for
+ // var->IsContextSlot().
+ __ push(rax);
+ __ push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
+ __ push(rax); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2462,16 +2444,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Push(var->name());
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
}
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsLookupSlot()) {
- EmitCallStoreContextSlot(var->name(), language_mode());
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2479,7 +2463,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- EmitStoreToStackLocalOrContextSlot(var, location);
+ // Perform the assignment.
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+ } else {
+ ASSERT(var->IsLookupSlot());
+ __ push(rax); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ Push(Smi::FromInt(language_mode()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2496,7 +2493,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
- CallStoreIC(expr->AssignmentFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2513,7 +2510,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->AssignmentFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2540,8 +2537,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
+ ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
+ ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2651,15 +2650,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
- __ Move(rbx, FeedbackVector());
- __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
+ __ Move(rbx, cell);
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2831,10 +2830,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
- TypeFeedbackInfo::UninitializedSentinel(isolate());
- StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
- __ Move(rbx, FeedbackVector());
- __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
+ RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
+ __ Move(rbx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -4410,7 +4409,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ NOT_CONTEXTUAL,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4441,7 +4442,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
__ pop(rdx);
- CallStoreIC(expr->CountStoreFeedbackId());
+ CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4458,7 +4459,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, expr->CountStoreFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4479,7 +4480,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "[ Global variable");
+ Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
__ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
@@ -4488,7 +4489,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4647,7 +4647,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4682,7 +4682,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
__ testq(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index d2340c83c4..c76eca04d8 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -973,7 +973,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -981,7 +982,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_state,
+ Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1088,7 +1091,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1097,7 +1101,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
+ Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index a94dcee227..c3b6e845fb 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -328,8 +328,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -792,7 +791,6 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1152,38 +1150,55 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
Register dividend = ToRegister(instr->left());
- HDiv* hdiv = instr->hydrogen();
- int32_t divisor = hdiv->right()->GetInteger32Constant();
- Register result = ToRegister(instr->result());
- ASSERT(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
- hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
- hdiv->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ testl(dividend, Immediate(Abs(divisor) - 1));
- DeoptimizeIf(not_zero, instr->environment());
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
}
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2(Abs(divisor));
- if (shift > 0) {
- // The arithmetic shift is always OK, the 'if' is an optimization only.
- if (shift > 1) __ sarl(result, Immediate(31));
- __ shrl(result, Immediate(32 - shift));
- __ addl(result, dividend);
- __ sarl(result, Immediate(shift));
+
+ if (test_value != 0) {
+ if (instr->hydrogen()->CheckFlag(
+ HInstruction::kAllUsesTruncatingToInt32)) {
+ Label done, negative;
+ __ cmpl(dividend, Immediate(0));
+ __ j(less, &negative, Label::kNear);
+ __ sarl(dividend, Immediate(power));
+ if (divisor < 0) __ negl(dividend);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&negative);
+ __ negl(dividend);
+ __ sarl(dividend, Immediate(power));
+ if (divisor > 0) __ negl(dividend);
+ __ bind(&done);
+ return; // Don't fall through to "__ negl" below.
+ } else {
+ // Deoptimize if remainder is not 0.
+ __ testl(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sarl(dividend, Immediate(power));
+ }
}
- if (divisor < 0) __ negl(result);
+
+ if (divisor < 0) __ negl(dividend);
+
return;
}
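
Both the removed branchless sequence and the new branchy one compute truncating
(round-toward-zero) division by a power of two; a bare arithmetic shift would instead
round toward negative infinity for negative dividends. A scalar sketch of the two
equivalent approaches, assuming kMinInt has already been excluded by the deopt checks
above (and that >> on negative values is an arithmetic shift, as on mainstream
compilers):

  #include <cassert>
  #include <cstdint>

  // Branchless (the removed code): bias a negative dividend by 2^power - 1
  // before shifting, so the shift truncates toward zero.
  int32_t DivPow2Branchless(int32_t dividend, int power) {
    int32_t bias = (dividend >> 31) & ((1 << power) - 1);  // 0 or 2^power - 1
    return (dividend + bias) >> power;
  }

  // Branchy (the new code): shift the magnitude, then restore the sign.
  int32_t DivPow2Branchy(int32_t dividend, int power) {
    if (dividend >= 0) return dividend >> power;
    return -((-dividend) >> power);
  }

  int main() {
    for (int32_t d : {-9, -8, -7, 7, 8, 9}) {
      assert(DivPow2Branchless(d, 2) == d / 4);
      assert(DivPow2Branchy(d, 2) == d / 4);
    }
  }
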
@@ -2764,12 +2779,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Representation representation = access.representation();
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(object, offset), representation);
- __ AssertSmi(scratch);
-#endif
-
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3015,17 +3024,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (representation.IsInteger32() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3314,7 +3312,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3379,7 +3377,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
} else {
Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
generator.BeforeCall(__ CallSize(target));
- __ Call(target);
+ __ call(target);
}
generator.AfterCall();
}
@@ -3553,10 +3551,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister input_temp = ToDoubleRegister(instr->temp());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
- Label done, round_to_zero, below_one_half, do_not_compensate, restore;
+ Label done, round_to_zero, below_one_half;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ movq(kScratchRegister, one_half);
__ movq(xmm_scratch, kScratchRegister);
@@ -3580,21 +3579,19 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movq(kScratchRegister, input_reg); // Back up input_reg.
- __ subsd(input_reg, xmm_scratch);
- __ cvttsd2si(output_reg, input_reg);
+ __ movq(input_temp, input_reg); // Do not alter input_reg.
+ __ subsd(input_temp, xmm_scratch);
+ __ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x80000000));
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &restore, Label::kNear);
+ __ ucomisd(xmm_scratch, input_temp);
+ __ j(equal, &done, dist);
__ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
- __ bind(&restore);
- __ movq(input_reg, kScratchRegister); // Restore input_reg.
__ jmp(&done, dist);
__ bind(&round_to_zero);
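
The compensation logic implements Math.round(x) = floor(x + 0.5) on the negative
range: cvttsd2si truncates toward zero, which for a negative sum is ceil, so the code
subtracts one whenever the truncation discarded a fractional part. The new input_temp
register exists purely so this arithmetic no longer clobbers (and then has to restore)
input_reg. A scalar model of the path, with the minint overflow handling omitted
since the stub deoptimizes on it:

  #include <cassert>
  #include <cstdint>

  int32_t RoundNegative(double x) {
    double sum = x + 0.5;                           // subsd(input_temp, minus_half)
    int32_t truncated = static_cast<int32_t>(sum);  // cvttsd2si: rounds toward zero
    if (static_cast<double>(truncated) != sum) {    // ucomisd compare...
      --truncated;                                  // ...and compensate to floor
    }
    return truncated;
  }

  int main() {
    assert(RoundNegative(-1.4) == -1);
    assert(RoundNegative(-1.5) == -1);  // ties round toward +infinity
    assert(RoundNegative(-1.6) == -2);
  }
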
@@ -3857,6 +3854,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
+ ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -3868,8 +3866,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
- SmiCheck check_needed = hinstr->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
@@ -3890,9 +3886,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
-
- // We know that value is a smi now, so we can omit the check below.
- check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -3923,6 +3916,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -3932,11 +3928,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (representation.IsSmi() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(write_register, offset), representation);
- __ AssertSmi(scratch);
-#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -4008,44 +3999,51 @@ void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
+ HBoundsCheck* hinstr = instr->hydrogen();
+ if (hinstr->skip_check()) return;
+
+ Representation representation = hinstr->length()->representation();
+ ASSERT(representation.Equals(hinstr->index()->representation()));
+ ASSERT(representation.IsSmiOrInteger32());
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (!instr->hydrogen()->length()->representation().IsSmi()) {
- __ AssertZeroExtended(reg);
- }
+
if (instr->index()->IsConstantOperand()) {
int32_t constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
+ if (representation.IsSmi()) {
__ Cmp(reg, Smi::FromInt(constant_index));
} else {
- __ cmpq(reg, Immediate(constant_index));
+ __ cmpl(reg, Immediate(constant_index));
}
} else {
Register reg2 = ToRegister(instr->index());
- if (!instr->hydrogen()->index()->representation().IsSmi()) {
- __ AssertZeroExtended(reg2);
+ if (representation.IsSmi()) {
+ __ cmpq(reg, reg2);
+ } else {
+ __ cmpl(reg, reg2);
}
- __ cmpq(reg, reg2);
}
} else {
Operand length = ToOperand(instr->length());
if (instr->index()->IsConstantOperand()) {
int32_t constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
+ if (representation.IsSmi()) {
__ Cmp(length, Smi::FromInt(constant_index));
} else {
- __ cmpq(length, Immediate(constant_index));
+ __ cmpl(length, Immediate(constant_index));
}
} else {
- __ cmpq(length, ToRegister(instr->index()));
+ if (representation.IsSmi()) {
+ __ cmpq(length, ToRegister(instr->index()));
+ } else {
+ __ cmpl(length, ToRegister(instr->index()));
+ }
}
}
- Condition condition =
- instr->hydrogen()->allow_equality() ? below : below_equal;
+ Condition condition = hinstr->allow_equality() ? below : below_equal;
ApplyCheckIf(condition, instr);
}
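
The comparison width now tracks the representation: on x64 a smi keeps its payload in
the upper 32 bits of the word, so smi comparisons need the full 64-bit cmpq, while
untagged int32 values only define the low half of a register, making the 32-bit cmpl
sufficient and indifferent to upper-half garbage; that is also why the removed
AssertZeroExtended checks became unnecessary. A small model of the two layouts,
assuming x64's 32-bit smi shift:

  #include <cassert>
  #include <cstdint>

  // x64 smi encoding: the 32-bit payload lives in the upper half of the word.
  int64_t ToSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }

  int main() {
    // Smi compares must look at all 64 bits; the low halves are always zero.
    assert(ToSmi(3) < ToSmi(7));
    // An int32 in a 64-bit register may carry junk in its upper half; a 32-bit
    // compare (cmpl) reads only the defined low half.
    uint64_t reg = (0xdeadbeefULL << 32) | 5u;
    assert(static_cast<uint32_t>(reg) == 5u);
  }
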
@@ -4190,17 +4188,6 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
if (representation.IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
-#ifdef DEBUG
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- offset,
- instr->additional_index()),
- Representation::Smi());
- __ AssertSmi(scratch);
-#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -5070,7 +5057,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 5b4e32d2c4..c3bfd9e612 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -198,7 +198,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Set(dst, cgen_->ToInteger32(constant_source));
+ __ Set(dst, static_cast<uint32_t>(cgen_->ToInteger32(constant_source)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index e342acbcb8..511b6b6615 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1121,8 +1121,9 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathRound* result = new(zone()) LMathRound(input);
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1240,10 +1241,10 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegister(instr->left());
+ LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ return AssignEnvironment(DefineSameAsFirst(div));
}
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
@@ -1427,15 +1428,16 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->representation().IsSmi()) {
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseAtStart(instr->BetterRightOperand());
+ } else if (instr->representation().IsInteger32()) {
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index cfaed15077..9785fcc838 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -720,13 +720,15 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LMathRound(LOperand* value) {
+ explicit LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index c0ae4e8d71..4c19fced69 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -505,8 +505,17 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
-#ifdef DEBUG
+ // We want to pass the msg string like a smi to avoid GC
+ // problems; however, msg is not guaranteed to be aligned
+ // properly. Instead, we pass an aligned pointer that is
+ // a proper v8 smi, but also pass the alignment difference
+ // from the real pointer as a smi.
const char* msg = GetBailoutReason(reason);
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -519,7 +528,10 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(rax);
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
+ Move(kScratchRegister, reinterpret_cast<Smi*>(p0),
+ Assembler::RelocInfoNone());
+ push(kScratchRegister);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
Assembler::RelocInfoNone());
push(kScratchRegister);
@@ -527,9 +539,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort, 2);
}
// Control will not return here.
int3();
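
The pointer gymnastics above work because a smi on x64 is simply a word with the low
tag bit clear: rounding the message pointer down to the nearest smi-tagged value and
passing the one-bit remainder separately gives the GC two harmless smis from which
the runtime can reconstruct the exact char pointer. A standalone model, assuming the
x64 values kSmiTagMask == 1 and kSmiTag == 0:

  #include <cassert>
  #include <cstdint>

  constexpr intptr_t kSmiTagMask = 1;  // x64 smi tagging assumed here
  constexpr intptr_t kSmiTag = 0;

  int main() {
    const char* msg = "kUnexpectedValue";
    intptr_t p1 = reinterpret_cast<intptr_t>(msg);
    intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // round down to a tagged word
    intptr_t delta = p1 - p0;                     // 0 or 1, itself smi-safe
    // The runtime side of Runtime::kAbort adds the two halves back together.
    const char* recovered = reinterpret_cast<const char*>(p0 + delta);
    assert(recovered == msg);
  }
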
@@ -972,17 +984,12 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, intptr_t x) {
- if (kPointerSize == kInt64Size) {
- if (is_int32(x)) {
- movp(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movp(dst, kScratchRegister);
- }
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+ if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- ASSERT(kPointerSize == kInt32Size);
- movp(dst, Immediate(static_cast<int32_t>(x)));
+ Set(kScratchRegister, x);
+ movq(dst, kScratchRegister);
}
}
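
Set(const Operand&, int64_t) picks the cheapest encoding: a value that fits in a
sign-extended 32-bit immediate is stored directly, otherwise it takes a detour through
the scratch register. The selection predicate amounts to the round-trip test below
(illustrative, mirroring the is_int32() check used above):

  #include <cassert>
  #include <cstdint>

  // Does x survive a round trip through a sign-extended 32-bit immediate?
  bool IsInt32(int64_t x) { return x == static_cast<int32_t>(x); }

  int main() {
    assert(IsInt32(-1));
    assert(IsInt32(0x7fffffffLL));
    assert(!IsInt32(0x80000000LL));  // would need zero extension, not sign extension
    assert(!IsInt32(1LL << 40));
  }
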
@@ -2585,17 +2592,6 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
-void MacroAssembler::Jump(const Operand& op) {
- if (kPointerSize == kInt64Size) {
- jmp(op);
- } else {
- ASSERT(kPointerSize == kInt32Size);
- movp(kScratchRegister, op);
- jmp(kScratchRegister);
- }
-}
-
-
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
@@ -2627,17 +2623,6 @@ void MacroAssembler::Call(ExternalReference ext) {
}
-void MacroAssembler::Call(const Operand& op) {
- if (kPointerSize == kInt64Size) {
- call(op);
- } else {
- ASSERT(kPointerSize == kInt32Size);
- movp(kScratchRegister, op);
- call(kScratchRegister);
- }
-}
-
-
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 092acc0278..42245aa808 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -802,7 +802,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, intptr_t x);
+ void Set(const Operand& dst, int64_t x);
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -865,12 +865,10 @@ class MacroAssembler: public Assembler {
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
- void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
- void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 346d5e805d..a43d709b17 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -393,14 +393,13 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- bool is_store,
- int argc,
- Register* values) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
__ PopReturnAddressTo(scratch_in);
@@ -466,7 +465,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ CallApiFunctionStub stub(true, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -971,6 +970,15 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization,
+ Handle<Map> receiver_map) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), 0, NULL);
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1150,6 +1158,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)