path: root/deps/v8/src/x64
author     Ryan Dahl <ry@tinyclouds.org>  2010-09-22 10:21:15 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2010-09-22 10:21:15 -0700
commit     e2274412488ab310decb8494ab41009342b3c2f6 (patch)
tree       b4f29c5bed08a9e5b51217fc4e4ed704ecef8ae8 /deps/v8/src/x64
parent     cc1d61cbb3a9b2b75edac66295f2bc5314be5042 (diff)
download   node-new-e2274412488ab310decb8494ab41009342b3c2f6.tar.gz
Upgrade V8 to 2.4.5
Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc        |  11
-rw-r--r--  deps/v8/src/x64/frames-x64.cc            |  14
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc      | 102
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                |  28
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc   | 750
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h    | 801
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc        |  33
7 files changed, 909 insertions, 830 deletions
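
Most of this diff is a refactor of the x64 smi helpers and IC call sites: overloads that took an optional on_not_smi_result label either lose the label entirely (where overflow is provably impossible) or become templates parameterized on the label type, and the full code generator routes every IC call through a new EmitCallIC helper. As a quick orientation, here is a minimal sketch (not part of the patch; it reuses the rbx/rcx registers and the &runtime label from the code-stubs changes below) of the two call shapes the smi arithmetic now comes in:

    // Non-checking form: overflow is known to be impossible; debug builds
    // still verify this with Assert(no_overflow, ...).
    __ SmiAdd(rbx, rbx, rcx);
    // Checking form (templated on the label type): bail out on overflow.
    __ SmiAdd(rbx, rbx, rcx, &runtime);
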
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index b480412aab..e9729ea9a7 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1989,7 +1989,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(negative, &done);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
+ __ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
__ movq(FieldOperand(rbx,
rdx,
@@ -3343,7 +3343,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx, NULL);
+ __ SmiAdd(rbx, rbx, rcx);
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
__ SmiCompare(rbx, Smi::FromInt(2));
@@ -3803,7 +3803,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(rdx, Operand(rsp, kFromOffset));
__ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
- __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
__ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
Label return_rax;
__ j(equal, &return_rax);
@@ -3936,8 +3936,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ movq(scratch4, scratch1);
__ SmiSub(scratch4,
scratch4,
- FieldOperand(right, String::kLengthOffset),
- NULL);
+ FieldOperand(right, String::kLengthOffset));
// Register scratch4 now holds left.length - right.length.
const Register length_difference = scratch4;
Label left_shorter;
@@ -3945,7 +3944,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// The right string isn't longer than the left one.
// Get the right string's length by subtracting the (non-negative) difference
// from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference, NULL);
+ __ SmiSub(scratch1, scratch1, length_difference);
__ bind(&left_shorter);
// Register scratch1 now holds Min(left.length, right.length).
const Register min_length = scratch1;
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index fd26535155..9c960478aa 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -35,18 +35,8 @@ namespace v8 {
namespace internal {
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
- // Compute the stack pointer.
- Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
- // Fill in the state.
- state->fp = fp;
- state->sp = sp;
- state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- ASSERT(*state->pc_address != NULL);
- return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
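
frames-x64.cc shrinks because the platform-specific GetStateForFramePointer is replaced by the small ComputeStackPointer hook above. The assumption (the shared code is not part of this x64 diff) is that the common logic now lives in the platform-independent frames code, roughly reassembled here from the lines removed above:

    // Hypothetical sketch of the shared caller; not shown in this diff.
    StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
      if (fp == 0) return NONE;
      // Compute the stack pointer through the per-platform hook kept above.
      Address sp = ComputeStackPointer(fp);
      // Fill in the state.
      state->fp = fp;
      state->sp = sp;
      state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
      return EXIT;
    }
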
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index e4faafc65f..b357a9b590 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -625,10 +625,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Absence of a test rax instruction following the call
- // indicates that none of the load was inlined.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
}
@@ -941,8 +938,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- __ call(ic, mode);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, mode);
}
@@ -1019,7 +1015,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
slow));
__ Move(rax, key_literal->handle());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
__ jmp(done);
}
}
@@ -1043,11 +1039,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ Move(rcx, var->name());
__ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // A test rax instruction following the call is used by the IC to
- // indicate that the inobject property case was inlined. Ensure there
- // is no test rax instruction here.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
Apply(context, rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1110,10 +1102,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after the
- // call. It is treated specially by the LoadIC code.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
Apply(context, rax);
}
}
@@ -1212,8 +1201,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
// Fall through.
@@ -1425,16 +1413,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
@@ -1553,8 +1539,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
@@ -1565,8 +1550,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ pop(rdx);
__ pop(rax);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
}
@@ -1589,8 +1573,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Move(rcx, var->name());
__ movq(rdx, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1674,8 +1657,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(rdx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1713,10 +1695,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1765,7 +1744,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
Apply(context_, rax);
@@ -1789,7 +1768,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
in_loop);
- __ Call(ic, mode);
+ EmitCallIC(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
Apply(context_, rax);
@@ -1924,11 +1903,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call as it is treated specially
- // by the LoadIC code.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Pop receiver.
__ pop(rbx);
// Push result (function).
@@ -2841,7 +2816,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Move(rcx, expr->name());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
- __ call(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3139,10 +3114,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
@@ -3156,10 +3128,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(rcx);
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // This nop signals to the IC that there is no inlined code at the call
- // site for it to patch.
- __ nop();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
@@ -3182,8 +3151,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ nop(); // Signal no inlined code.
+ EmitCallIC(ic, RelocInfo::CODE_TARGET);
if (where == kStack) __ push(rax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
@@ -3431,10 +3399,36 @@ void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
}
-Register FullCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() {
+ return rax;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return rsi;
+}
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+ ASSERT(mode == RelocInfo::CODE_TARGET ||
+ mode == RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, mode);
-Register FullCodeGenerator::context_register() { return rsi; }
+ // If we're calling a (keyed) load or store stub, we have to mark
+ // the call as containing no inlined code so we will not attempt to
+ // patch it.
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ case Code::KEYED_LOAD_IC:
+ case Code::STORE_IC:
+ case Code::KEYED_STORE_IC:
+ __ nop(); // Signals no inlined code.
+ break;
+ default:
+ // Do nothing.
+ break;
+ }
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
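
The full-codegen changes are mechanical: every hand-written "call the IC, then maybe emit a nop" sequence becomes a call to the new EmitCallIC, which decides from the code kind whether the signalling nop is needed (as the removed comments explain, the load/store ICs look at the instruction after the call site to tell whether there is an inlined fast path to patch). A before/after sketch of a typical named-load call site:

    // Before: the call and the "no inlined code" signal were written by hand.
    __ Call(ic, RelocInfo::CODE_TARGET);
    __ nop();  // Signal no inlined code.

    // After: the helper emits the nop only for (keyed) load/store ICs.
    EmitCallIC(ic, RelocInfo::CODE_TARGET);
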
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index a74e621e15..62e769123e 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -730,7 +730,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
Label miss;
- Label index_out_of_range;
Register receiver = rdx;
Register index = rax;
@@ -745,7 +744,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ &miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -753,10 +752,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret(0);
-
__ bind(&miss);
GenerateMiss(masm);
}
@@ -847,7 +842,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
- Label box_int;
+ NearLabel box_int;
__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
@@ -1032,7 +1027,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// No more bailouts to slow case on this path, so key not needed.
__ SmiToInteger32(rdi, rax);
{ // Clamp the value to [0..255].
- Label done;
+ NearLabel done;
__ testl(rdi, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, rdi); // 1 if negative, 0 if positive.
@@ -1082,7 +1077,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
- Label non_smi_value;
+ NearLabel non_smi_value;
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ JumpIfNotSmi(rax, &non_smi_value);
@@ -1104,7 +1099,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, check_heap_number;
+ Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
@@ -1145,6 +1140,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
+ NearLabel check_heap_number;
__ JumpIfNotSmi(rax, &check_heap_number);
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
@@ -1488,7 +1484,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_string, lookup_monomorphic_cache;
Label index_smi, index_string;
@@ -1730,6 +1726,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell) {
+ // TODO(<bug#>): implement this.
+ return false;
+}
+
+
// The offset from the inlined patch site to the start of the inlined
// store instruction.
const int StoreIC::kOffsetToStoreInstruction = 20;
@@ -1880,7 +1884,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- Label miss, restore_miss;
+ Label miss;
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 165c51dd27..d62bed4bdf 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -85,7 +85,7 @@ void MacroAssembler::RecordWriteHelper(Register object,
Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
- Label not_in_new_space;
+ NearLabel not_in_new_space;
InNewSpace(object, scratch, not_equal, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
@@ -171,7 +171,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Label done;
if (FLAG_debug_code) {
- Label okay;
+ NearLabel okay;
JumpIfNotSmi(object, &okay);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
@@ -221,42 +221,6 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
}
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask());
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask());
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(scratch, kScratchRegister);
- j(cc, branch);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
- j(cc, branch);
- }
-}
-
-
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -264,7 +228,7 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
void MacroAssembler::AssertFastElements(Register elements) {
if (FLAG_debug_code) {
- Label ok;
+ NearLabel ok;
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
j(equal, &ok);
@@ -278,7 +242,7 @@ void MacroAssembler::AssertFastElements(Register elements) {
void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
+ NearLabel L;
j(cc, &L);
Abort(msg);
// will not return here
@@ -291,7 +255,7 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
+ NearLabel alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
// Abort if stack is not aligned.
@@ -304,7 +268,7 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
- Label ok;
+ NearLabel ok;
testl(result, result);
j(not_zero, &ok);
testl(op, op);
@@ -642,8 +606,6 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -666,7 +628,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
} else {
- Label ok;
+ NearLabel ok;
j(equal, &ok);
int3();
bind(&ok);
@@ -716,20 +678,9 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
}
}
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- ASSERT_EQ(0, kSmiTag);
- if (!dst.is(src)) {
- movl(dst, src);
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-void MacroAssembler::Integer32ToSmi(Register dst,
- Register src,
- Label* on_overflow) {
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
- // 32-bit integer always fits in a long smi.
if (!dst.is(src)) {
movl(dst, src);
}
@@ -740,7 +691,7 @@ void MacroAssembler::Integer32ToSmi(Register dst,
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
if (FLAG_debug_code) {
testb(dst, Immediate(0x01));
- Label ok;
+ NearLabel ok;
j(zero, &ok);
if (allow_stub_calls()) {
Abort("Integer32ToSmiField writing to non-smi location");
@@ -949,180 +900,6 @@ Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
}
-void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- if (dst.is(src1)) {
- addq(dst, src2);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- }
- Assert(no_overflow, "Smi addition overflow");
- } else if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- }
- Assert(no_overflow, "Smi subtraction overflow");
- } else if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result) {
- if (on_not_smi_result == NULL) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- }
- Assert(no_overflow, "Smi subtraction overflow");
- } else if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- j(positive, &zero_correct_result); // Result was positive zero.
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
-
- bind(&zero_correct_result);
- xor_(dst, dst);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
- // One of src1 and src2 is zero, the check whether the other is
- // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&correct_result);
- }
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- ASSERT_EQ(0, kSmiTag);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1179,29 +956,6 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
-}
-
-
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1226,165 +980,48 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- }
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible.
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ addq(dst, src2);
} else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
+ movq(dst, src1);
+ addq(dst, src2);
}
+ Assert(no_overflow, "Smi addition overflow");
}
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- Label positive_divisor;
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- Label safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- } else {
- j(negative, on_not_smi_result);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- Label smi_result;
- j(zero, &smi_result);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&smi_result);
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ subq(dst, src2);
} else {
- j(not_zero, on_not_smi_result);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movq(dst, src1);
+ subq(dst, src2);
}
- Integer32ToSmi(dst, rax);
+ Assert(no_overflow, "Smi subtraction overflow");
}
-void MacroAssembler::SmiMod(Register dst,
+void MacroAssembler::SmiSub(Register dst,
Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- Label safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ const Operand& src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
}
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- Label smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result);
- testq(src1, src1);
- j(negative, on_not_smi_result);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
+ Assert(no_overflow, "Smi subtraction overflow");
}
@@ -1480,25 +1117,6 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
}
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result) {
- // Logic right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value) {
@@ -1515,7 +1133,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
- Label result_ok;
+ NearLabel result_ok;
// Untag shift amount.
if (!dst.is(src1)) {
movq(dst, src1);
@@ -1527,42 +1145,6 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- Label result_ok;
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&positive_result);
- } else {
- j(negative, on_not_smi_result); // src2 was zero and src1 negative.
- }
-}
-
-
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
@@ -1590,44 +1172,6 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1663,138 +1207,6 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
}
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- Condition smi = CheckSmi(src);
- j(smi, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
- Label* on_not_positive_smi) {
- Condition positive_smi = CheckPositiveSmi(src);
- j(NegateCondition(positive_smi), on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals) {
- SmiCompare(src, constant);
- j(equal, on_equals);
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- Label* on_invalid) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
- Label* on_not_both_smi) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi) {
- Condition both_smi = CheckBothPositiveSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label *failure) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
@@ -1903,7 +1315,6 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- WriteRecordedPositions();
call(code_object, rmode);
}
@@ -1994,7 +1405,7 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
j(is_smi, &ok);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
@@ -2005,14 +1416,14 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
void MacroAssembler::AbortIfSmi(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(NegateCondition(is_smi), "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- Label ok;
+ NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(is_smi, "Operand is not a smi");
}
@@ -2052,7 +1463,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
// Make sure that the function has an instance prototype.
- Label non_instance;
+ NearLabel non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
j(not_zero, &non_instance);
@@ -2068,7 +1479,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(equal, miss);
// If the function does not have an initial map, we're done.
- Label done;
+ NearLabel done;
CmpObjectType(result, MAP_TYPE, kScratchRegister);
j(not_equal, &done);
@@ -2133,76 +1544,11 @@ void MacroAssembler::DebugBreak() {
#endif // ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- InvokeFlag flag) {
- bool definitely_matches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
- Label done;
+ NearLabel done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
if (flag == CALL_FUNCTION) {
call(code);
@@ -2219,7 +1565,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag) {
- Label done;
+ NearLabel done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
if (flag == CALL_FUNCTION) {
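
The removals in macro-assembler-x64.cc are not lost functionality: the label-taking variants (SmiNeg, the checking SmiAdd/SmiSub/SmiMul, SmiTryAddConstant, the JumpIf* predicates, SelectNonSmi, the flat-ASCII string checks, InNewSpace, InvokePrologue) reappear in macro-assembler-x64.h below as templates over the label type, so each helper can target either a Label or a NearLabel (assumed here to be V8's type for jump targets close enough for a short, 8-bit-displacement jump). A sketch of the shape of the move, using one of the smallest helpers and the body deleted above:

    // Template definition as it would now live in the header (sketch; the
    // truncated .h diff below shows only the first batch of these).
    template <typename LabelType>
    void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
      Condition smi = CheckSmi(src);
      j(NegateCondition(smi), on_not_smi);
    }
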
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9f5a746581..503c7f2c79 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -91,10 +91,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space. The condition cc can be equal or
// not_equal. If it is equal, a jump will be done if the object is in new
// space. The register scratch can be the object itself, but it will be clobbered.
+ template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc,
- Label* branch);
+ LabelType* branch);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -215,14 +216,9 @@ class MacroAssembler: public Assembler {
// Tag an integer value. The result must be known to be a valid smi value.
// Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting integer.
+ // based on the value of the resulting smi.
void Integer32ToSmi(Register dst, Register src);
- // Tag an integer value if possible, or jump the integer value cannot be
- // represented as a smi. Only uses the low 32 bit of the src registers.
- // NOTICE: Destroys the dst register even if unsuccessful!
- void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
-
// Stores an integer32 value into a memory field that already holds a smi.
void Integer32ToSmiField(const Operand& dst, Register src);
@@ -300,30 +296,42 @@ class MacroAssembler: public Assembler {
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
- void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+ template <typename LabelType>
+ void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump if the unsigned integer value cannot be represented by a smi.
- void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+ template <typename LabelType>
+ void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src, Label* on_smi);
+ template <typename LabelType>
+ void JumpIfSmi(Register src, LabelType* on_smi);
// Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src, Label* on_not_smi);
+ template <typename LabelType>
+ void JumpIfNotSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value is not a positive tagged smi.
- void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+ template <typename LabelType>
+ void JumpIfNotPositiveSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
- void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
+ template <typename LabelType>
+ void JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ LabelType* on_equals);
// Jump if either or both register are not smi values.
- void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+ template <typename LabelType>
+ void JumpIfNotBothSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi);
// Jump if either or both register are not positive smi values.
+ template <typename LabelType>
void JumpIfNotBothPositiveSmi(Register src1, Register src2,
- Label* on_not_both_smi);
+ LabelType* on_not_both_smi);
// Operations on tagged smi values.
@@ -333,10 +341,11 @@ class MacroAssembler: public Assembler {
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
+ template <typename LabelType>
void SmiTryAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
@@ -348,10 +357,11 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result. No testing on the result is done. Sets the N and Z flags
@@ -360,60 +370,80 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Negating a smi can give a negative zero or too large positive value.
// NOTICE: This operation jumps on success, not failure!
+ template <typename LabelType>
void SmiNeg(Register dst,
Register src,
- Label* on_smi_result);
+ LabelType* on_smi_result);
// Adds smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiAdd(Register dst,
+ Register src1,
+ Register src2);
// Subtracts smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiSub(Register dst,
+ Register src1,
+ Register src2);
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
const Operand& src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
+
+ void SmiSub(Register dst,
+ Register src1,
+ const Operand& src2);
// Multiplies smi values and return the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiMul(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiDiv(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiMod(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
// Bitwise operations.
void SmiNot(Register dst, Register src);
@@ -427,10 +457,11 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value);
+ template <typename LabelType>
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
- Label* on_not_smi_result);
+ LabelType* on_not_smi_result);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -443,10 +474,11 @@ class MacroAssembler: public Assembler {
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
+ template <typename LabelType>
void SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result);
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
@@ -460,10 +492,11 @@ class MacroAssembler: public Assembler {
// Select the non-smi register of two registers where exactly one is a
// smi. If neither are smis, jump to the failure label.
+ template <typename LabelType>
void SelectNonSmi(Register dst,
Register src1,
Register src2,
- Label* on_not_smis);
+ LabelType* on_not_smis);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
@@ -493,25 +526,29 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+ template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
Register scratch2,
- Label* on_not_both_flat_ascii);
+ LabelType* on_not_both_flat_ascii);
// Check whether the instance type represents a flat ascii string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label *on_not_flat_ascii_string);
+ template <typename LabelType>
+ void JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ LabelType *on_not_flat_ascii_string);
+ template <typename LabelType>
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
- Label* on_fail);
+ LabelType* on_fail);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -865,11 +902,12 @@ class MacroAssembler: public Assembler {
Handle<Object> code_object_;
// Helper functions for generating invokes.
+ template <typename LabelType>
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
- Label* done,
+ LabelType* done,
InvokeFlag flag);
// Activation support.
@@ -961,6 +999,697 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
+// -----------------------------------------------------------------------------
+// Template implementations.
+
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+
+template <typename LabelType>
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ LabelType* on_smi_result) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result);
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
+ j(overflow, on_not_smi_result);
+ subq(src1, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ NearLabel failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+
+ movq(dst, kScratchRegister);
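+ // The xor of the two operands is negative exactly when their signs
+ // differ, i.e. when the zero product would mathematically be -0.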
+ xor_(dst, src2);
+ j(positive, &zero_correct_result); // Result was positive zero.
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+
+ bind(&zero_correct_result);
+ xor_(dst, dst);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+ // One of src1 and src2 is zero, so check whether the other one is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&correct_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ LoadSmiConstant(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(dst, constant);
+ // Adding and subtracting the min-value gives the same result; the two
+ // operations differ only in the overflow flag, which we don't check here.
+ addq(dst, src);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with the negative zero test (negative zero only
+ // happens when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ NearLabel safe_div;
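+ // After untagging, rax & 0x7fffffff is zero only for 0 and
+ // Smi::kMinValue (0x80000000), the two dividends that may need the
+ // slow case.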
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ } else {
+ j(negative, on_not_smi_result);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ NearLabel smi_result;
+ j(zero, &smi_result);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ NearLabel safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ NearLabel smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result);
+ testq(src1, src1);
+ j(negative, on_not_smi_result);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRightConstant(
+ Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
+ // Logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
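+ // A logical shift by zero of a negative smi would have to produce an
+ // unsigned value outside the smi range, so go to the slow case.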
+ testq(dst, dst);
+ j(negative, on_not_smi_result);
+ }
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ NearLabel result_ok;
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
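+ // OR-ing kSmiShift (32) into the count makes the single shr below both
+ // untag the smi and perform the shift; the shl afterwards re-tags it.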
+ orl(rcx, Immediate(kSmiShift));
+ shr_cl(dst); // Effective shift is (rcx & 0x1f) + 32.
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ NearLabel positive_result;
+ j(positive, &positive_result);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&positive_result);
+ } else {
+ j(negative, on_not_smi_result); // src2 was zero and src1 negative.
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smis) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // Both operands must not be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // If non-zero, then neither operand is a smi (both have the tag bit set).
+ j(not_zero, on_not_smis);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+ LabelType* on_not_positive_smi) {
+ Condition positive_smi = CheckPositiveSmi(src);
+ j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ LabelType* on_equals) {
+ SmiCompare(src, constant);
+ j(equal, on_equals);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
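+ // The lea places scratch2's masked bits three positions above scratch1's;
+ // the ASSERT above guarantees the two masked fields cannot overlap.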
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ LabelType* failure) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ LabelType* branch) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask());
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask());
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch);
+ } else {
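+ // Compute (object - new_space_start) and mask it with the new space
+ // mask; the result is zero exactly when the object lies in new space.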
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ j(cc, branch);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ LabelType* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ NearLabel invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ Set(rax, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ Set(rbx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(rbx));
+ Set(rax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
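+ // No compile-time match was proven: the mismatch path goes through the
+ // arguments adaptor trampoline, which builds an adaptor frame before
+ // invoking the function.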
+ Handle<Code> adaptor =
+ Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ jmp(done);
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
+ }
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 765a90c023..75956eb02a 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -216,7 +216,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype) {
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ // Check we're still in the same context.
+ __ Move(prototype, Top::global());
+ __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+ prototype);
+ __ j(not_equal, miss);
// Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
@@ -964,7 +969,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax);
+ masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -983,7 +988,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax);
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -1004,7 +1009,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
}
@@ -1358,7 +1363,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- rax);
+ rax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
@@ -1429,7 +1435,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
- rax);
+ rax,
+ &miss);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
rbx, rdx, rdi, name, &miss);
@@ -1541,6 +1548,16 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
}
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // TODO(872): implement this.
+ return Heap::undefined_value();
+}
+
+
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
@@ -1845,12 +1862,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub, 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.