Diffstat (limited to 'deps/v8/src/mips/macro-assembler-mips.cc')
 deps/v8/src/mips/macro-assembler-mips.cc | 777
 1 file changed, 395 insertions(+), 382 deletions(-)
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 62067a26d..691592272 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -4,16 +4,16 @@ #include <limits.h> // For LONG_MIN, LONG_MAX. -#include "v8.h" +#include "src/v8.h" #if V8_TARGET_ARCH_MIPS -#include "bootstrapper.h" -#include "codegen.h" -#include "cpu-profiler.h" -#include "debug.h" -#include "isolate-inl.h" -#include "runtime.h" +#include "src/bootstrapper.h" +#include "src/codegen.h" +#include "src/cpu-profiler.h" +#include "src/debug.h" +#include "src/isolate-inl.h" +#include "src/runtime.h" namespace v8 { namespace internal { @@ -32,7 +32,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) void MacroAssembler::Load(Register dst, const MemOperand& src, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8()) { lb(dst, src); } else if (r.IsUInteger8()) { @@ -50,7 +50,7 @@ void MacroAssembler::Load(Register dst, void MacroAssembler::Store(Register src, const MemOperand& dst, Representation r) { - ASSERT(!r.IsDouble()); + DCHECK(!r.IsDouble()); if (r.IsInteger8() || r.IsUInteger8()) { sb(src, dst); } else if (r.IsInteger16() || r.IsUInteger16()) { @@ -101,7 +101,7 @@ void MacroAssembler::PushSafepointRegisters() { // Safepoints expect a block of kNumSafepointRegisters values on the // stack, so adjust the stack for unsaved registers. const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; - ASSERT(num_unsaved >= 0); + DCHECK(num_unsaved >= 0); if (num_unsaved > 0) { Subu(sp, sp, Operand(num_unsaved * kPointerSize)); } @@ -118,32 +118,6 @@ void MacroAssembler::PopSafepointRegisters() { } -void MacroAssembler::PushSafepointRegistersAndDoubles() { - PushSafepointRegisters(); - Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); - for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { - FPURegister reg = FPURegister::FromAllocationIndex(i); - sdc1(reg, MemOperand(sp, i * kDoubleSize)); - } -} - - -void MacroAssembler::PopSafepointRegistersAndDoubles() { - for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { - FPURegister reg = FPURegister::FromAllocationIndex(i); - ldc1(reg, MemOperand(sp, i * kDoubleSize)); - } - Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); - PopSafepointRegisters(); -} - - -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, - Register dst) { - sw(src, SafepointRegistersAndDoublesSlot(dst)); -} - - void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { sw(src, SafepointRegisterSlot(dst)); } @@ -179,7 +153,7 @@ void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc, Label* branch) { - ASSERT(cc == eq || cc == ne); + DCHECK(cc == eq || cc == ne); And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); Branch(branch, cc, scratch, Operand(ExternalReference::new_space_start(isolate()))); @@ -194,8 +168,9 @@ void MacroAssembler::RecordWriteField( RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!AreAliased(value, dst, t8, object)); + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(value, dst, t8, object)); // First, check if a write barrier is even needed. The tests below // catch stores of Smis. 
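The dominant change in this hunk, and in the rest of the file, is the mechanical rename of ASSERT to DCHECK. Both are debug-only checks that vanish from release builds; the sketch below shows the general shape of such a macro. It is illustrative only, not V8's actual definition, which also provides operator-specific variants such as DCHECK_EQ with richer diagnostics.

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define MY_DCHECK(condition)                                        \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
#define MY_DCHECK(condition) ((void)0)  // compiles to nothing in release
#endif

The distinction matters when reading this file: a CHECK stays live in shipping code, while every condition renamed here is verified only in debug builds.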
Label done; @@ -207,7 +182,7 @@ void MacroAssembler::RecordWriteField( // Although the object register is tagged, the offset is relative to the start // of the object, so the offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); + DCHECK(IsAligned(offset, kPointerSize)); Addu(dst, object, Operand(offset - kHeapObjectTag)); if (emit_debug_code()) { @@ -224,7 +199,8 @@ void MacroAssembler::RecordWriteField( ra_status, save_fp, remembered_set_action, - OMIT_SMI_CHECK); + OMIT_SMI_CHECK, + pointers_to_here_check_for_value); bind(&done); @@ -237,18 +213,95 @@ void MacroAssembler::RecordWriteField( } +// Will clobber 4 registers: object, map, dst, ip. The +// register 'object' contains a heap object pointer. +void MacroAssembler::RecordWriteForMap(Register object, + Register map, + Register dst, + RAStatus ra_status, + SaveFPRegsMode fp_mode) { + if (emit_debug_code()) { + DCHECK(!dst.is(at)); + lw(dst, FieldMemOperand(map, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + dst, + Operand(isolate()->factory()->meta_map())); + } + + if (!FLAG_incremental_marking) { + return; + } + + if (emit_debug_code()) { + lw(at, FieldMemOperand(object, HeapObject::kMapOffset)); + Check(eq, + kWrongAddressOrValuePassedToRecordWrite, + map, + Operand(at)); + } + + Label done; + + // A single check of the map's pages interesting flag suffices, since it is + // only set during incremental collection, and then it's also guaranteed that + // the from object's page's interesting flag is also set. This optimization + // relies on the fact that maps can never be in new space. + CheckPageFlag(map, + map, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + + Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + And(at, dst, Operand((1 << kPointerSizeLog2) - 1)); + Branch(&ok, eq, at, Operand(zero_reg)); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + // Record the actual write. + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, + fp_mode); + CallStub(&stub); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } + + bind(&done); + + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst); + + // Clobber clobbered registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + li(dst, Operand(BitCast<int32_t>(kZapValue + 12))); + li(map, Operand(BitCast<int32_t>(kZapValue + 16))); + } +} + + // Will clobber 4 registers: object, address, scratch, ip. The // register 'object' contains a heap object pointer. The heap object // tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, - Register address, - Register value, - RAStatus ra_status, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASSERT(!AreAliased(object, address, value, t8)); - ASSERT(!AreAliased(object, address, value, t9)); +void MacroAssembler::RecordWrite( + Register object, + Register address, + Register value, + RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check, + PointersToHereCheck pointers_to_here_check_for_value) { + DCHECK(!AreAliased(object, address, value, t8)); + DCHECK(!AreAliased(object, address, value, t9)); if (emit_debug_code()) { lw(at, MemOperand(address)); @@ -256,24 +309,27 @@ void MacroAssembler::RecordWrite(Register object, eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); } - // Count number of write barriers in generated code. - isolate()->counters()->write_barriers_static()->Increment(); - // TODO(mstarzinger): Dynamic counter missing. + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } // First, check if a write barrier is even needed. The tests below // catch stores of smis and stores into the young generation. Label done; if (smi_check == INLINE_SMI_CHECK) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); JumpIfSmi(value, &done); } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - eq, - &done); + if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + } CheckPageFlag(object, value, // Used as scratch. MemoryChunk::kPointersFromHereAreInterestingMask, @@ -293,6 +349,11 @@ void MacroAssembler::RecordWrite(Register object, bind(&done); + // Count number of write barriers in generated code. + isolate()->counters()->write_barriers_static()->Increment(); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, + value); + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { @@ -330,7 +391,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. if (and_then == kFallThroughAtEnd) { Branch(&done, eq, t8, Operand(zero_reg)); } else { - ASSERT(and_then == kReturnAtEnd); + DCHECK(and_then == kReturnAtEnd); Ret(eq, t8, Operand(zero_reg)); } push(ra); @@ -354,9 +415,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Label* miss) { Label same_contexts; - ASSERT(!holder_reg.is(scratch)); - ASSERT(!holder_reg.is(at)); - ASSERT(!scratch.is(at)); + DCHECK(!holder_reg.is(scratch)); + DCHECK(!holder_reg.is(at)); + DCHECK(!scratch.is(at)); // Load current lexical context from the stack frame. lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -419,6 +480,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +// Compute the hash code from the untagged key. This must be kept in sync with +// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in +// code-stub-hydrogen.cc void MacroAssembler::GetNumberHash(Register reg0, Register scratch) { // First of all we assign the hash seed to scratch. LoadRoot(scratch, Heap::kHashSeedRootIndex); @@ -508,7 +572,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, and_(reg2, reg2, reg1); // Scale the index by multiplying by the element size. 
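For reference, the C++ counterpart that the new comment above GetNumberHash says must stay in sync lives in utils.h. It is the well-known Thomas Wang 32-bit integer mix, folded with the heap's hash seed and masked so the result fits in a Smi; the version below follows that implementation, so treat details such as the final 30-bit mask as indicative rather than authoritative for this exact revision.

#include <cstdint>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key;
  hash = hash ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // keep 30 bits so the hash fits in a Smi
}

Each shift/xor/add step here corresponds to one sll/srl/xor_/addu group in the GetNumberHash assembly, which is what makes the keep-in-sync requirement checkable by eye.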
- ASSERT(SeededNumberDictionary::kEntrySize == 3); + DCHECK(SeededNumberDictionary::kEntrySize == 3); sll(at, reg2, 1); // 2x. addu(reg2, reg2, at); // reg2 = reg2 * 3. @@ -551,7 +615,7 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { addiu(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); addu(rd, rs, at); } @@ -567,7 +631,7 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm). } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); subu(rd, rs, at); } @@ -585,7 +649,7 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { } } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); if (kArchVariant == kLoongson) { mult(rs, at); @@ -602,7 +666,7 @@ void MacroAssembler::Mult(Register rs, const Operand& rt) { mult(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); mult(rs, at); } @@ -614,7 +678,7 @@ void MacroAssembler::Multu(Register rs, const Operand& rt) { multu(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); multu(rs, at); } @@ -626,7 +690,7 @@ void MacroAssembler::Div(Register rs, const Operand& rt) { div(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); div(rs, at); } @@ -638,7 +702,7 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) { divu(rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); divu(rs, at); } @@ -653,7 +717,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { andi(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); and_(rd, rs, at); } @@ -669,7 +733,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { ori(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); or_(rd, rs, at); } @@ -685,7 +749,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { xori(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); xor_(rd, rs, at); } @@ -698,7 +762,7 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { nor(rd, rs, rt.rm()); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); nor(rd, rs, at); } @@ -706,9 +770,9 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { void MacroAssembler::Neg(Register rs, const Operand& rt) { - ASSERT(rt.is_reg()); - ASSERT(!at.is(rs)); - ASSERT(!at.is(rt.rm())); + DCHECK(rt.is_reg()); + DCHECK(!at.is(rs)); + DCHECK(!at.is(rt.rm())); li(at, -1); xor_(rs, rt.rm(), at); } @@ -722,7 +786,7 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { slti(rd, rs, rt.imm32_); } else { // li handles the relocation. - ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); slt(rd, rs, at); } @@ -738,7 +802,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { sltiu(rd, rs, rt.imm32_); } else { // li handles the relocation. 
- ASSERT(!rs.is(at)); + DCHECK(!rs.is(at)); li(at, rt); sltu(rd, rs, at); } @@ -781,7 +845,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { } -//------------Pseudo-instructions------------- +// ------------Pseudo-instructions------------- void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { lwr(rd, rs); @@ -800,7 +864,7 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { if (value->IsSmi()) { li(dst, Operand(value), mode); } else { - ASSERT(value->IsHeapObject()); + DCHECK(value->IsHeapObject()); if (isolate()->heap()->InNewSpace(*value)) { Handle<Cell> cell = isolate()->factory()->NewCell(value); li(dst, Operand(cell)); @@ -813,7 +877,7 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { - ASSERT(!j.is_reg()); + DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { // Normal load of an immediate value which does not need Relocation Info. @@ -966,8 +1030,8 @@ void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos, uint16_t size) { - ASSERT(pos < 32); - ASSERT(pos + size < 33); + DCHECK(pos < 32); + DCHECK(pos + size < 33); if (kArchVariant == kMips32r2) { ext_(rt, rs, pos, size); @@ -989,14 +1053,14 @@ void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos, uint16_t size) { - ASSERT(pos < 32); - ASSERT(pos + size <= 32); - ASSERT(size != 0); + DCHECK(pos < 32); + DCHECK(pos + size <= 32); + DCHECK(size != 0); if (kArchVariant == kMips32r2) { ins_(rt, rs, pos, size); } else { - ASSERT(!rt.is(t8) && !rs.is(t8)); + DCHECK(!rt.is(t8) && !rs.is(t8)); Subu(at, zero_reg, Operand(1)); srl(at, at, 32 - size); and_(t8, rs, at); @@ -1025,9 +1089,9 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd, // We do this by converting rs minus the MSB to avoid sign conversion, // then adding 2^31 to the result (if needed). - ASSERT(!fd.is(scratch)); - ASSERT(!rs.is(t9)); - ASSERT(!rs.is(at)); + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(t9)); + DCHECK(!rs.is(at)); // Save rs's MSB to t9. Ext(t9, rs, 31, 1); @@ -1111,8 +1175,8 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch) { - ASSERT(!fd.is(scratch)); - ASSERT(!rs.is(at)); + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(at)); // Load 2^31 into scratch as its float representation. li(at, 0x41E00000); @@ -1153,7 +1217,7 @@ void MacroAssembler::BranchF(Label* target, return; } - ASSERT(nan || target); + DCHECK(nan || target); // Check for unordered (NaN) cases. if (nan) { c(UN, D, cmp1, cmp2); @@ -1199,7 +1263,7 @@ void MacroAssembler::BranchF(Label* target, break; default: CHECK(0); - }; + } } if (bd == PROTECT) { @@ -1269,8 +1333,8 @@ void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { if (kArchVariant == kLoongson) { // Tests an FP condition code and then conditionally move rs to rd. // We do not currently use any FPU cc bit other than bit 0. - ASSERT(cc == 0); - ASSERT(!(rs.is(t8) || rd.is(t8))); + DCHECK(cc == 0); + DCHECK(!(rs.is(t8) || rd.is(t8))); Label done; Register scratch = t8; // For testing purposes we need to fetch content of the FCSR register and @@ -1295,8 +1359,8 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { if (kArchVariant == kLoongson) { // Tests an FP condition code and then conditionally move rs to rd. // We do not currently use any FPU cc bit other than bit 0. 
- ASSERT(cc == 0); - ASSERT(!(rs.is(t8) || rd.is(t8))); + DCHECK(cc == 0); + DCHECK(!(rs.is(t8) || rd.is(t8))); Label done; Register scratch = t8; // For testing purposes we need to fetch content of the FCSR register and @@ -1319,7 +1383,7 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { void MacroAssembler::Clz(Register rd, Register rs) { if (kArchVariant == kLoongson) { - ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); + DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); Register mask = t8; Register scratch = t9; Label loop, end; @@ -1346,9 +1410,9 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, DoubleRegister double_scratch, Register except_flag, CheckForInexactConversion check_inexact) { - ASSERT(!result.is(scratch)); - ASSERT(!double_input.is(double_scratch)); - ASSERT(!except_flag.is(scratch)); + DCHECK(!result.is(scratch)); + DCHECK(!double_input.is(double_scratch)); + DCHECK(!except_flag.is(scratch)); Label done; @@ -1452,7 +1516,7 @@ void MacroAssembler::TruncateDoubleToI(Register result, void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { Label done; DoubleRegister double_scratch = f12; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); ldc1(double_scratch, MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); @@ -1479,7 +1543,7 @@ void MacroAssembler::TruncateNumberToI(Register object, Register scratch, Label* not_number) { Label done; - ASSERT(!result.is(object)); + DCHECK(!result.is(object)); UntagAndJumpIfSmi(result, object, &done); JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); @@ -1506,7 +1570,7 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, // Emulated conditional branches do not emit a nop in the branch delay slot. // // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. -#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ +#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) @@ -1598,7 +1662,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BRANCH_ARGS_CHECK(cond, rs, rt); - ASSERT(!rs.is(zero_reg)); + DCHECK(!rs.is(zero_reg)); Register r2 = no_reg; Register scratch = at; @@ -1698,14 +1762,14 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, break; case eq: // We don't want any other register but scratch clobbered. - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); beq(rs, r2, offset); break; case ne: // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); bne(rs, r2, offset); @@ -1950,14 +2014,14 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, b(offset); break; case eq: - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); offset = shifted_branch_offset(L, false); beq(rs, r2, offset); break; case ne: - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); offset = shifted_branch_offset(L, false); @@ -1969,7 +2033,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); bgtz(rs, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, r2, rs); @@ -1986,7 +2050,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); beq(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, rs, r2); @@ -2003,7 +2067,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); bne(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, rs, r2); @@ -2016,7 +2080,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); blez(rs, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); slt(scratch, r2, rs); @@ -2028,9 +2092,9 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, case Ugreater: if (rt.imm32_ == 0) { offset = shifted_branch_offset(L, false); - bgtz(rs, offset); + bne(rs, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, r2, rs); @@ -2047,7 +2111,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); beq(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, rs, r2); @@ -2064,7 +2128,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); bne(scratch, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, rs, r2); @@ -2077,7 +2141,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, offset = shifted_branch_offset(L, false); beq(rs, zero_reg, offset); } else { - ASSERT(!scratch.is(rs)); + DCHECK(!scratch.is(rs)); r2 = scratch; li(r2, rt); sltu(scratch, r2, rs); @@ -2090,7 +2154,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, } } // Check that the offset actually fits in an int16_t. - ASSERT(is_int16(offset)); + DCHECK(is_int16(offset)); // Emit a nop in the branch delay slot if required. if (bdslot == PROTECT) nop(); @@ -2352,7 +2416,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, } } // Check that the offset actually fits in an int16_t. - ASSERT(is_int16(offset)); + DCHECK(is_int16(offset)); // Emit a nop in the branch delay slot if required.
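One change in the BranchShort hunk above is behavioral rather than cosmetic: for Ugreater with a zero operand, bgtz(rs, offset) became bne(rs, zero_reg, offset). bgtz compares as signed, so operands with the sign bit set, which are large unsigned values, would wrongly fall through. Unsigned x > 0 is exactly x != 0, which bne against zero_reg implements. A host-side illustration of the two views (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0x80000000u;              // INT32_MIN under a signed view
  assert(x > 0u);                        // unsigned Ugreater: branch taken
  assert(static_cast<int32_t>(x) <= 0);  // signed bgtz view: not taken
  return 0;
}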
if (bdslot == PROTECT) @@ -2403,7 +2467,7 @@ void MacroAssembler::Jump(Address target, Register rs, const Operand& rt, BranchDelaySlot bd) { - ASSERT(!RelocInfo::IsCodeTarget(rmode)); + DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); } @@ -2414,7 +2478,7 @@ void MacroAssembler::Jump(Handle<Code> code, Register rs, const Operand& rt, BranchDelaySlot bd) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); AllowDeferredHandleDereference embedding_raw_address; Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); } @@ -2460,7 +2524,7 @@ void MacroAssembler::Call(Register target, if (bd == PROTECT) nop(); - ASSERT_EQ(CallSize(target, cond, rs, rt, bd), + DCHECK_EQ(CallSize(target, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2491,7 +2555,7 @@ void MacroAssembler::Call(Address target, positions_recorder()->WriteRecordedPositions(); li(t9, Operand(target_int, rmode), CONSTANT_SIZE); Call(t9, cond, rs, rt, bd); - ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), + DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2519,14 +2583,14 @@ void MacroAssembler::Call(Handle<Code> code, BlockTrampolinePoolScope block_trampoline_pool(this); Label start; bind(&start); - ASSERT(RelocInfo::IsCodeTarget(rmode)); + DCHECK(RelocInfo::IsCodeTarget(rmode)); if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { SetRecordedAstId(ast_id); rmode = RelocInfo::CODE_TARGET_WITH_ID; } AllowDeferredHandleDereference embedding_raw_address; Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); - ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), + DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); } @@ -2674,7 +2738,7 @@ void MacroAssembler::DebugBreak() { PrepareCEntryArgs(0); PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); CEntryStub ces(isolate(), 1); - ASSERT(AllowThisStubCall(&ces)); + DCHECK(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } @@ -2704,7 +2768,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind, // Push the frame pointer, context, state, and code object. if (kind == StackHandler::JS_ENTRY) { - ASSERT_EQ(Smi::FromInt(0), 0); + DCHECK_EQ(Smi::FromInt(0), 0); // The second zero_reg indicates no context. // The first zero_reg is the NULL frame pointer. // The operands are reversed to match the order of MultiPush/Pop. @@ -2832,7 +2896,7 @@ void MacroAssembler::Allocate(int object_size, Register scratch2, Label* gc_required, AllocationFlags flags) { - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -2844,18 +2908,18 @@ void MacroAssembler::Allocate(int object_size, return; } - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!scratch1.is(t9)); - ASSERT(!scratch2.is(t9)); - ASSERT(!result.is(t9)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!scratch1.is(t9)); + DCHECK(!scratch2.is(t9)); + DCHECK(!result.is(t9)); // Make object size into bytes. 
if ((flags & SIZE_IN_WORDS) != 0) { object_size *= kPointerSize; } - ASSERT_EQ(0, object_size & kObjectAlignmentMask); + DCHECK_EQ(0, object_size & kObjectAlignmentMask); // Check relative positions of allocation top and limit addresses. // ARM adds additional checks to make sure the ldm instruction can be @@ -2869,7 +2933,7 @@ void MacroAssembler::Allocate(int object_size, reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register topaddr = scratch1; @@ -2895,8 +2959,8 @@ void MacroAssembler::Allocate(int object_size, if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); And(scratch2, result, Operand(kDoubleAlignmentMask)); Label aligned; Branch(&aligned, eq, scratch2, Operand(zero_reg)); @@ -2939,11 +3003,11 @@ void MacroAssembler::Allocate(Register object_size, return; } - ASSERT(!result.is(scratch1)); - ASSERT(!result.is(scratch2)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!object_size.is(t9)); - ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!scratch1.is(scratch2)); + DCHECK(!object_size.is(t9)); + DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); // Check relative positions of allocation top and limit addresses. // ARM adds additional checks to make sure the ldm instruction can be @@ -2956,7 +3020,7 @@ void MacroAssembler::Allocate(Register object_size, reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); - ASSERT((limit - top) == kPointerSize); + DCHECK((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register topaddr = scratch1; @@ -2982,8 +3046,8 @@ void MacroAssembler::Allocate(Register object_size, if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. - ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); - ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + DCHECK(kPointerAlignment * 2 == kDoubleAlignment); And(scratch2, result, Operand(kDoubleAlignmentMask)); Label aligned; Branch(&aligned, eq, scratch2, Operand(zero_reg)); @@ -3049,7 +3113,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); sll(scratch1, length, 1); // Length in bytes, not chars. addiu(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); @@ -3080,8 +3144,8 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string // while observing object alignment. 
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - ASSERT(kCharSize == 1); + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + DCHECK(kCharSize == 1); addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); @@ -3122,33 +3186,12 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - Label allocate_new_space, install_map; - AllocationFlags flags = TAG_OBJECT; - - ExternalReference high_promotion_mode = ExternalReference:: - new_space_high_promotion_mode_active_address(isolate()); - li(scratch1, Operand(high_promotion_mode)); - lw(scratch1, MemOperand(scratch1, 0)); - Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg)); - - Allocate(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); - - jmp(&install_map); - - bind(&allocate_new_space); Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, - flags); - - bind(&install_map); + TAG_OBJECT); InitializeNewString(result, length, @@ -3209,14 +3252,19 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch2, Register heap_number_map, Label* need_gc, - TaggingMode tagging_mode) { + TaggingMode tagging_mode, + MutableMode mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); + Heap::RootListIndex map_index = mode == MUTABLE + ? Heap::kMutableHeapNumberMapRootIndex + : Heap::kHeapNumberMapRootIndex; + AssertIsRoot(heap_number_map, map_index); + // Store heap number map in the allocated object. - AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (tagging_mode == TAG_RESULT) { sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } else { @@ -3241,8 +3289,8 @@ void MacroAssembler::CopyFields(Register dst, Register src, RegList temps, int field_count) { - ASSERT((temps & dst.bit()) == 0); - ASSERT((temps & src.bit()) == 0); + DCHECK((temps & dst.bit()) == 0); + DCHECK((temps & src.bit()) == 0); // Primitive implementation using only one temporary register. Register tmp = no_reg; @@ -3253,7 +3301,7 @@ void MacroAssembler::CopyFields(Register dst, break; } } - ASSERT(!tmp.is(no_reg)); + DCHECK(!tmp.is(no_reg)); for (int i = 0; i < field_count; i++) { lw(tmp, FieldMemOperand(src, i * kPointerSize)); @@ -3572,7 +3620,7 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (!IsMipsSoftFloatABI) { if (src2.is(f12)) { - ASSERT(!src1.is(f14)); + DCHECK(!src1.is(f14)); Move(f14, src2); Move(f12, src1); } else { @@ -3615,12 +3663,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, // The code below is made a lot easier because the calling code already sets // up actual and expected registers according to the contract if values are // passed in registers. 
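The string allocators above size their payloads with the standard round-up-to-alignment idiom: add the header plus kObjectAlignmentMask, then clear the mask bits. A compilable restatement, assuming the 32-bit values kObjectAlignment == 8 and a 12-byte sequential-string header (both assumptions for illustration, not quoted from this tree):

#include <cassert>
#include <cstdint>

constexpr uint32_t kObjectAlignment = 8;  // 2 * kPointerSize on 32-bit MIPS
constexpr uint32_t kObjectAlignmentMask = kObjectAlignment - 1;

uint32_t SeqStringSize(uint32_t payload_bytes, uint32_t header_size) {
  // Mirrors addiu(scratch1, length, kObjectAlignmentMask + kHeaderSize)
  // followed by And(scratch1, scratch1, Operand(~kObjectAlignmentMask)).
  return (payload_bytes + header_size + kObjectAlignmentMask) &
         ~kObjectAlignmentMask;
}

int main() {
  assert(SeqStringSize(5, 12) == 24);   // 17 bytes round up to 24
  assert(SeqStringSize(4, 12) == 16);   // already aligned: unchanged
  return 0;
}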
- ASSERT(actual.is_immediate() || actual.reg().is(a0)); - ASSERT(expected.is_immediate() || expected.reg().is(a2)); - ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); + DCHECK(actual.is_immediate() || actual.reg().is(a0)); + DCHECK(expected.is_immediate() || expected.reg().is(a2)); + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); if (expected.is_immediate()) { - ASSERT(actual.is_immediate()); + DCHECK(actual.is_immediate()); if (expected.immediate() == actual.immediate()) { definitely_matches = true; } else { @@ -3673,7 +3721,7 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); Label done; @@ -3687,7 +3735,7 @@ void MacroAssembler::InvokeCode(Register code, Call(code); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(code); } // Continue here if InvokePrologue does handle the invocation due to @@ -3702,10 +3750,10 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in a1. - ASSERT(function.is(a1)); + DCHECK(function.is(a1)); Register expected_reg = a2; Register code_reg = a3; @@ -3728,10 +3776,10 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); // Contract with called JS functions requires that function is passed in a1. - ASSERT(function.is(a1)); + DCHECK(function.is(a1)); // Get the function and setup the context. lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); @@ -3775,7 +3823,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, void MacroAssembler::IsObjectJSStringType(Register object, Register scratch, Label* fail) { - ASSERT(kNotStringTag != 0); + DCHECK(kNotStringTag != 0); lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -3802,14 +3850,15 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register scratch, Label* miss, bool miss_on_bound_function) { - // Check that the receiver isn't a smi. - JumpIfSmi(function, miss); + Label non_instance; + if (miss_on_bound_function) { + // Check that the receiver isn't a smi. + JumpIfSmi(function, miss); - // Check that the function really is a function. Load map into result reg. - GetObjectType(function, result, scratch); - Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); + // Check that the function really is a function. Load map into result reg. 
+ GetObjectType(function, result, scratch); + Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); - if (miss_on_bound_function) { lw(scratch, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); lw(scratch, @@ -3817,13 +3866,12 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, And(scratch, scratch, Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); Branch(miss, ne, scratch, Operand(zero_reg)); - } - // Make sure that the function has an instance prototype. - Label non_instance; - lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); - And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); - Branch(&non_instance, ne, scratch, Operand(zero_reg)); + // Make sure that the function has an instance prototype. + lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); + And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); + Branch(&non_instance, ne, scratch, Operand(zero_reg)); + } // Get the prototype or initial map from the function. lw(result, @@ -3842,12 +3890,15 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, // Get the prototype from the initial map. lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); - jmp(&done); - // Non-instance prototype: Fetch prototype from constructor field - // in initial map. - bind(&non_instance); - lw(result, FieldMemOperand(result, Map::kConstructorOffset)); + if (miss_on_bound_function) { + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + lw(result, FieldMemOperand(result, Map::kConstructorOffset)); + } // All done. bind(&done); @@ -3871,7 +3922,7 @@ void MacroAssembler::CallStub(CodeStub* stub, Register r1, const Operand& r2, BranchDelaySlot bd) { - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond, r1, r2, bd); } @@ -3907,7 +3958,7 @@ void MacroAssembler::CallApiFunctionAndReturn( ExternalReference::handle_scope_level_address(isolate()), next_address); - ASSERT(function_address.is(a1) || function_address.is(a2)); + DCHECK(function_address.is(a1) || function_address.is(a2)); Label profiler_disabled; Label end_profiler_check; @@ -3996,7 +4047,7 @@ void MacroAssembler::CallApiFunctionAndReturn( { FrameScope frame(this, StackFrame::INTERNAL); CallExternalReference( - ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()), + ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0); } jmp(&exception_handled); @@ -4020,19 +4071,14 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { } -void MacroAssembler::IndexFromHash(Register hash, - Register index) { +void MacroAssembler::IndexFromHash(Register hash, Register index) { // If the hash field contains an array index pick it out. The assert checks // that the constants for the maximum number of digits for an array index // cached in the hash field and the number of bits reserved for it does not // conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. 
- STATIC_ASSERT(kSmiTag == 0); - Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - sll(index, hash, kSmiTagSize); + DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); } @@ -4087,18 +4133,18 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register right, Register overflow_dst, Register scratch) { - ASSERT(!dst.is(overflow_dst)); - ASSERT(!dst.is(scratch)); - ASSERT(!overflow_dst.is(scratch)); - ASSERT(!overflow_dst.is(left)); - ASSERT(!overflow_dst.is(right)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); if (left.is(right) && dst.is(left)) { - ASSERT(!dst.is(t9)); - ASSERT(!scratch.is(t9)); - ASSERT(!left.is(t9)); - ASSERT(!right.is(t9)); - ASSERT(!overflow_dst.is(t9)); + DCHECK(!dst.is(t9)); + DCHECK(!scratch.is(t9)); + DCHECK(!left.is(t9)); + DCHECK(!right.is(t9)); + DCHECK(!overflow_dst.is(t9)); mov(t9, right); right = t9; } @@ -4129,13 +4175,13 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register right, Register overflow_dst, Register scratch) { - ASSERT(!dst.is(overflow_dst)); - ASSERT(!dst.is(scratch)); - ASSERT(!overflow_dst.is(scratch)); - ASSERT(!overflow_dst.is(left)); - ASSERT(!overflow_dst.is(right)); - ASSERT(!scratch.is(left)); - ASSERT(!scratch.is(right)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!dst.is(scratch)); + DCHECK(!overflow_dst.is(scratch)); + DCHECK(!overflow_dst.is(left)); + DCHECK(!overflow_dst.is(right)); + DCHECK(!scratch.is(left)); + DCHECK(!scratch.is(right)); // This happens with some crankshaft code. Since Subu works fine if // left == right, let's not make that restriction here. @@ -4236,7 +4282,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + DCHECK(flag == JUMP_FUNCTION || has_frame()); GetBuiltinEntry(t9, id); if (flag == CALL_FUNCTION) { @@ -4244,7 +4290,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, Call(t9); call_wrapper.AfterCall(); } else { - ASSERT(flag == JUMP_FUNCTION); + DCHECK(flag == JUMP_FUNCTION); Jump(t9); } } @@ -4262,7 +4308,7 @@ void MacroAssembler::GetBuiltinFunction(Register target, void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(a1)); + DCHECK(!target.is(a1)); GetBuiltinFunction(a1, id); // Load the code entry point from the builtins object. 
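AdduAndCheckForOverflow above is built on the standard signed-overflow trick: XOR the result with each operand and AND the two, so the destination's sign bit ends up set exactly when the operands agreed in sign and the result disagrees. The same idea in portable C++ (an illustration, not the V8 helper itself):

#include <cassert>
#include <cstdint>

int32_t AddAndFlagOverflow(int32_t left, int32_t right, int32_t* overflow) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  *overflow = (dst ^ left) & (dst ^ right);  // negative exactly on overflow
  return dst;
}

int main() {
  int32_t of;
  AddAndFlagOverflow(0x7fffffff, 1, &of);  // INT32_MAX + 1 wraps
  assert(of < 0);
  AddAndFlagOverflow(1, 2, &of);           // no overflow
  assert(of >= 0);
  return 0;
}

SubuAndCheckForOverflow uses the mirrored condition: overflow iff the operands disagreed in sign and the result's sign differs from the minuend's.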
lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); @@ -4281,7 +4327,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value, void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); lw(scratch1, MemOperand(scratch2)); @@ -4293,7 +4339,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { - ASSERT(value > 0); + DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); lw(scratch1, MemOperand(scratch2)); @@ -4315,7 +4361,7 @@ void MacroAssembler::Assert(Condition cc, BailoutReason reason, void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { - ASSERT(!elements.is(at)); + DCHECK(!elements.is(at)); Label ok; push(elements); lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); @@ -4378,7 +4424,7 @@ void MacroAssembler::Abort(BailoutReason reason) { // generated instructions is 10, so we use this as a maximum value. static const int kExpectedAbortInstructions = 10; int abort_instructions = InstructionsGeneratedSince(&abort_start); - ASSERT(abort_instructions <= kExpectedAbortInstructions); + DCHECK(abort_instructions <= kExpectedAbortInstructions); while (abort_instructions++ < kExpectedAbortInstructions) { nop(); } @@ -4457,36 +4503,37 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, } -void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { - if (frame_mode == BUILD_STUB_FRAME) { +void MacroAssembler::StubPrologue() { Push(ra, fp, cp); Push(Smi::FromInt(StackFrame::STUB)); // Adjust FP to point to saved FP. Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - } else { - PredictableCodeSizeScope predictible_code_size_scope( +} + + +void MacroAssembler::Prologue(bool code_pre_aging) { + PredictableCodeSizeScope predictible_code_size_scope( this, kNoCodeAgeSequenceLength); - // The following three instructions must remain together and unmodified - // for code aging to work properly. - if (isolate()->IsCodePreAgingActive()) { - // Pre-age the code. - Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); - nop(Assembler::CODE_AGE_MARKER_NOP); - // Load the stub address to t9 and call it, - // GetCodeAgeAndParity() extracts the stub address from this instruction. - li(t9, - Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), - CONSTANT_SIZE); - nop(); // Prevent jalr to jal optimization. - jalr(t9, a0); - nop(); // Branch delay slot nop. - nop(); // Pad the empty space. - } else { - Push(ra, fp, cp, a1); - nop(Assembler::CODE_AGE_SEQUENCE_NOP); - // Adjust fp to point to caller's fp. - Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - } + // The following three instructions must remain together and unmodified + // for code aging to work properly. + if (code_pre_aging) { + // Pre-age the code. + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + nop(Assembler::CODE_AGE_MARKER_NOP); + // Load the stub address to t9 and call it, + // GetCodeAgeAndParity() extracts the stub address from this instruction. + li(t9, + Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), + CONSTANT_SIZE); + nop(); // Prevent jalr to jal optimization. 
+ jalr(t9, a0); + nop(); // Branch delay slot nop. + nop(); // Pad the empty space. + } else { + Push(ra, fp, cp, a1); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + // Adjust fp to point to caller's fp. + Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } } @@ -4553,9 +4600,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { // The stack must be aligned to 0 modulo 8 for stores with sdc1. - ASSERT(kDoubleSize == frame_alignment); + DCHECK(kDoubleSize == frame_alignment); if (frame_alignment > 0) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); // Align stack. } int space = FPURegister::kMaxNumRegisters * kDoubleSize; @@ -4570,10 +4617,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, // Reserve place for the return address, stack space and an optional slot // (used by the DirectCEntryStub to hold the return value if a struct is // returned) and align the frame preparing for calling the runtime function. - ASSERT(stack_space >= 0); + DCHECK(stack_space >= 0); Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); if (frame_alignment > 0) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); // Align stack. } @@ -4650,7 +4697,7 @@ int MacroAssembler::ActivationFrameAlignment() { // environment. // Note: This will break if we ever start generating snapshots on one Mips // platform for another Mips platform with a different alignment. - return OS::ActivationFrameAlignment(); + return base::OS::ActivationFrameAlignment(); #else // V8_HOST_ARCH_MIPS // If we are using the simulator then we should always align to the expected // alignment. As the simulator is used to generate snapshots we do not know @@ -4668,7 +4715,7 @@ void MacroAssembler::AssertStackIsAligned() { if (frame_alignment > kPointerSize) { Label alignment_as_expected; - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); andi(at, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort re-entering here. @@ -4692,7 +4739,7 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero( void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { - ASSERT(!reg.is(overflow)); + DCHECK(!reg.is(overflow)); mov(overflow, reg); // Save original value. SmiTag(reg); xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. @@ -4706,9 +4753,9 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst, // Fall back to slower case. SmiTagCheckOverflow(dst, overflow); } else { - ASSERT(!dst.is(src)); - ASSERT(!dst.is(overflow)); - ASSERT(!src.is(overflow)); + DCHECK(!dst.is(src)); + DCHECK(!dst.is(overflow)); + DCHECK(!src.is(overflow)); SmiTag(dst, src); xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
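SmiTagCheckOverflow relies on tagging being a left shift by one: the shift loses information exactly when the value needs more than 31 bits, and in that case value ^ (2 * value) has its sign bit set, which is the condition noted in the comments above. A self-contained check of that invariant (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

bool FitsSmi(int32_t v) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  return (v ^ tagged) >= 0;  // sign bit of v ^ 2v flags the lost bit
}

int main() {
  assert(FitsSmi(0x3fffffff));     // largest 31-bit Smi value
  assert(!FitsSmi(0x40000000));    // one past: tagging flips the sign
  assert(FitsSmi(-0x40000000));    // smallest Smi value still fits
  assert(!FitsSmi(-0x40000001));   // one below: flagged as overflow
  return 0;
}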
} @@ -4734,7 +4781,7 @@ void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Register scratch, BranchDelaySlot bd) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); } @@ -4743,7 +4790,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch, BranchDelaySlot bd) { - ASSERT_EQ(0, kSmiTag); + DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); } @@ -4753,7 +4800,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi) { STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(1, kSmiTagMask); + DCHECK_EQ(1, kSmiTagMask); or_(at, reg1, reg2); JumpIfNotSmi(at, on_not_both_smi); } @@ -4763,7 +4810,7 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi) { STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(1, kSmiTagMask); + DCHECK_EQ(1, kSmiTagMask); // Both Smi tags must be 1 (not Smi). and_(at, reg1, reg2); JumpIfSmi(at, on_either_smi); @@ -4835,7 +4882,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { - ASSERT(!reg.is(at)); + DCHECK(!reg.is(at)); LoadRoot(at, index); Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); } @@ -4980,7 +5027,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatAsciiStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; - ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. + DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. andi(scratch1, first, kFlatAsciiStringMask); Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); andi(scratch2, second, kFlatAsciiStringMask); @@ -5045,7 +5092,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, lw(at, FieldMemOperand(string, String::kLengthOffset)); Check(lt, kIndexIsTooLarge, index, Operand(at)); - ASSERT(Smi::FromInt(0) == 0); + DCHECK(Smi::FromInt(0) == 0); Check(ge, kIndexIsNegative, index, Operand(zero_reg)); SmiUntag(index, index); @@ -5069,7 +5116,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, // and the original value of sp. mov(scratch, sp); Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { @@ -5114,7 +5161,7 @@ void MacroAssembler::CallCFunction(Register function, void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { - ASSERT(has_frame()); + DCHECK(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. 
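PrepareCallCFunction above aligns sp the same way EnterExitFrame does earlier in the diff: because frame_alignment is a power of two (DCHECKed), And(sp, sp, Operand(-frame_alignment)) rounds the stack pointer down to the boundary. The same computation in C terms (illustrative):

#include <cassert>
#include <cstdint>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  // -alignment == ~(alignment - 1) in two's complement, so a single AND
  // clears the low bits and can only move sp toward lower addresses.
  return sp & ~(alignment - 1);
}

int main() {
  assert(AlignDown(0x7fff1234, 8) == 0x7fff1230);
  assert(AlignDown(0x7fff1230, 8) == 0x7fff1230);  // already aligned
  return 0;
}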
@@ -5123,10 +5170,10 @@ void MacroAssembler::CallCFunctionHelper(Register function, #if V8_HOST_ARCH_MIPS if (emit_debug_code()) { - int frame_alignment = OS::ActivationFrameAlignment(); + int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - ASSERT(IsPowerOf2(frame_alignment)); + DCHECK(IsPowerOf2(frame_alignment)); Label alignment_as_expected; And(at, sp, Operand(frame_alignment_mask)); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); @@ -5152,7 +5199,7 @@ void MacroAssembler::CallCFunctionHelper(Register function, int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); - if (OS::ActivationFrameAlignment() > kPointerSize) { + if (base::OS::ActivationFrameAlignment() > kPointerSize) { lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); @@ -5241,7 +5288,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map, if (map->CanBeDeprecated()) { li(scratch, Operand(map)); lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); - And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask))); + And(scratch, scratch, Operand(Map::Deprecated::kMask)); Branch(if_deprecated, ne, scratch, Operand(zero_reg)); } } @@ -5252,7 +5299,7 @@ void MacroAssembler::JumpIfBlack(Register object, Register scratch1, Label* on_black) { HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); } @@ -5262,8 +5309,8 @@ void MacroAssembler::HasColor(Register object, Label* has_color, int first_bit, int second_bit) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); GetMarkBits(object, bitmap_scratch, mask_scratch); @@ -5292,13 +5339,13 @@ void MacroAssembler::HasColor(Register object, void MacroAssembler::JumpIfDataObject(Register value, Register scratch, Label* not_data_object) { - ASSERT(!AreAliased(value, scratch, t8, no_reg)); + DCHECK(!AreAliased(value, scratch, t8, no_reg)); Label is_data_object; lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); LoadRoot(t8, Heap::kHeapNumberMapRootIndex); Branch(&is_data_object, eq, t8, Operand(scratch)); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. 
lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); @@ -5311,7 +5358,7 @@ void MacroAssembler::JumpIfDataObject(Register value, void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; @@ -5329,14 +5376,14 @@ void MacroAssembler::EnsureNotWhite( Register mask_scratch, Register load_scratch, Label* value_is_white_and_not_data) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); Label done; @@ -5375,8 +5422,8 @@ void MacroAssembler::EnsureNotWhite( } // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); // If it's a string and it's not a cons string then it's an object containing // no GC pointers. Register instance_type = load_scratch; @@ -5388,8 +5435,8 @@ void MacroAssembler::EnsureNotWhite( // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). // External strings are the only ones with the kExternalStringTag bit // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); + DCHECK_EQ(0, kConsStringTag & kExternalStringTag); And(t8, instance_type, Operand(kExternalStringTag)); { Label skip; @@ -5403,8 +5450,8 @@ void MacroAssembler::EnsureNotWhite( // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); - ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); + DCHECK(kSmiTag == 0 && kSmiTagSize == 1); lw(t9, FieldMemOperand(value, String::kLengthOffset)); And(t8, instance_type, Operand(kStringEncodingMask)); { @@ -5432,57 +5479,6 @@ void MacroAssembler::EnsureNotWhite( } -void MacroAssembler::Throw(BailoutReason reason) { - Label throw_start; - bind(&throw_start); -#ifdef DEBUG - const char* msg = GetBailoutReason(reason); - if (msg != NULL) { - RecordComment("Throw message: "); - RecordComment(msg); - } -#endif - - li(a0, Operand(Smi::FromInt(reason))); - push(a0); - // Disable stub call restrictions to always allow calls to throw. - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. 
@@ -5432,57 +5479,6 @@ void MacroAssembler::EnsureNotWhite(
 }
 
-void MacroAssembler::Throw(BailoutReason reason) {
-  Label throw_start;
-  bind(&throw_start);
-#ifdef DEBUG
-  const char* msg = GetBailoutReason(reason);
-  if (msg != NULL) {
-    RecordComment("Throw message: ");
-    RecordComment(msg);
-  }
-#endif
-
-  li(a0, Operand(Smi::FromInt(reason)));
-  push(a0);
-  // Disable stub call restrictions to always allow calls to throw.
-  if (!has_frame_) {
-    // We don't actually want to generate a pile of code for this, so just
-    // claim there is a stack frame, without generating one.
-    FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kHiddenThrowMessage, 1);
-  } else {
-    CallRuntime(Runtime::kHiddenThrowMessage, 1);
-  }
-  // will not return here
-  if (is_trampoline_pool_blocked()) {
-    // If the calling code cares throw the exact number of
-    // instructions generated, we insert padding here to keep the size
-    // of the ThrowMessage macro constant.
-    // Currently in debug mode with debug_code enabled the number of
-    // generated instructions is 14, so we use this as a maximum value.
-    static const int kExpectedThrowMessageInstructions = 14;
-    int throw_instructions = InstructionsGeneratedSince(&throw_start);
-    ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
-    while (throw_instructions++ < kExpectedThrowMessageInstructions) {
-      nop();
-    }
-  }
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc,
-                             BailoutReason reason,
-                             Register rs,
-                             Operand rt) {
-  Label L;
-  Branch(&L, NegateCondition(cc), rs, rt);
-  Throw(reason);
-  // will not return here
-  bind(&L);
-}
-
-
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -5498,7 +5494,8 @@ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
 void MacroAssembler::EnumLength(Register dst, Register map) {
   STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
   lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
-  And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
+  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+  SmiTag(dst);
 }
@@ -5544,7 +5541,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
-  ASSERT(!output_reg.is(input_reg));
+  DCHECK(!output_reg.is(input_reg));
   Label done;
   li(output_reg, Operand(255));  // Normal branch: nop in delay slot.
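The EnumLength hunk above swaps a tagged mask (Smi::FromInt(kMask), i.e. kMask shifted left by one, applied to a pre-tagged word) for a raw mask followed by SmiTag. Assuming the word is now read untagged, as the new code does, the result is the same Smi for a field with kShift == 0 (the STATIC_ASSERT): with kSmiTag == 0 and kSmiTagSize == 1 tagging is a left shift by one, and a left shift distributes over AND. An illustrative sketch under those assumptions; the names are ours:

#include <cstdint>

const int kSmiTagSize = 1;  // as DCHECKed elsewhere in this file

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

// Decode a shift-0 bit field from a raw word as a Smi: mask, then tag.
// Equivalent to masking a tagged word with a tagged mask, since
// (w & m) << 1 == (w << 1) & (m << 1).
int32_t DecodeAsSmi(uint32_t word, uint32_t mask) {
  return SmiTag(static_cast<int32_t>(word & mask));
}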
@@ -5639,7 +5636,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -5651,21 +5648,37 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
   bind(&loop_again);
   lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
   lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+  DecodeField<Map::ElementsKindBits>(scratch1);
   Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
   lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
   Branch(&loop_again, ne, current, Operand(factory->null_value()));
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
 
 
@@ -5679,19 +5692,19 @@ CodePatcher::CodePatcher(byte* address,
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   if (flush_cache_ == FLUSH) {
-    CPU::FlushICache(address_, size_);
+    CpuFeatures::FlushICache(address_, size_);
  }
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
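The new AreAliased above replaces pairwise comparisons with a counting argument: every valid register contributes one to the count and one bit to a RegList, so any duplicate shows up as a count that exceeds the population count of the set. A self-contained sketch of the same idea; the Reg stand-in and the popcount via std::bitset are ours, not V8's:

#include <bitset>
#include <cstdint>

struct Reg {
  int code;  // -1 marks an invalid (unused) argument slot
  bool is_valid() const { return code >= 0; }
  uint32_t bit() const { return 1u << code; }
};

bool AreAliased(Reg r1, Reg r2, Reg r3, Reg r4) {
  const Reg regs[] = {r1, r2, r3, r4};
  int n_valid = 0;
  uint32_t set = 0;
  for (const Reg& r : regs) {
    if (!r.is_valid()) continue;
    ++n_valid;
    set |= r.bit();
  }
  // A duplicated register adds to the count but not to the bit set.
  return n_valid != static_cast<int>(std::bitset<32>(set).count());
}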
@@ -5707,13 +5720,13 @@ void CodePatcher::Emit(Address addr) {
 
 void CodePatcher::ChangeBranchCondition(Condition cond) {
   Instr instr = Assembler::instr_at(masm_.pc_);
-  ASSERT(Assembler::IsBranch(instr));
+  DCHECK(Assembler::IsBranch(instr));
   uint32_t opcode = Assembler::GetOpcodeField(instr);
   // Currently only the 'eq' and 'ne' cond values are supported and the simple
   // branch instructions (with opcode being the branch type).
   // There are some special cases (see Assembler::IsBranch()) so extending this
   // would be tricky.
-  ASSERT(opcode == BEQ ||
+  DCHECK(opcode == BEQ ||
          opcode == BNE ||
          opcode == BLEZ ||
          opcode == BGTZ ||
@@ -5730,9 +5743,9 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
 void MacroAssembler::TruncatingDiv(Register result,
                                    Register dividend,
                                    int32_t divisor) {
-  ASSERT(!dividend.is(result));
-  ASSERT(!dividend.is(at));
-  ASSERT(!result.is(at));
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(at));
+  DCHECK(!result.is(at));
   MultiplierAndShift ms(divisor);
   li(at, Operand(ms.multiplier()));
   Mult(dividend, Operand(at));
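TruncatingDiv turns division by a compile-time constant into a multiply-high and shift: MultiplierAndShift supplies the magic pair, and the Mult issued here leaves the full 64-bit product in the HI/LO pair, from which the high word is taken. A standalone sketch for a single divisor, assuming the classic Hacker's Delight (ch. 10) constants for 7; V8 derives the pair for arbitrary divisors:

#include <cassert>
#include <cstdint>

int32_t TruncatingDivBy7(int32_t n) {
  const int32_t kMultiplier = static_cast<int32_t>(0x92492493u);  // magic M
  const int kShift = 2;                                           // post-shift
  // High 32 bits of the 64-bit product, as HI holds after a MIPS mult.
  int32_t hi =
      static_cast<int32_t>((static_cast<int64_t>(kMultiplier) * n) >> 32);
  hi += n;                               // correct for the negative multiplier
  hi >>= kShift;                         // arithmetic shift
  hi += static_cast<uint32_t>(n) >> 31;  // bump toward zero when n < 0
  return hi;
}

int main() {
  assert(TruncatingDivBy7(20) == 2);
  assert(TruncatingDivBy7(-20) == -2);
  assert(TruncatingDivBy7(21) == 3);
  return 0;
}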