author     Michaël Zasso <targos@protonmail.com>    2017-05-02 10:50:00 +0200
committer  Michaël Zasso <targos@protonmail.com>    2017-05-06 20:02:35 +0200
commit     60d1aac8d225e844e68ae48e8f3d58802e635fbe (patch)
tree       922f347dd054db18d88666fad7181e5a777f4022 /deps/v8/src/ppc
parent     73d9c0f903ae371cd5011af64c3a6f69a1bda978 (diff)
download   node-new-60d1aac8d225e844e68ae48e8f3d58802e635fbe.tar.gz
deps: update V8 to 5.8.283.38
PR-URL: https://github.com/nodejs/node/pull/12784
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h           |   21
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              |   17
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               |   12
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             |  856
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                |   26
-rw-r--r--  deps/v8/src/ppc/constants-ppc.h               | 2739
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc            |    2
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                 |   35
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  |   55
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        |   69
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         |   18
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc              |   37
12 files changed, 2628 insertions, 1259 deletions
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 12201daf21..216650c2f4 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -41,7 +41,7 @@
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 namespace v8 {
 namespace internal {
@@ -131,6 +131,17 @@ Address RelocInfo::constant_pool_entry_address() {
 int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 Address Assembler::target_address_from_return_address(Address pc) {
   // Returns the address of the call target from the return address that will
@@ -466,9 +477,9 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
 #if V8_TARGET_ARCH_PPC64
-const int kLoadIntptrOpcode = LD;
+const uint32_t kLoadIntptrOpcode = LD;
 #else
-const int kLoadIntptrOpcode = LWZ;
+const uint32_t kLoadIntptrOpcode = LWZ;
 #endif
 // Constant pool load sequence detection:
@@ -481,7 +492,7 @@ const int kLoadIntptrOpcode = LWZ;
 bool Assembler::IsConstantPoolLoadStart(Address pc,
                                         ConstantPoolEntry::Access* access) {
   Instr instr = instr_at(pc);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   if (!GetRA(instr).is(kConstantPoolRegister)) return false;
   bool overflowed = (opcode == ADDIS);
 #ifdef DEBUG
@@ -501,7 +512,7 @@ bool Assembler::IsConstantPoolLoadStart(Address pc,
 bool Assembler::IsConstantPoolLoadEnd(Address pc,
                                       ConstantPoolEntry::Access* access) {
   Instr instr = instr_at(pc);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   bool overflowed = false;
   if (!(opcode == kLoadIntptrOpcode || opcode == LFD)) return false;
   if (!GetRA(instr).is(kConstantPoolRegister)) {
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 32408f3079..645561dbdd 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -355,7 +355,7 @@ bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
 bool Assembler::IsCmpRegister(Instr instr) {
   return (((instr & kOpcodeMask) == EXT2) &&
-          ((instr & kExt2OpcodeMask) == CMP));
+          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
 }
@@ -370,7 +370,7 @@ bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
 #if V8_TARGET_ARCH_PPC64
 bool Assembler::IsRldicl(Instr instr) {
   return (((instr & kOpcodeMask) == EXT5) &&
-          ((instr & kExt5OpcodeMask) == RLDICL));
+          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
 }
 #endif
@@ -382,7 +382,7 @@ bool Assembler::IsCmpImmediate(Instr instr) {
 bool Assembler::IsCrSet(Instr instr) {
   return (((instr & kOpcodeMask) == EXT1) &&
-          ((instr & kExt1OpcodeMask) == CREQV));
+          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
 }
@@ -425,7 +425,7 @@ enum {
 int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
   // check which type of branch this is 16 or 26 bit offset
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   int link;
   switch (opcode) {
     case BX:
@@ -455,7 +455,7 @@ int Assembler::target_at(int pos) {
 void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
   Instr instr = instr_at(pos);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   if (is_branch != nullptr) {
     *is_branch = (opcode == BX || opcode == BCX);
@@ -535,7 +535,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
 int Assembler::max_reach_from(int pos) {
   Instr instr = instr_at(pos);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   // check which type of branch this is 16 or 26 bit offset
   switch (opcode) {
@@ -646,7 +646,6 @@ void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
   emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
 }
-
 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                         OEBit o, RCBit r) {
   emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
@@ -2251,13 +2250,13 @@ void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
 void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                         RCBit rc) {
-  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
 }
 void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
-  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
 }
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index b385af0321..810b42f900 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -206,6 +206,7 @@ const Register kRootRegister = r29;  // Roots array pointer.
 const Register cp = r30;  // JavaScript context pointer.
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 // Double word FP register.
 struct DoubleRegister {
@@ -469,17 +470,10 @@ class Assembler : public AssemblerBase {
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index ec1d3a0c56..389cba2f17 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -205,9 +205,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ cmpi(r7, Operand(SYMBOL_TYPE));
     __ beq(slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
- __ cmpi(r7, Operand(SIMD128_VALUE_TYPE)); - __ beq(slow); } else { __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE); __ beq(&heap_number); @@ -218,9 +215,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, // Call runtime on identical symbols since we need to throw a TypeError. __ cmpi(r7, Operand(SYMBOL_TYPE)); __ beq(slow); - // Call runtime on identical SIMD values since we must throw a TypeError. - __ cmpi(r7, Operand(SIMD128_VALUE_TYPE)); - __ beq(slow); // Normally here we fall through to return_equal, but undefined is // special: (undefined == undefined) == true, but // (undefined <= undefined) == false! See ECMAScript 11.8.5. @@ -1106,8 +1100,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ li(kConstantPoolRegister, Operand::Zero()); __ push(kConstantPoolRegister); } - int marker = type(); - __ LoadSmiLiteral(r0, Smi::FromInt(marker)); + StackFrame::Type marker = type(); + __ mov(r0, Operand(StackFrame::TypeToMarker(marker))); __ push(r0); __ push(r0); // Save copies of the top frame descriptor on the stack. @@ -1126,11 +1120,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ cmpi(r9, Operand::Zero()); __ bne(&non_outermost_js); __ StoreP(fp, MemOperand(r8)); - __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); + __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); Label cont; __ b(&cont); __ bind(&non_outermost_js); - __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)); + __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME)); __ bind(&cont); __ push(ip); // frame-type @@ -1193,7 +1187,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Check if the current stack frame is marked as the outermost JS frame. Label non_outermost_js_2; __ pop(r8); - __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0); + __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); __ bne(&non_outermost_js_2); __ mov(r9, Operand::Zero()); __ mov(r8, Operand(ExternalReference(js_entry_sp))); @@ -1220,55 +1214,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ blr(); } - -void FunctionPrototypeStub::Generate(MacroAssembler* masm) { - Label miss; - Register receiver = LoadDescriptor::ReceiverRegister(); - // Ensure that the vector and slot registers won't be clobbered before - // calling the miss handler. - DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(), - LoadWithVectorDescriptor::SlotRegister())); - - NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7, - r8, &miss); - __ bind(&miss); - PropertyAccessCompiler::TailCallBuiltin( - masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); -} - - -void LoadIndexedStringStub::Generate(MacroAssembler* masm) { - // Return address is in lr. - Label miss; - - Register receiver = LoadDescriptor::ReceiverRegister(); - Register index = LoadDescriptor::NameRegister(); - Register scratch = r8; - Register result = r3; - DCHECK(!scratch.is(receiver) && !scratch.is(index)); - DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) && - result.is(LoadWithVectorDescriptor::SlotRegister())); - - // StringCharAtGenerator doesn't use the result register until it's passed - // the different miss possibilities. If it did, we would have a conflict - // when FLAG_vector_ics is true. - StringCharAtGenerator char_at_generator(receiver, index, scratch, result, - &miss, // When not a string. - &miss, // When not a number. - &miss, // When index out of range. 
- RECEIVER_IS_STRING); - char_at_generator.GenerateFast(masm); - __ Ret(); - - StubRuntimeCallHelper call_helper; - char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper); - - __ bind(&miss); - PropertyAccessCompiler::TailCallBuiltin( - masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); -} - - void RegExpExecStub::Generate(MacroAssembler* masm) { // Just jump directly to runtime if native RegExp is not selected at compile // time or if regexp entry in generated code is turned off runtime switch or @@ -1373,7 +1318,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // (6) External string. Make it, offset-wise, look like a sequential string. // Go to (4). // (7) Short external string or not a string? If yes, bail out to runtime. - // (8) Sliced string. Replace subject with parent. Go to (1). + // (8) Sliced or thin string. Replace subject with parent. Go to (1). Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */, not_seq_nor_cons /* 5 */, not_long_external /* 7 */; @@ -1385,7 +1330,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // (1) Sequential string? If yes, go to (4). STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask | - kShortExternalStringMask) == 0x93); + kShortExternalStringMask) == 0xa7); __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask | kShortExternalStringMask)); STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); @@ -1394,6 +1339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // (2) Sequential or cons? If not, go to (5). STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); + STATIC_ASSERT(kThinStringTag > kExternalStringTag); STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); STATIC_ASSERT(kExternalStringTag < 0xffffu); @@ -1422,9 +1368,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ble(&runtime); __ SmiUntag(r4); - STATIC_ASSERT(4 == kOneByteStringTag); + STATIC_ASSERT(8 == kOneByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); - STATIC_ASSERT(kStringEncodingMask == 4); + STATIC_ASSERT(kStringEncodingMask == 8); __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC); __ beq(&encoding_type_UC16, cr0); __ LoadP(code, @@ -1670,12 +1616,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask)); __ bne(&runtime, cr0); - // (8) Sliced string. Replace subject with parent. Go to (4). + // (8) Sliced or thin string. Replace subject with parent. Go to (4). + Label thin_string; + __ cmpi(r4, Operand(kThinStringTag)); + __ beq(&thin_string); // Load offset into r11 and replace subject string with parent. __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset)); __ SmiUntag(r11); __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); __ b(&check_underlying); // Go to (4). + + __ bind(&thin_string); + __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset)); + __ b(&check_underlying); // Go to (4). #endif // V8_INTERPRETED_REGEXP } @@ -1851,190 +1804,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); } -// Note: feedback_vector and slot are clobbered after the call. 
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector, - Register slot, Register temp) { - const int count_offset = FixedArray::kHeaderSize + kPointerSize; - __ SmiToPtrArrayOffset(temp, slot); - __ add(feedback_vector, feedback_vector, temp); - __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset)); - __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp); - __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp); -} - -void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) { - // r3 - number of arguments - // r4 - function - // r6 - slot id - // r5 - vector - // r7 - allocation site (loaded from vector[slot]) - __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8); - __ cmp(r4, r8); - __ bne(miss); - - // Increment the call count for monomorphic function calls. - IncrementCallCount(masm, r5, r6, r0); - - __ mr(r5, r7); - __ mr(r6, r4); - ArrayConstructorStub stub(masm->isolate()); - __ TailCallStub(&stub); -} - - -void CallICStub::Generate(MacroAssembler* masm) { - // r3 - number of arguments - // r4 - function - // r6 - slot id (Smi) - // r5 - vector - Label extra_checks_or_miss, call, call_function, call_count_incremented; - - // The checks. First, does r4 match the recorded monomorphic target? - __ SmiToPtrArrayOffset(r9, r6); - __ add(r9, r5, r9); - __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize)); - - // We don't know that we have a weak cell. We might have a private symbol - // or an AllocationSite, but the memory is safe to examine. - // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to - // FixedArray. - // WeakCell::kValueOffset - contains a JSFunction or Smi(0) - // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not - // computed, meaning that it can't appear to be a pointer. If the low bit is - // 0, then hash is computed, but the 0 bit prevents the field from appearing - // to be a pointer. - STATIC_ASSERT(WeakCell::kSize >= kPointerSize); - STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == - WeakCell::kValueOffset && - WeakCell::kValueOffset == Symbol::kHashFieldSlot); - - __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset)); - __ cmp(r4, r8); - __ bne(&extra_checks_or_miss); - - // The compare above could have been a SMI/SMI comparison. Guard against this - // convincing us that we have a monomorphic JSFunction. - __ JumpIfSmi(r4, &extra_checks_or_miss); - - __ bind(&call_function); - - // Increment the call count for monomorphic function calls. - IncrementCallCount(masm, r5, r6, r0); - - __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), - tail_call_mode()), - RelocInfo::CODE_TARGET); - - __ bind(&extra_checks_or_miss); - Label uninitialized, miss, not_allocation_site; - - __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex); - __ beq(&call); - - // Verify that r7 contains an AllocationSite - __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset)); - __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); - __ bne(¬_allocation_site); - - // We have an allocation site. - HandleArrayCase(masm, &miss); - - __ bind(¬_allocation_site); - - // The following cases attempt to handle MISS cases without going to the - // runtime. - if (FLAG_trace_ic) { - __ b(&miss); - } - - __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex); - __ beq(&uninitialized); - - // We are going megamorphic. If the feedback is a JSFunction, it is fine - // to handle it here. More complex cases are dealt with in the runtime. 
- __ AssertNotSmi(r7); - __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE); - __ bne(&miss); - __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); - __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0); - - __ bind(&call); - - // Increment the call count for megamorphic function calls. - IncrementCallCount(masm, r5, r6, r0); - - __ bind(&call_count_incremented); - __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()), - RelocInfo::CODE_TARGET); - - __ bind(&uninitialized); - - // We are going monomorphic, provided we actually have a JSFunction. - __ JumpIfSmi(r4, &miss); - - // Goto miss case if we do not have a function. - __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE); - __ bne(&miss); - - // Make sure the function is not the Array() function, which requires special - // behavior on MISS. - __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7); - __ cmp(r4, r7); - __ beq(&miss); - - // Make sure the function belongs to the same native context. - __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset)); - __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX)); - __ LoadP(ip, NativeContextMemOperand()); - __ cmp(r7, ip); - __ bne(&miss); - - // Store the function. Use a stub since we need a frame for allocation. - // r5 - vector - // r6 - slot - // r4 - function - { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - CreateWeakCellStub create_stub(masm->isolate()); - __ SmiTag(r3); - __ Push(r3, r5, r6, cp, r4); - __ CallStub(&create_stub); - __ Pop(r5, r6, cp, r4); - __ Pop(r3); - __ SmiUntag(r3); - } - - __ b(&call_function); - - // We are here because tracing is on or we encountered a MISS case we can't - // handle here. - __ bind(&miss); - GenerateMiss(masm); - - __ b(&call_count_incremented); -} - - -void CallICStub::GenerateMiss(MacroAssembler* masm) { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - - // Preserve the number of arguments as Smi. - __ SmiTag(r3); - - // Push the receiver and the function and feedback info. - __ Push(r3, r4, r5, r6); - - // Call the entry. - __ CallRuntime(Runtime::kCallIC_Miss); - - // Move result to r4 and exit the internal frame. - __ mr(r4, r3); - - // Restore number of arguments. - __ Pop(r3); - __ SmiUntag(r3); -} - // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { @@ -2121,46 +1890,6 @@ void StringCharCodeAtGenerator::GenerateSlow( __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); } - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. - DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1)); - __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU)); - __ ori(r0, r0, Operand(kSmiTagMask)); - __ and_(r0, code_, r0, SetRC); - __ bne(&slow_case_, cr0); - - __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); - // At this point code register contains smi tagged one-byte char code. 
- __ mr(r0, code_); - __ SmiToPtrArrayOffset(code_, code_); - __ add(result_, result_, code_); - __ mr(code_, r0); - __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); - __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); - __ beq(&slow_case_); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kStringCharFromCode); - __ Move(result_, r3); - call_helper.AfterCall(masm); - __ b(&exit_); - - __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); -} - void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm, Register left, Register right, @@ -3070,12 +2799,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { __ Ret(); } -void CallICTrampolineStub::Generate(MacroAssembler* masm) { - __ EmitLoadFeedbackVector(r5); - CallICStub stub(isolate(), state()); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); -} - void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { PredictableCodeSizeScope predictable(masm, @@ -3446,549 +3169,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { GenerateCase(masm, FAST_ELEMENTS); } -void FastNewRestParameterStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r4 : function - // -- cp : context - // -- fp : frame pointer - // -- lr : return address - // ----------------------------------- - __ AssertFunction(r4); - - // Make r5 point to the JavaScript frame. - __ mr(r5, fp); - if (skip_stub_frame()) { - // For Ignition we need to skip the handler/stub frame to reach the - // JavaScript frame for the function. - __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); - } - if (FLAG_debug_code) { - Label ok; - __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset)); - __ cmp(ip, r4); - __ beq(&ok); - __ Abort(kInvalidFrameForFastNewRestArgumentsStub); - __ bind(&ok); - } - - // Check if we have rest parameters (only possible if we have an - // arguments adaptor frame below the function frame). - Label no_rest_parameters; - __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); - __ bne(&no_rest_parameters); - - // Check if the arguments adaptor frame contains more arguments than - // specified by the function's internal formal parameter count. - Label rest_parameters; - __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadWordArith( - r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset)); -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(r6); -#endif - __ sub(r3, r3, r6, LeaveOE, SetRC); - __ bgt(&rest_parameters, cr0); - - // Return an empty rest parameter array. - __ bind(&no_rest_parameters); - { - // ----------- S t a t e ------------- - // -- cp : context - // -- lr : return address - // ----------------------------------- - - // Allocate an empty rest parameter array. 
- Label allocate, done_allocate; - __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS); - __ bind(&done_allocate); - - // Setup the rest parameter array in r0. - __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4); - __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0); - __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); - __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0); - __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0); - __ li(r4, Operand::Zero()); - __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0); - STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); - __ Ret(); - - // Fall back to %AllocateInNewSpace. - __ bind(&allocate); - { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - __ Push(Smi::FromInt(JSArray::kSize)); - __ CallRuntime(Runtime::kAllocateInNewSpace); - } - __ b(&done_allocate); - } - - __ bind(&rest_parameters); - { - // Compute the pointer to the first rest parameter (skippping the receiver). - __ SmiToPtrArrayOffset(r9, r3); - __ add(r5, r5, r9); - __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); - - // ----------- S t a t e ------------- - // -- cp : context - // -- r3 : number of rest parameters (tagged) - // -- r4 : function - // -- r5 : pointer just past first rest parameters - // -- r9 : size of rest parameters - // -- lr : return address - // ----------------------------------- - - // Allocate space for the rest parameter array plus the backing store. - Label allocate, done_allocate; - __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize)); - __ add(r10, r10, r9); - __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS); - __ bind(&done_allocate); - - // Setup the elements array in r6. - __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex); - __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0); - __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); - __ addi(r7, r6, - Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); - { - Label loop; - __ SmiUntag(r0, r3); - __ mtctr(r0); - __ bind(&loop); - __ LoadPU(ip, MemOperand(r5, -kPointerSize)); - __ StorePU(ip, MemOperand(r7, kPointerSize)); - __ bdnz(&loop); - __ addi(r7, r7, Operand(kPointerSize)); - } - - // Setup the rest parameter array in r7. - __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4); - __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset)); - __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); - __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset)); - __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset)); - __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset)); - STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); - __ addi(r3, r7, Operand(kHeapObjectTag)); - __ Ret(); - - // Fall back to %AllocateInNewSpace (if not too big). - Label too_big_for_new_space; - __ bind(&allocate); - __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0); - __ bgt(&too_big_for_new_space); - { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - __ SmiTag(r10); - __ Push(r3, r5, r10); - __ CallRuntime(Runtime::kAllocateInNewSpace); - __ mr(r6, r3); - __ Pop(r3, r5); - } - __ b(&done_allocate); - - // Fall back to %NewRestParameter. 
- __ bind(&too_big_for_new_space); - __ push(r4); - __ TailCallRuntime(Runtime::kNewRestParameter); - } -} - -void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r4 : function - // -- cp : context - // -- fp : frame pointer - // -- lr : return address - // ----------------------------------- - __ AssertFunction(r4); - - // Make r10 point to the JavaScript frame. - __ mr(r10, fp); - if (skip_stub_frame()) { - // For Ignition we need to skip the handler/stub frame to reach the - // JavaScript frame for the function. - __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset)); - } - if (FLAG_debug_code) { - Label ok; - __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset)); - __ cmp(ip, r4); - __ beq(&ok); - __ Abort(kInvalidFrameForFastNewRestArgumentsStub); - __ bind(&ok); - } - - - // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub. - __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadWordArith( - r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(r5); -#endif - __ SmiToPtrArrayOffset(r6, r5); - __ add(r6, r10, r6); - __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); - - // r4 : function - // r5 : number of parameters (tagged) - // r6 : parameters pointer - // r10 : JavaScript frame pointer - // Registers used over whole function: - // r8 : arguments count (tagged) - // r9 : mapped parameter count (tagged) - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); - __ beq(&adaptor_frame); - - // No adaptor, parameter count = argument count. - __ mr(r8, r5); - __ mr(r9, r5); - __ b(&try_allocate); - - // We have an adaptor frame. Patch the parameters pointer. - __ bind(&adaptor_frame); - __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiToPtrArrayOffset(r6, r8); - __ add(r6, r6, r7); - __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); - - // r8 = argument count (tagged) - // r9 = parameter count (tagged) - // Compute the mapped parameter count = min(r5, r8) in r9. - __ cmp(r5, r8); - if (CpuFeatures::IsSupported(ISELECT)) { - __ isel(lt, r9, r5, r8); - } else { - Label skip; - __ mr(r9, r5); - __ blt(&skip); - __ mr(r9, r8); - __ bind(&skip); - } - - __ bind(&try_allocate); - - // Compute the sizes of backing store, parameter map, and arguments object. - // 1. Parameter map, has 2 extra words containing context and backing store. - const int kParameterMapHeaderSize = - FixedArray::kHeaderSize + 2 * kPointerSize; - // If there are no mapped parameters, we do not need the parameter_map. - __ CmpSmiLiteral(r9, Smi::kZero, r0); - if (CpuFeatures::IsSupported(ISELECT)) { - __ SmiToPtrArrayOffset(r11, r9); - __ addi(r11, r11, Operand(kParameterMapHeaderSize)); - __ isel(eq, r11, r0, r11); - } else { - Label skip2, skip3; - __ bne(&skip2); - __ li(r11, Operand::Zero()); - __ b(&skip3); - __ bind(&skip2); - __ SmiToPtrArrayOffset(r11, r9); - __ addi(r11, r11, Operand(kParameterMapHeaderSize)); - __ bind(&skip3); - } - - // 2. Backing store. 
- __ SmiToPtrArrayOffset(r7, r8); - __ add(r11, r11, r7); - __ addi(r11, r11, Operand(FixedArray::kHeaderSize)); - - // 3. Arguments object. - __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize)); - - // Do the allocation of all three objects in one go. - __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS); - - // r3 = address of new object(s) (tagged) - // r5 = argument count (smi-tagged) - // Get the arguments boilerplate from the current native context into r4. - const int kNormalOffset = - Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); - const int kAliasedOffset = - Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); - - __ LoadP(r7, NativeContextMemOperand()); - __ cmpi(r9, Operand::Zero()); - if (CpuFeatures::IsSupported(ISELECT)) { - __ LoadP(r11, MemOperand(r7, kNormalOffset)); - __ LoadP(r7, MemOperand(r7, kAliasedOffset)); - __ isel(eq, r7, r11, r7); - } else { - Label skip4, skip5; - __ bne(&skip4); - __ LoadP(r7, MemOperand(r7, kNormalOffset)); - __ b(&skip5); - __ bind(&skip4); - __ LoadP(r7, MemOperand(r7, kAliasedOffset)); - __ bind(&skip5); - } - - // r3 = address of new object (tagged) - // r5 = argument count (smi-tagged) - // r7 = address of arguments map (tagged) - // r9 = mapped parameter count (tagged) - __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0); - __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex); - __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); - __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0); - - // Set up the callee in-object property. - __ AssertNotSmi(r4); - __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset), - r0); - - // Use the length (smi tagged) and set that as an in-object property too. - __ AssertSmi(r8); - __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset), - r0); - - // Set up the elements pointer in the allocated arguments object. - // If we allocated a parameter map, r7 will point there, otherwise - // it will point to the backing store. - __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize)); - __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0); - - // r3 = address of new object (tagged) - // r5 = argument count (tagged) - // r7 = address of parameter map or backing store (tagged) - // r9 = mapped parameter count (tagged) - // Initialize parameter map. If there are no mapped arguments, we're done. - Label skip_parameter_map; - __ CmpSmiLiteral(r9, Smi::kZero, r0); - if (CpuFeatures::IsSupported(ISELECT)) { - __ isel(eq, r4, r7, r4); - __ beq(&skip_parameter_map); - } else { - Label skip6; - __ bne(&skip6); - // Move backing store address to r4, because it is - // expected there when filling in the unmapped arguments. - __ mr(r4, r7); - __ b(&skip_parameter_map); - __ bind(&skip6); - } - - __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex); - __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0); - __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0); - __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0); - __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize), - r0); - __ SmiToPtrArrayOffset(r8, r9); - __ add(r8, r8, r7); - __ addi(r8, r8, Operand(kParameterMapHeaderSize)); - __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize), - r0); - - // Copy the parameter slots and the holes in the arguments. - // We need to fill in mapped_parameter_count slots. 
They index the context, - // where parameters are stored in reverse order, at - // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 - // The mapped parameter thus need to get indices - // MIN_CONTEXT_SLOTS+parameter_count-1 .. - // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count - // We loop from right to left. - Label parameters_loop; - __ mr(r8, r9); - __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0); - __ sub(r11, r11, r9); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ SmiToPtrArrayOffset(r4, r8); - __ add(r4, r4, r7); - __ addi(r4, r4, Operand(kParameterMapHeaderSize)); - - // r4 = address of backing store (tagged) - // r7 = address of parameter map (tagged) - // r8 = temporary scratch (a.o., for address calculation) - // r10 = temporary scratch (a.o., for address calculation) - // ip = the hole value - __ SmiUntag(r8); - __ mtctr(r8); - __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2)); - __ add(r10, r4, r8); - __ add(r8, r7, r8); - __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag)); - - __ bind(¶meters_loop); - __ StorePU(r11, MemOperand(r8, -kPointerSize)); - __ StorePU(ip, MemOperand(r10, -kPointerSize)); - __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0); - __ bdnz(¶meters_loop); - - // Restore r8 = argument count (tagged). - __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset)); - - __ bind(&skip_parameter_map); - // r3 = address of new object (tagged) - // r4 = address of backing store (tagged) - // r8 = argument count (tagged) - // r9 = mapped parameter count (tagged) - // r11 = scratch - // Copy arguments header and remaining slots (if there are any). - __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex); - __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0); - __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0); - __ sub(r11, r8, r9, LeaveOE, SetRC); - __ Ret(eq, cr0); - - Label arguments_loop; - __ SmiUntag(r11); - __ mtctr(r11); - - __ SmiToPtrArrayOffset(r0, r9); - __ sub(r6, r6, r0); - __ add(r11, r4, r0); - __ addi(r11, r11, - Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); - - __ bind(&arguments_loop); - __ LoadPU(r7, MemOperand(r6, -kPointerSize)); - __ StorePU(r7, MemOperand(r11, kPointerSize)); - __ bdnz(&arguments_loop); - - // Return. - __ Ret(); - - // Do the runtime call to allocate the arguments object. - // r8 = argument count (tagged) - __ bind(&runtime); - __ Push(r4, r6, r8); - __ TailCallRuntime(Runtime::kNewSloppyArguments); -} - -void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r4 : function - // -- cp : context - // -- fp : frame pointer - // -- lr : return address - // ----------------------------------- - __ AssertFunction(r4); - - // Make r5 point to the JavaScript frame. - __ mr(r5, fp); - if (skip_stub_frame()) { - // For Ignition we need to skip the handler/stub frame to reach the - // JavaScript frame for the function. - __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); - } - if (FLAG_debug_code) { - Label ok; - __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset)); - __ cmp(ip, r4); - __ b(&ok); - __ Abort(kInvalidFrameForFastNewRestArgumentsStub); - __ bind(&ok); - } - - // Check if we have an arguments adaptor frame below the function frame. 
- Label arguments_adaptor, arguments_done; - __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); - __ beq(&arguments_adaptor); - { - __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadWordArith( - r3, - FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset)); -#if V8_TARGET_ARCH_PPC64 - __ SmiTag(r3); -#endif - __ SmiToPtrArrayOffset(r9, r3); - __ add(r5, r5, r9); - } - __ b(&arguments_done); - __ bind(&arguments_adaptor); - { - __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ SmiToPtrArrayOffset(r9, r3); - __ add(r5, r6, r9); - } - __ bind(&arguments_done); - __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); - - // ----------- S t a t e ------------- - // -- cp : context - // -- r3 : number of rest parameters (tagged) - // -- r4 : function - // -- r5 : pointer just past first rest parameters - // -- r9 : size of rest parameters - // -- lr : return address - // ----------------------------------- - - // Allocate space for the strict arguments object plus the backing store. - Label allocate, done_allocate; - __ mov(r10, - Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize)); - __ add(r10, r10, r9); - __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS); - __ bind(&done_allocate); - - // Setup the elements array in r6. - __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex); - __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0); - __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); - __ addi(r7, r6, - Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); - { - Label loop, done_loop; - __ SmiUntag(r0, r3, SetRC); - __ beq(&done_loop, cr0); - __ mtctr(r0); - __ bind(&loop); - __ LoadPU(ip, MemOperand(r5, -kPointerSize)); - __ StorePU(ip, MemOperand(r7, kPointerSize)); - __ bdnz(&loop); - __ bind(&done_loop); - __ addi(r7, r7, Operand(kPointerSize)); - } - - // Setup the rest parameter array in r7. - __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4); - __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset)); - __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); - __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset)); - __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset)); - __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset)); - STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize); - __ addi(r3, r7, Operand(kHeapObjectTag)); - __ Ret(); - - // Fall back to %AllocateInNewSpace (if not too big). - Label too_big_for_new_space; - __ bind(&allocate); - __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0); - __ bgt(&too_big_for_new_space); - { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - __ SmiTag(r10); - __ Push(r3, r5, r10); - __ CallRuntime(Runtime::kAllocateInNewSpace); - __ mr(r6, r3); - __ Pop(r3, r5); - } - __ b(&done_allocate); - - // Fall back to %NewStrictArguments. 
- __ bind(&too_big_for_new_space); - __ push(r4); - __ TailCallRuntime(Runtime::kNewStrictArguments); -} - static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { return ref0.address() - ref1.address(); } diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc index bb365b4e63..212e6db11d 100644 --- a/deps/v8/src/ppc/codegen-ppc.cc +++ b/deps/v8/src/ppc/codegen-ppc.cc @@ -77,6 +77,9 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string, Register index, Register result, Label* call_runtime) { + Label indirect_string_loaded; + __ bind(&indirect_string_loaded); + // Fetch the instance type of the receiver into result register. __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); @@ -86,20 +89,26 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string, __ andi(r0, result, Operand(kIsIndirectStringMask)); __ beq(&check_sequential, cr0); - // Dispatch on the indirect string shape: slice or cons. - Label cons_string; - __ mov(ip, Operand(kSlicedNotConsMask)); - __ and_(r0, result, ip, SetRC); - __ beq(&cons_string, cr0); + // Dispatch on the indirect string shape: slice or cons or thin. + Label cons_string, thin_string; + __ andi(ip, result, Operand(kStringRepresentationMask)); + __ cmpi(ip, Operand(kConsStringTag)); + __ beq(&cons_string); + __ cmpi(ip, Operand(kThinStringTag)); + __ beq(&thin_string); // Handle slices. - Label indirect_string_loaded; __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset)); __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset)); __ SmiUntag(ip, result); __ add(index, index, ip); __ b(&indirect_string_loaded); + // Handle thin strings. + __ bind(&thin_string); + __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset)); + __ b(&indirect_string_loaded); + // Handle cons strings. // Check whether the right hand side is the empty string (i.e. if // this is really a flat string in a cons string). If that is not @@ -111,10 +120,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string, __ bne(call_runtime); // Get the first of the two strings and load its instance type. __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset)); - - __ bind(&indirect_string_loaded); - __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); - __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); + __ b(&indirect_string_loaded); // Distinguish sequential and external strings. Only these two string // representations can reach here (slices and flat cons strings have been diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h index daa52257d6..d131438139 100644 --- a/deps/v8/src/ppc/constants-ppc.h +++ b/deps/v8/src/ppc/constants-ppc.h @@ -91,258 +91,2489 @@ inline Condition CommuteCondition(Condition cond) { // representing instructions from usual 32 bit values. // Instruction objects are pointers to 32bit values, and provide methods to // access the various ISA fields. 
-typedef int32_t Instr; - -// Opcodes as defined in section 4.2 table 34 (32bit PowerPC) -enum Opcode { - TWI = 3 << 26, // Trap Word Immediate - MULLI = 7 << 26, // Multiply Low Immediate - SUBFIC = 8 << 26, // Subtract from Immediate Carrying - CMPLI = 10 << 26, // Compare Logical Immediate - CMPI = 11 << 26, // Compare Immediate - ADDIC = 12 << 26, // Add Immediate Carrying - ADDICx = 13 << 26, // Add Immediate Carrying and Record - ADDI = 14 << 26, // Add Immediate - ADDIS = 15 << 26, // Add Immediate Shifted - BCX = 16 << 26, // Branch Conditional - SC = 17 << 26, // System Call - BX = 18 << 26, // Branch - EXT1 = 19 << 26, // Extended code set 1 - RLWIMIX = 20 << 26, // Rotate Left Word Immediate then Mask Insert - RLWINMX = 21 << 26, // Rotate Left Word Immediate then AND with Mask - RLWNMX = 23 << 26, // Rotate Left Word then AND with Mask - ORI = 24 << 26, // OR Immediate - ORIS = 25 << 26, // OR Immediate Shifted - XORI = 26 << 26, // XOR Immediate - XORIS = 27 << 26, // XOR Immediate Shifted - ANDIx = 28 << 26, // AND Immediate - ANDISx = 29 << 26, // AND Immediate Shifted - EXT5 = 30 << 26, // Extended code set 5 - 64bit only - EXT2 = 31 << 26, // Extended code set 2 - LWZ = 32 << 26, // Load Word and Zero - LWZU = 33 << 26, // Load Word with Zero Update - LBZ = 34 << 26, // Load Byte and Zero - LBZU = 35 << 26, // Load Byte and Zero with Update - STW = 36 << 26, // Store - STWU = 37 << 26, // Store Word with Update - STB = 38 << 26, // Store Byte - STBU = 39 << 26, // Store Byte with Update - LHZ = 40 << 26, // Load Half and Zero - LHZU = 41 << 26, // Load Half and Zero with Update - LHA = 42 << 26, // Load Half Algebraic - LHAU = 43 << 26, // Load Half Algebraic with Update - STH = 44 << 26, // Store Half - STHU = 45 << 26, // Store Half with Update - LMW = 46 << 26, // Load Multiple Word - STMW = 47 << 26, // Store Multiple Word - LFS = 48 << 26, // Load Floating-Point Single - LFSU = 49 << 26, // Load Floating-Point Single with Update - LFD = 50 << 26, // Load Floating-Point Double - LFDU = 51 << 26, // Load Floating-Point Double with Update - STFS = 52 << 26, // Store Floating-Point Single - STFSU = 53 << 26, // Store Floating-Point Single with Update - STFD = 54 << 26, // Store Floating-Point Double - STFDU = 55 << 26, // Store Floating-Point Double with Update - LD = 58 << 26, // Load Double Word - EXT3 = 59 << 26, // Extended code set 3 - EXT6 = 60 << 26, // Extended code set 6 - STD = 62 << 26, // Store Double Word (optionally with Update) - EXT4 = 63 << 26 // Extended code set 4 -}; - -// Bits 10-1 -enum OpcodeExt1 { - MCRF = 0 << 1, // Move Condition Register Field - BCLRX = 16 << 1, // Branch Conditional Link Register - CRNOR = 33 << 1, // Condition Register NOR) - RFI = 50 << 1, // Return from Interrupt - CRANDC = 129 << 1, // Condition Register AND with Complement - ISYNC = 150 << 1, // Instruction Synchronize - CRXOR = 193 << 1, // Condition Register XOR - CRNAND = 225 << 1, // Condition Register NAND - CRAND = 257 << 1, // Condition Register AND - CREQV = 289 << 1, // Condition Register Equivalent - CRORC = 417 << 1, // Condition Register OR with Complement - CROR = 449 << 1, // Condition Register OR - BCCTRX = 528 << 1 // Branch Conditional to Count Register -}; - -// Bits 9-1 or 10-1 -enum OpcodeExt2 { - CMP = 0 << 1, - TW = 4 << 1, - SUBFCX = 8 << 1, - ADDCX = 10 << 1, - MULHWUX = 11 << 1, - ISEL = 15 << 1, - MFCR = 19 << 1, - LWARX = 20 << 1, - LDX = 21 << 1, - LWZX = 23 << 1, // load word zero w/ x-form - SLWX = 24 << 1, - CNTLZWX = 26 << 1, - SLDX = 27 << 
1, - ANDX = 28 << 1, - CMPL = 32 << 1, - SUBFX = 40 << 1, - MFVSRD = 51 << 1, // Move From VSR Doubleword - LDUX = 53 << 1, - DCBST = 54 << 1, - LWZUX = 55 << 1, // load word zero w/ update x-form - CNTLZDX = 58 << 1, - ANDCX = 60 << 1, - MULHWX = 75 << 1, - DCBF = 86 << 1, - LBZX = 87 << 1, // load byte zero w/ x-form - NEGX = 104 << 1, - MFVSRWZ = 115 << 1, // Move From VSR Word And Zero - LBZUX = 119 << 1, // load byte zero w/ update x-form - NORX = 124 << 1, - SUBFEX = 136 << 1, - ADDEX = 138 << 1, - STDX = 149 << 1, - STWX = 151 << 1, // store word w/ x-form - MTVSRD = 179 << 1, // Move To VSR Doubleword - STDUX = 181 << 1, - STWUX = 183 << 1, // store word w/ update x-form - /* - MTCRF - MTMSR - STWCXx - SUBFZEX - */ - ADDZEX = 202 << 1, // Add to Zero Extended - /* - MTSR - */ - - MTVSRWA = 211 << 1, // Move To VSR Word Algebraic - STBX = 215 << 1, // store byte w/ x-form - MULLD = 233 << 1, // Multiply Low Double Word - MULLW = 235 << 1, // Multiply Low Word - MTVSRWZ = 243 << 1, // Move To VSR Word And Zero - STBUX = 247 << 1, // store byte w/ update x-form - MODUD = 265 << 1, // Modulo Unsigned Dword - ADDX = 266 << 1, // Add - MODUW = 267 << 1, // Modulo Unsigned Word - LHZX = 279 << 1, // load half-word zero w/ x-form - LHZUX = 311 << 1, // load half-word zero w/ update x-form - LWAX = 341 << 1, // load word algebraic w/ x-form - LHAX = 343 << 1, // load half-word algebraic w/ x-form - LHAUX = 375 << 1, // load half-word algebraic w/ update x-form - XORX = 316 << 1, // Exclusive OR - MFSPR = 339 << 1, // Move from Special-Purpose-Register - POPCNTW = 378 << 1, // Population Count Words - STHX = 407 << 1, // store half-word w/ x-form - ORC = 412 << 1, // Or with Complement - STHUX = 439 << 1, // store half-word w/ update x-form - ORX = 444 << 1, // Or - DIVDU = 457 << 1, // Divide Double Word Unsigned - DIVWU = 459 << 1, // Divide Word Unsigned - MTSPR = 467 << 1, // Move to Special-Purpose-Register - DIVD = 489 << 1, // Divide Double Word - DIVW = 491 << 1, // Divide Word - POPCNTD = 506 << 1, // Population Count Doubleword - - // Below represent bits 10-1 (any value >= 512) - LDBRX = 532 << 1, // load double word byte reversed w/ x-form - LWBRX = 534 << 1, // load word byte reversed w/ x-form - LFSX = 535 << 1, // load float-single w/ x-form - SRWX = 536 << 1, // Shift Right Word - SRDX = 539 << 1, // Shift Right Double Word - LFSUX = 567 << 1, // load float-single w/ update x-form - SYNC = 598 << 1, // Synchronize - LFDX = 599 << 1, // load float-double w/ x-form - LFDUX = 631 << 1, // load float-double w/ update X-form - STFSX = 663 << 1, // store float-single w/ x-form - STFSUX = 695 << 1, // store float-single w/ update x-form - STFDX = 727 << 1, // store float-double w/ x-form - STFDUX = 759 << 1, // store float-double w/ update x-form - MODSD = 777 << 1, // Modulo Signed Dword - MODSW = 779 << 1, // Modulo Signed Word - LHBRX = 790 << 1, // load half word byte reversed w/ x-form - SRAW = 792 << 1, // Shift Right Algebraic Word - SRAD = 794 << 1, // Shift Right Algebraic Double Word - SRAWIX = 824 << 1, // Shift Right Algebraic Word Immediate - SRADIX = 413 << 2, // Shift Right Algebraic Double Word Immediate - EXTSH = 922 << 1, // Extend Sign Halfword - EXTSB = 954 << 1, // Extend Sign Byte - ICBI = 982 << 1, // Instruction Cache Block Invalidate - EXTSW = 986 << 1 // Extend Sign Word -}; - -// Some use Bits 10-1 and other only 5-1 for the opcode -enum OpcodeExt4 { - // Bits 5-1 - FDIV = 18 << 1, // Floating Divide - FSUB = 20 << 1, // Floating Subtract - FADD = 21 << 1, 
// Floating Add - FSQRT = 22 << 1, // Floating Square Root - FSEL = 23 << 1, // Floating Select - FMUL = 25 << 1, // Floating Multiply - FMSUB = 28 << 1, // Floating Multiply-Subtract - FMADD = 29 << 1, // Floating Multiply-Add - - // Bits 10-1 - FCMPU = 0 << 1, // Floating Compare Unordered - FRSP = 12 << 1, // Floating-Point Rounding - FCTIW = 14 << 1, // Floating Convert to Integer Word X-form - FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero - MTFSB1 = 38 << 1, // Move to FPSCR Bit 1 - FNEG = 40 << 1, // Floating Negate - MCRFS = 64 << 1, // Move to Condition Register from FPSCR - MTFSB0 = 70 << 1, // Move to FPSCR Bit 0 - FMR = 72 << 1, // Floating Move Register - MTFSFI = 134 << 1, // Move to FPSCR Field Immediate - FABS = 264 << 1, // Floating Absolute Value - FRIN = 392 << 1, // Floating Round to Integer Nearest - FRIZ = 424 << 1, // Floating Round to Integer Toward Zero - FRIP = 456 << 1, // Floating Round to Integer Plus - FRIM = 488 << 1, // Floating Round to Integer Minus - MFFS = 583 << 1, // move from FPSCR x-form - MTFSF = 711 << 1, // move to FPSCR fields XFL-form - FCTID = 814 << 1, // Floating convert to integer doubleword - FCTIDZ = 815 << 1, // ^^^ with round toward zero - FCFID = 846 << 1, // Floating convert from integer doubleword - FCTIDU = 942 << 1, // Floating convert to integer doubleword unsigned - FCTIDUZ = 943 << 1, // ^^^ with round toward zero - FCFIDU = 974 << 1 // Floating convert from integer doubleword unsigned -}; - -enum OpcodeExt5 { - // Bits 4-2 - RLDICL = 0 << 1, // Rotate Left Double Word Immediate then Clear Left - RLDICR = 2 << 1, // Rotate Left Double Word Immediate then Clear Right - RLDIC = 4 << 1, // Rotate Left Double Word Immediate then Clear - RLDIMI = 6 << 1, // Rotate Left Double Word Immediate then Mask Insert - // Bits 4-1 - RLDCL = 8 << 1, // Rotate Left Double Word then Clear Left - RLDCR = 9 << 1 // Rotate Left Double Word then Clear Right -}; - -// Bits 10-3 -#define XX3_OPCODE_LIST(V) \ - V(xsaddsp, XSADDSP, 0 << 3) /* VSX Scalar Add SP */ \ - V(xssubsp, XSSUBSP, 8 << 3) /* VSX Scalar Subtract SP */ \ - V(xsmulsp, XSMULSP, 16 << 3) /* VSX Scalar Multiply SP */ \ - V(xsdivsp, XSDIVSP, 24 << 3) /* VSX Scalar Divide SP */ \ - V(xsadddp, XSADDDP, 32 << 3) /* VSX Scalar Add DP */ \ - V(xssubdp, XSSUBDP, 40 << 3) /* VSX Scalar Subtract DP */ \ - V(xsmuldp, XSMULDP, 48 << 3) /* VSX Scalar Multiply DP */ \ - V(xsdivdp, XSDIVDP, 56 << 3) /* VSX Scalar Divide DP */ \ - V(xsmaxdp, XSMAXDP, 160 << 3) /* VSX Scalar Maximum DP */ \ - V(xsmindp, XSMINDP, 168 << 3) /* VSX Scalar Minimum DP */ - -// Bits 10-2 -#define XX2_OPCODE_LIST(V) \ - V(XSCVDPSP, XSCVDPSP, 265 << 2) /* VSX Scalar Convert DP to SP */ \ - V(XSCVSPDP, XSCVSPDP, 329 << 2) /* VSX Scalar Convert SP to DP */ - -enum OpcodeExt6 { -#define DECLARE_OPCODES(name, opcode_name, opcode_value) \ +typedef uint32_t Instr; + +#define PPC_XX3_OPCODE_LIST(V) \ + /* VSX Scalar Add Double-Precision */ \ + V(xsadddp, XSADDDP, 0xF0000100) \ + /* VSX Scalar Add Single-Precision */ \ + V(xsaddsp, XSADDSP, 0xF0000000) \ + /* VSX Scalar Compare Ordered Double-Precision */ \ + V(xscmpodp, XSCMPODP, 0xF0000158) \ + /* VSX Scalar Compare Unordered Double-Precision */ \ + V(xscmpudp, XSCMPUDP, 0xF0000118) \ + /* VSX Scalar Copy Sign Double-Precision */ \ + V(xscpsgndp, XSCPSGNDP, 0xF0000580) \ + /* VSX Scalar Divide Double-Precision */ \ + V(xsdivdp, XSDIVDP, 0xF00001C0) \ + /* VSX Scalar Divide Single-Precision */ \ + V(xsdivsp, XSDIVSP, 0xF00000C0) \ + /* VSX Scalar Multiply-Add 
Type-A Double-Precision */ \ + V(xsmaddadp, XSMADDADP, 0xF0000108) \ + /* VSX Scalar Multiply-Add Type-A Single-Precision */ \ + V(xsmaddasp, XSMADDASP, 0xF0000008) \ + /* VSX Scalar Multiply-Add Type-M Double-Precision */ \ + V(xsmaddmdp, XSMADDMDP, 0xF0000148) \ + /* VSX Scalar Multiply-Add Type-M Single-Precision */ \ + V(xsmaddmsp, XSMADDMSP, 0xF0000048) \ + /* VSX Scalar Maximum Double-Precision */ \ + V(xsmaxdp, XSMAXDP, 0xF0000500) \ + /* VSX Scalar Minimum Double-Precision */ \ + V(xsmindp, XSMINDP, 0xF0000540) \ + /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \ + V(xsmsubadp, XSMSUBADP, 0xF0000188) \ + /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \ + V(xsmsubasp, XSMSUBASP, 0xF0000088) \ + /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \ + V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \ + /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \ + V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \ + /* VSX Scalar Multiply Double-Precision */ \ + V(xsmuldp, XSMULDP, 0xF0000180) \ + /* VSX Scalar Multiply Single-Precision */ \ + V(xsmulsp, XSMULSP, 0xF0000080) \ + /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \ + V(xsnmaddadp, XSNMADDADP, 0xF0000508) \ + /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \ + V(xsnmaddasp, XSNMADDASP, 0xF0000408) \ + /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \ + V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \ + /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \ + V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \ + /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \ + V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \ + /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \ + V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \ + /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \ + V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \ + /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \ + V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \ + /* VSX Scalar Reciprocal Estimate Double-Precision */ \ + V(xsredp, XSREDP, 0xF0000168) \ + /* VSX Scalar Reciprocal Estimate Single-Precision */ \ + V(xsresp, XSRESP, 0xF0000068) \ + /* VSX Scalar Subtract Double-Precision */ \ + V(xssubdp, XSSUBDP, 0xF0000140) \ + /* VSX Scalar Subtract Single-Precision */ \ + V(xssubsp, XSSUBSP, 0xF0000040) \ + /* VSX Scalar Test for software Divide Double-Precision */ \ + V(xstdivdp, XSTDIVDP, 0xF00001E8) \ + /* VSX Vector Add Double-Precision */ \ + V(xvadddp, XVADDDP, 0xF0000300) \ + /* VSX Vector Add Single-Precision */ \ + V(xvaddsp, XVADDSP, 0xF0000200) \ + /* VSX Vector Compare Equal To Double-Precision */ \ + V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \ + /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \ + V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \ + /* VSX Vector Compare Equal To Single-Precision */ \ + V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \ + /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \ + V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \ + /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \ + V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \ + /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \ + /* CR6 */ \ + V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \ + /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \ + V(xvcmpgesp, XVCMPGESP, 0xF0000298) \ + /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \ + /* CR6 */ \ + V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \ + /* VSX Vector Compare Greater Than 
Double-Precision */ \ + V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \ + /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \ + V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \ + /* VSX Vector Compare Greater Than Single-Precision */ \ + V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \ + /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \ + V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \ + /* VSX Vector Copy Sign Double-Precision */ \ + V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \ + /* VSX Vector Copy Sign Single-Precision */ \ + V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \ + /* VSX Vector Divide Double-Precision */ \ + V(xvdivdp, XVDIVDP, 0xF00003C0) \ + /* VSX Vector Divide Single-Precision */ \ + V(xvdivsp, XVDIVSP, 0xF00002C0) \ + /* VSX Vector Multiply-Add Type-A Double-Precision */ \ + V(xvmaddadp, XVMADDADP, 0xF0000308) \ + /* VSX Vector Multiply-Add Type-A Single-Precision */ \ + V(xvmaddasp, XVMADDASP, 0xF0000208) \ + /* VSX Vector Multiply-Add Type-M Double-Precision */ \ + V(xvmaddmdp, XVMADDMDP, 0xF0000348) \ + /* VSX Vector Multiply-Add Type-M Single-Precision */ \ + V(xvmaddmsp, XVMADDMSP, 0xF0000248) \ + /* VSX Vector Maximum Double-Precision */ \ + V(xvmaxdp, XVMAXDP, 0xF0000700) \ + /* VSX Vector Maximum Single-Precision */ \ + V(xvmaxsp, XVMAXSP, 0xF0000600) \ + /* VSX Vector Minimum Double-Precision */ \ + V(xvmindp, XVMINDP, 0xF0000740) \ + /* VSX Vector Minimum Single-Precision */ \ + V(xvminsp, XVMINSP, 0xF0000640) \ + /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \ + V(xvmsubadp, XVMSUBADP, 0xF0000388) \ + /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \ + V(xvmsubasp, XVMSUBASP, 0xF0000288) \ + /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \ + V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \ + /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \ + V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \ + /* VSX Vector Multiply Double-Precision */ \ + V(xvmuldp, XVMULDP, 0xF0000380) \ + /* VSX Vector Multiply Single-Precision */ \ + V(xvmulsp, XVMULSP, 0xF0000280) \ + /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \ + V(xvnmaddadp, XVNMADDADP, 0xF0000708) \ + /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \ + V(xvnmaddasp, XVNMADDASP, 0xF0000608) \ + /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \ + V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \ + /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \ + V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \ + /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \ + V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \ + /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \ + V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \ + /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \ + V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \ + /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \ + V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \ + /* VSX Vector Reciprocal Estimate Double-Precision */ \ + V(xvredp, XVREDP, 0xF0000368) \ + /* VSX Vector Reciprocal Estimate Single-Precision */ \ + V(xvresp, XVRESP, 0xF0000268) \ + /* VSX Vector Subtract Double-Precision */ \ + V(xvsubdp, XVSUBDP, 0xF0000340) \ + /* VSX Vector Subtract Single-Precision */ \ + V(xvsubsp, XVSUBSP, 0xF0000240) \ + /* VSX Vector Test for software Divide Double-Precision */ \ + V(xvtdivdp, XVTDIVDP, 0xF00003E8) \ + /* VSX Vector Test for software Divide Single-Precision */ \ + V(xvtdivsp, XVTDIVSP, 0xF00002E8) \ + /* VSX Logical AND */ \ + V(xxland, XXLAND, 0xF0000410) \ + /* VSX Logical AND with 
Complement */ \ + V(xxlandc, XXLANDC, 0xF0000450) \ + /* VSX Logical Equivalence */ \ + V(xxleqv, XXLEQV, 0xF00005D0) \ + /* VSX Logical NAND */ \ + V(xxlnand, XXLNAND, 0xF0000590) \ + /* VSX Logical NOR */ \ + V(xxlnor, XXLNOR, 0xF0000510) \ + /* VSX Logical OR */ \ + V(xxlor, XXLOR, 0xF0000490) \ + /* VSX Logical OR with Complement */ \ + V(xxlorc, XXLORC, 0xF0000550) \ + /* VSX Logical XOR */ \ + V(xxlxor, XXLXOR, 0xF00004D0) \ + /* VSX Merge High Word */ \ + V(xxmrghw, XXMRGHW, 0xF0000090) \ + /* VSX Merge Low Word */ \ + V(xxmrglw, XXMRGLW, 0xF0000190) \ + /* VSX Permute Doubleword Immediate */ \ + V(xxpermdi, XXPERMDI, 0xF0000050) \ + /* VSX Shift Left Double by Word Immediate */ \ + V(xxsldwi, XXSLDWI, 0xF0000010) \ + /* VSX Splat Word */ \ + V(xxspltw, XXSPLTW, 0xF0000290) + +#define PPC_Z23_OPCODE_LIST(V) \ + /* Decimal Quantize */ \ + V(dqua, DQUA, 0xEC000006) \ + /* Decimal Quantize Immediate */ \ + V(dquai, DQUAI, 0xEC000086) \ + /* Decimal Quantize Immediate Quad */ \ + V(dquaiq, DQUAIQ, 0xFC000086) \ + /* Decimal Quantize Quad */ \ + V(dquaq, DQUAQ, 0xFC000006) \ + /* Decimal Floating Round To FP Integer Without Inexact */ \ + V(drintn, DRINTN, 0xEC0001C6) \ + /* Decimal Floating Round To FP Integer Without Inexact Quad */ \ + V(drintnq, DRINTNQ, 0xFC0001C6) \ + /* Decimal Floating Round To FP Integer With Inexact */ \ + V(drintx, DRINTX, 0xEC0000C6) \ + /* Decimal Floating Round To FP Integer With Inexact Quad */ \ + V(drintxq, DRINTXQ, 0xFC0000C6) \ + /* Decimal Floating Reround */ \ + V(drrnd, DRRND, 0xEC000046) \ + /* Decimal Floating Reround Quad */ \ + V(drrndq, DRRNDQ, 0xFC000046) + +#define PPC_Z22_OPCODE_LIST(V) \ + /* Decimal Floating Shift Coefficient Left Immediate */ \ + V(dscli, DSCLI, 0xEC000084) \ + /* Decimal Floating Shift Coefficient Left Immediate Quad */ \ + V(dscliq, DSCLIQ, 0xFC000084) \ + /* Decimal Floating Shift Coefficient Right Immediate */ \ + V(dscri, DSCRI, 0xEC0000C4) \ + /* Decimal Floating Shift Coefficient Right Immediate Quad */ \ + V(dscriq, DSCRIQ, 0xFC0000C4) \ + /* Decimal Floating Test Data Class */ \ + V(dtstdc, DTSTDC, 0xEC000184) \ + /* Decimal Floating Test Data Class Quad */ \ + V(dtstdcq, DTSTDCQ, 0xFC000184) \ + /* Decimal Floating Test Data Group */ \ + V(dtstdg, DTSTDG, 0xEC0001C4) \ + /* Decimal Floating Test Data Group Quad */ \ + V(dtstdgq, DTSTDGQ, 0xFC0001C4) + +#define PPC_XX2_OPCODE_LIST(V) \ + /* Move To VSR Doubleword */ \ + V(mtvsrd, MTVSRD, 0x7C000166) \ + /* Move To VSR Word Algebraic */ \ + V(mtvsrwa, MTVSRWA, 0x7C0001A6) \ + /* Move To VSR Word and Zero */ \ + V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \ + /* VSX Scalar Absolute Value Double-Precision */ \ + V(xsabsdp, XSABSDP, 0xF0000564) \ + /* VSX Scalar Convert Double-Precision to Single-Precision */ \ + V(xscvdpsp, XSCVDPSP, 0xF0000424) \ + /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \ + /* signalling */ \ + V(xscvdpspn, XSCVDPSPN, 0xF000042C) \ + /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \ + /* Saturate */ \ + V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \ + /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \ + /* Saturate */ \ + V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \ + /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \ + /* Doubleword Saturate */ \ + V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \ + /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \ + /* Saturate */ \ + V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \ + /* VSX Scalar Convert Single-Precision to 
Double-Precision (p=1) */ \ + V(xscvspdp, XSCVSPDP, 0xF0000524) \ + /* Scalar Convert Single-Precision to Double-Precision format Non- */ \ + /* signalling */ \ + V(xscvspdpn, XSCVSPDPN, 0xF000052C) \ + /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \ + V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \ + /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \ + V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \ + /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \ + /* Precision */ \ + V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \ + /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \ + /* Precision */ \ + V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \ + /* VSX Scalar Negative Absolute Value Double-Precision */ \ + V(xsnabsdp, XSNABSDP, 0xF00005A4) \ + /* VSX Scalar Negate Double-Precision */ \ + V(xsnegdp, XSNEGDP, 0xF00005E4) \ + /* VSX Scalar Round to Double-Precision Integer */ \ + V(xsrdpi, XSRDPI, 0xF0000124) \ + /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \ + /* mode */ \ + V(xsrdpic, XSRDPIC, 0xF00001AC) \ + /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \ + V(xsrdpim, XSRDPIM, 0xF00001E4) \ + /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \ + V(xsrdpip, XSRDPIP, 0xF00001A4) \ + /* VSX Scalar Round to Double-Precision Integer toward Zero */ \ + V(xsrdpiz, XSRDPIZ, 0xF0000164) \ + /* VSX Scalar Round to Single-Precision */ \ + V(xsrsp, XSRSP, 0xF0000464) \ + /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \ + V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \ + /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \ + V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \ + /* VSX Scalar Square Root Double-Precision */ \ + V(xssqrtdp, XSSQRTDP, 0xF000012C) \ + /* VSX Scalar Square Root Single-Precision */ \ + V(xssqrtsp, XSSQRTSP, 0xF000002C) \ + /* VSX Scalar Test for software Square Root Double-Precision */ \ + V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \ + /* VSX Vector Absolute Value Double-Precision */ \ + V(xvabsdp, XVABSDP, 0xF0000764) \ + /* VSX Vector Absolute Value Single-Precision */ \ + V(xvabssp, XVABSSP, 0xF0000664) \ + /* VSX Vector Convert Double-Precision to Single-Precision */ \ + V(xvcvdpsp, XVCVDPSP, 0xF0000624) \ + /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \ + /* Saturate */ \ + V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \ + /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \ + /* Saturate */ \ + V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \ + /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \ + /* Doubleword Saturate */ \ + V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \ + /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \ + /* Saturate */ \ + V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \ + /* VSX Vector Convert Single-Precision to Double-Precision */ \ + V(xvcvspdp, XVCVSPDP, 0xF0000724) \ + /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \ + /* Saturate */ \ + V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \ + /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \ + /* Saturate */ \ + V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \ + /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \ + /* Doubleword Saturate */ \ + V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \ + /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \ + /* Saturate */ \ + V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \ + /* VSX Vector Convert Signed Fixed-Point Doubleword to 
Double-Precision */ \ + V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \ + /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \ + V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \ + /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \ + V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \ + /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \ + V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \ + /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \ + /* Precision */ \ + V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \ + /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \ + /* Precision */ \ + V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \ + /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \ + V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \ + /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \ + V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \ + /* VSX Vector Negative Absolute Value Double-Precision */ \ + V(xvnabsdp, XVNABSDP, 0xF00007A4) \ + /* VSX Vector Negative Absolute Value Single-Precision */ \ + V(xvnabssp, XVNABSSP, 0xF00006A4) \ + /* VSX Vector Negate Double-Precision */ \ + V(xvnegdp, XVNEGDP, 0xF00007E4) \ + /* VSX Vector Negate Single-Precision */ \ + V(xvnegsp, XVNEGSP, 0xF00006E4) \ + /* VSX Vector Round to Double-Precision Integer */ \ + V(xvrdpi, XVRDPI, 0xF0000324) \ + /* VSX Vector Round to Double-Precision Integer using Current rounding */ \ + /* mode */ \ + V(xvrdpic, XVRDPIC, 0xF00003AC) \ + /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \ + V(xvrdpim, XVRDPIM, 0xF00003E4) \ + /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \ + V(xvrdpip, XVRDPIP, 0xF00003A4) \ + /* VSX Vector Round to Double-Precision Integer toward Zero */ \ + V(xvrdpiz, XVRDPIZ, 0xF0000364) \ + /* VSX Vector Round to Single-Precision Integer */ \ + V(xvrspi, XVRSPI, 0xF0000224) \ + /* VSX Vector Round to Single-Precision Integer using Current rounding */ \ + /* mode */ \ + V(xvrspic, XVRSPIC, 0xF00002AC) \ + /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \ + V(xvrspim, XVRSPIM, 0xF00002E4) \ + /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \ + V(xvrspip, XVRSPIP, 0xF00002A4) \ + /* VSX Vector Round to Single-Precision Integer toward Zero */ \ + V(xvrspiz, XVRSPIZ, 0xF0000264) \ + /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \ + V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \ + /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \ + V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \ + /* VSX Vector Square Root Double-Precision */ \ + V(xvsqrtdp, XVSQRTDP, 0xF000032C) \ + /* VSX Vector Square Root Single-Precision */ \ + V(xvsqrtsp, XVSQRTSP, 0xF000022C) \ + /* VSX Vector Test for software Square Root Double-Precision */ \ + V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \ + /* VSX Vector Test for software Square Root Single-Precision */ \ + V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) + +#define PPC_EVX_OPCODE_LIST(V) \ + /* Vector Load Double Word into Double Word by External PID Indexed */ \ + V(evlddepx, EVLDDEPX, 0x7C00063E) \ + /* Vector Store Double of Double by External PID Indexed */ \ + V(evstddepx, EVSTDDEPX, 0x7C00073E) \ + /* Bit Reversed Increment */ \ + V(brinc, BRINC, 0x1000020F) \ + /* Vector Absolute Value */ \ + V(evabs, EVABS, 0x10000208) \ + /* Vector Add Immediate Word */ \ + V(evaddiw, EVADDIW, 0x10000202) \ + /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \ + V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \ + /* Vector Add Signed, Saturate, 
Integer to Accumulator Word */ \ + V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \ + /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \ + V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \ + /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \ + V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \ + /* Vector Add Word */ \ + V(evaddw, EVADDW, 0x10000200) \ + /* Vector AND */ \ + V(evand, EVAND, 0x10000211) \ + /* Vector AND with Complement */ \ + V(evandc, EVANDC, 0x10000212) \ + /* Vector Compare Equal */ \ + V(evcmpeq, EVCMPEQ, 0x10000234) \ + /* Vector Compare Greater Than Signed */ \ + V(evcmpgts, EVCMPGTS, 0x10000231) \ + /* Vector Compare Greater Than Unsigned */ \ + V(evcmpgtu, EVCMPGTU, 0x10000230) \ + /* Vector Compare Less Than Signed */ \ + V(evcmplts, EVCMPLTS, 0x10000233) \ + /* Vector Compare Less Than Unsigned */ \ + V(evcmpltu, EVCMPLTU, 0x10000232) \ + /* Vector Count Leading Signed Bits Word */ \ + V(evcntlsw, EVCNTLSW, 0x1000020E) \ + /* Vector Count Leading Zeros Word */ \ + V(evcntlzw, EVCNTLZW, 0x1000020D) \ + /* Vector Divide Word Signed */ \ + V(evdivws, EVDIVWS, 0x100004C6) \ + /* Vector Divide Word Unsigned */ \ + V(evdivwu, EVDIVWU, 0x100004C7) \ + /* Vector Equivalent */ \ + V(eveqv, EVEQV, 0x10000219) \ + /* Vector Extend Sign Byte */ \ + V(evextsb, EVEXTSB, 0x1000020A) \ + /* Vector Extend Sign Half Word */ \ + V(evextsh, EVEXTSH, 0x1000020B) \ + /* Vector Load Double Word into Double Word */ \ + V(evldd, EVLDD, 0x10000301) \ + /* Vector Load Double Word into Double Word Indexed */ \ + V(evlddx, EVLDDX, 0x10000300) \ + /* Vector Load Double into Four Half Words */ \ + V(evldh, EVLDH, 0x10000305) \ + /* Vector Load Double into Four Half Words Indexed */ \ + V(evldhx, EVLDHX, 0x10000304) \ + /* Vector Load Double into Two Words */ \ + V(evldw, EVLDW, 0x10000303) \ + /* Vector Load Double into Two Words Indexed */ \ + V(evldwx, EVLDWX, 0x10000302) \ + /* Vector Load Half Word into Half Words Even and Splat */ \ + V(evlhhesplat, EVLHHESPLAT, 0x10000309) \ + /* Vector Load Half Word into Half Words Even and Splat Indexed */ \ + V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \ + /* Vector Load Half Word into Half Word Odd Signed and Splat */ \ + V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \ + /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \ + V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \ + /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \ + V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \ + /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \ + V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \ + /* Vector Load Word into Two Half Words Even */ \ + V(evlwhe, EVLWHE, 0x10000311) \ + /* Vector Load Word into Two Half Words Even Indexed */ \ + V(evlwhex, EVLWHEX, 0x10000310) \ + /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \ + V(evlwhos, EVLWHOS, 0x10000317) \ + /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \ + /* extension) */ \ + V(evlwhosx, EVLWHOSX, 0x10000316) \ + /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \ + V(evlwhou, EVLWHOU, 0x10000315) \ + /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \ + /* extended) */ \ + V(evlwhoux, EVLWHOUX, 0x10000314) \ + /* Vector Load Word into Two Half Words and Splat */ \ + V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \ + /* Vector Load Word into Two Half Words and Splat Indexed */ \ + V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \ + /* Vector Load Word into Word and Splat */ \ + 
V(evlwwsplat, EVLWWSPLAT, 0x10000319) \ + /* Vector Load Word into Word and Splat Indexed */ \ + V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \ + /* Vector Merge High */ \ + V(evmergehi, EVMERGEHI, 0x1000022C) \ + /* Vector Merge High/Low */ \ + V(evmergehilo, EVMERGEHILO, 0x1000022E) \ + /* Vector Merge Low */ \ + V(evmergelo, EVMERGELO, 0x1000022D) \ + /* Vector Merge Low/High */ \ + V(evmergelohi, EVMERGELOHI, 0x1000022F) \ + /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \ + /* and Accumulate */ \ + V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \ + /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \ + /* and Accumulate Negative */ \ + V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \ + /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \ + /* and Accumulate */ \ + V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \ + /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \ + /* and Accumulate Negative */ \ + V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \ + /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \ + /* and Accumulate */ \ + V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \ + /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \ + /* and Accumulate Negative */ \ + V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \ + V(evmhesmf, EVMHESMF, 0x1000040B) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \ + /* Accumulator */ \ + V(evmhesmfa, EVMHESMFA, 0x1000042B) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \ + /* Accumulate into Words */ \ + V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \ + /* Accumulate Negative into Words */ \ + V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \ + V(evmhesmi, EVMHESMI, 0x10000409) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \ + /* Accumulator */ \ + V(evmhesmia, EVMHESMIA, 0x10000429) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \ + /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhesmianw, EVMHESMIANW, 0x10000589) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \ + V(evmhessf, EVMHESSF, 0x10000403) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \ + /* Accumulator */ \ + V(evmhessfa, EVMHESSFA, 0x10000423) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \ + /* Accumulate into Words */ \ + V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \ + /* Accumulate Negative into Words */ \ + V(evmhessfanw, EVMHESSFANW, 0x10000583) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \ + /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhessianw, EVMHESSIANW, 0x10000581) \ + /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \ + V(evmheumi, EVMHEUMI, 0x10000408) \ + /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \ + /* Accumulator */ \ + V(evmheumia, EVMHEUMIA, 0x10000428) \ 
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \ + /* Accumulate into Words */ \ + V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \ + /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmheumianw, EVMHEUMIANW, 0x10000588) \ + /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \ + /* Accumulate into Words */ \ + V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \ + /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmheusianw, EVMHEUSIANW, 0x10000580) \ + /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \ + /* and Accumulate */ \ + V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \ + /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \ + /* and Accumulate Negative */ \ + V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \ + /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \ + /* and Accumulate */ \ + V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \ + /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \ + /* Accumulate Negative */ \ + V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \ + /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \ + /* and Accumulate */ \ + V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \ + /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \ + /* and Accumulate Negative */ \ + V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \ + V(evmhosmf, EVMHOSMF, 0x1000040F) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \ + /* Accumulator */ \ + V(evmhosmfa, EVMHOSMFA, 0x1000042F) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \ + /* Accumulate into Words */ \ + V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \ + /* Accumulate Negative into Words */ \ + V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \ + V(evmhosmi, EVMHOSMI, 0x1000040D) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \ + /* Accumulator */ \ + V(evmhosmia, EVMHOSMIA, 0x1000042D) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \ + /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \ + V(evmhossf, EVMHOSSF, 0x10000407) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \ + /* Accumulator */ \ + V(evmhossfa, EVMHOSSFA, 0x10000427) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \ + /* Accumulate into Words */ \ + V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \ + /* Accumulate Negative into Words */ \ + V(evmhossfanw, EVMHOSSFANW, 0x10000587) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \ + /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhossianw, EVMHOSSIANW, 0x10000585) \ + /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer 
*/ \ + V(evmhoumi, EVMHOUMI, 0x1000040C) \ + /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \ + /* Accumulator */ \ + V(evmhoumia, EVMHOUMIA, 0x1000042C) \ + /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \ + /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \ + /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \ + /* Accumulate into Words */ \ + V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \ + /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \ + /* Accumulate Negative into Words */ \ + V(evmhousianw, EVMHOUSIANW, 0x10000584) \ + /* Initialize Accumulator */ \ + V(evmra, EVMRA, 0x100004C4) \ + /* Vector Multiply Word High Signed, Modulo, Fractional */ \ + V(evmwhsmf, EVMWHSMF, 0x1000044F) \ + /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \ + V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \ + /* Vector Multiply Word High Signed, Modulo, Integer */ \ + V(evmwhsmi, EVMWHSMI, 0x1000044D) \ + /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \ + V(evmwhsmia, EVMWHSMIA, 0x1000046D) \ + /* Vector Multiply Word High Signed, Saturate, Fractional */ \ + V(evmwhssf, EVMWHSSF, 0x10000447) \ + /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \ + V(evmwhssfa, EVMWHSSFA, 0x10000467) \ + /* Vector Multiply Word High Unsigned, Modulo, Integer */ \ + V(evmwhumi, EVMWHUMI, 0x1000044C) \ + /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \ + V(evmwhumia, EVMWHUMIA, 0x1000046C) \ + /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \ + /* Words */ \ + V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \ + /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \ + /* Negative in Words */ \ + V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \ + /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \ + /* Words */ \ + V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \ + /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \ + /* Negative in Words */ \ + V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \ + /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \ + V(evmwlumi, EVMWLUMI, 0x10000448) \ + /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \ + V(evmwlumia, EVMWLUMIA, 0x10000468) \ + /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \ + /* Words */ \ + V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \ + /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \ + /* Negative in Words */ \ + V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \ + /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \ + /* in Words */ \ + V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \ + /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \ + /* Negative in Words */ \ + V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \ + /* Vector Multiply Word Signed, Modulo, Fractional */ \ + V(evmwsmf, EVMWSMF, 0x1000045B) \ + /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \ + V(evmwsmfa, EVMWSMFA, 0x1000047B) \ + /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \ + V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \ + /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \ + /* Negative */ \ + V(evmwsmfan, EVMWSMFAN, 0x100005DB) \ + /* 
Vector Multiply Word Signed, Modulo, Integer */ \ + V(evmwsmi, EVMWSMI, 0x10000459) \ + /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \ + V(evmwsmia, EVMWSMIA, 0x10000479) \ + /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \ + V(evmwsmiaa, EVMWSMIAA, 0x10000559) \ + /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \ + V(evmwsmian, EVMWSMIAN, 0x100005D9) \ + /* Vector Multiply Word Signed, Saturate, Fractional */ \ + V(evmwssf, EVMWSSF, 0x10000453) \ + /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \ + V(evmwssfa, EVMWSSFA, 0x10000473) \ + /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \ + V(evmwssfaa, EVMWSSFAA, 0x10000553) \ + /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \ + /* Negative */ \ + V(evmwssfan, EVMWSSFAN, 0x100005D3) \ + /* Vector Multiply Word Unsigned, Modulo, Integer */ \ + V(evmwumi, EVMWUMI, 0x10000458) \ + /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \ + V(evmwumia, EVMWUMIA, 0x10000478) \ + /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \ + V(evmwumiaa, EVMWUMIAA, 0x10000558) \ + /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \ + /* Negative */ \ + V(evmwumian, EVMWUMIAN, 0x100005D8) \ + /* Vector NAND */ \ + V(evnand, EVNAND, 0x1000021E) \ + /* Vector Negate */ \ + V(evneg, EVNEG, 0x10000209) \ + /* Vector NOR */ \ + V(evnor, EVNOR, 0x10000218) \ + /* Vector OR */ \ + V(evor, EVOR, 0x10000217) \ + /* Vector OR with Complement */ \ + V(evorc, EVORC, 0x1000021B) \ + /* Vector Rotate Left Word */ \ + V(evrlw, EVRLW, 0x10000228) \ + /* Vector Rotate Left Word Immediate */ \ + V(evrlwi, EVRLWI, 0x1000022A) \ + /* Vector Round Word */ \ + V(evrndw, EVRNDW, 0x1000020C) \ + /* Vector Shift Left Word */ \ + V(evslw, EVSLW, 0x10000224) \ + /* Vector Shift Left Word Immediate */ \ + V(evslwi, EVSLWI, 0x10000226) \ + /* Vector Splat Fractional Immediate */ \ + V(evsplatfi, EVSPLATFI, 0x1000022B) \ + /* Vector Splat Immediate */ \ + V(evsplati, EVSPLATI, 0x10000229) \ + /* Vector Shift Right Word Immediate Signed */ \ + V(evsrwis, EVSRWIS, 0x10000223) \ + /* Vector Shift Right Word Immediate Unsigned */ \ + V(evsrwiu, EVSRWIU, 0x10000222) \ + /* Vector Shift Right Word Signed */ \ + V(evsrws, EVSRWS, 0x10000221) \ + /* Vector Shift Right Word Unsigned */ \ + V(evsrwu, EVSRWU, 0x10000220) \ + /* Vector Store Double of Double */ \ + V(evstdd, EVSTDD, 0x10000321) \ + /* Vector Store Double of Double Indexed */ \ + V(evstddx, EVSTDDX, 0x10000320) \ + /* Vector Store Double of Four Half Words */ \ + V(evstdh, EVSTDH, 0x10000325) \ + /* Vector Store Double of Four Half Words Indexed */ \ + V(evstdhx, EVSTDHX, 0x10000324) \ + /* Vector Store Double of Two Words */ \ + V(evstdw, EVSTDW, 0x10000323) \ + /* Vector Store Double of Two Words Indexed */ \ + V(evstdwx, EVSTDWX, 0x10000322) \ + /* Vector Store Word of Two Half Words from Even */ \ + V(evstwhe, EVSTWHE, 0x10000331) \ + /* Vector Store Word of Two Half Words from Even Indexed */ \ + V(evstwhex, EVSTWHEX, 0x10000330) \ + /* Vector Store Word of Two Half Words from Odd */ \ + V(evstwho, EVSTWHO, 0x10000335) \ + /* Vector Store Word of Two Half Words from Odd Indexed */ \ + V(evstwhox, EVSTWHOX, 0x10000334) \ + /* Vector Store Word of Word from Even */ \ + V(evstwwe, EVSTWWE, 0x10000339) \ + /* Vector Store Word of Word from Even Indexed */ \ + V(evstwwex, EVSTWWEX, 0x10000338) \ + /* Vector Store Word of Word from Odd */ \ + 
V(evstwwo, EVSTWWO, 0x1000033D) \ + /* Vector Store Word of Word from Odd Indexed */ \ + V(evstwwox, EVSTWWOX, 0x1000033C) \ + /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \ + V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \ + /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \ + V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \ + /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \ + V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \ + /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \ + V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \ + /* Vector Subtract from Word */ \ + V(evsubfw, EVSUBFW, 0x10000204) \ + /* Vector Subtract Immediate from Word */ \ + V(evsubifw, EVSUBIFW, 0x10000206) \ + /* Vector XOR */ \ + V(evxor, EVXOR, 0x10000216) \ + /* Floating-Point Double-Precision Absolute Value */ \ + V(efdabs, EFDABS, 0x100002E4) \ + /* Floating-Point Double-Precision Add */ \ + V(efdadd, EFDADD, 0x100002E0) \ + /* Floating-Point Double-Precision Convert from Single-Precision */ \ + V(efdcfs, EFDCFS, 0x100002EF) \ + /* Convert Floating-Point Double-Precision from Signed Fraction */ \ + V(efdcfsf, EFDCFSF, 0x100002F3) \ + /* Convert Floating-Point Double-Precision from Signed Integer */ \ + V(efdcfsi, EFDCFSI, 0x100002F1) \ + /* Convert Floating-Point Double-Precision from Signed Integer */ \ + /* Doubleword */ \ + V(efdcfsid, EFDCFSID, 0x100002E3) \ + /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \ + V(efdcfuf, EFDCFUF, 0x100002F2) \ + /* Convert Floating-Point Double-Precision from Unsigned Integer */ \ + V(efdcfui, EFDCFUI, 0x100002F0) \ + /* Convert Floating-Point Double-Precision fromUnsigned Integer */ \ + /* Doubleword */ \ + V(efdcfuid, EFDCFUID, 0x100002E2) \ + /* Floating-Point Double-Precision Compare Equal */ \ + V(efdcmpeq, EFDCMPEQ, 0x100002EE) \ + /* Floating-Point Double-Precision Compare Greater Than */ \ + V(efdcmpgt, EFDCMPGT, 0x100002EC) \ + /* Floating-Point Double-Precision Compare Less Than */ \ + V(efdcmplt, EFDCMPLT, 0x100002ED) \ + /* Convert Floating-Point Double-Precision to Signed Fraction */ \ + V(efdctsf, EFDCTSF, 0x100002F7) \ + /* Convert Floating-Point Double-Precision to Signed Integer */ \ + V(efdctsi, EFDCTSI, 0x100002F5) \ + /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \ + /* with Round toward Zero */ \ + V(efdctsidz, EFDCTSIDZ, 0x100002EB) \ + /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \ + /* toward Zero */ \ + V(efdctsiz, EFDCTSIZ, 0x100002FA) \ + /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \ + V(efdctuf, EFDCTUF, 0x100002F6) \ + /* Convert Floating-Point Double-Precision to Unsigned Integer */ \ + V(efdctui, EFDCTUI, 0x100002F4) \ + /* Convert Floating-Point Double-Precision to Unsigned Integer */ \ + /* Doubleword with Round toward Zero */ \ + V(efdctuidz, EFDCTUIDZ, 0x100002EA) \ + /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \ + /* Round toward Zero */ \ + V(efdctuiz, EFDCTUIZ, 0x100002F8) \ + /* Floating-Point Double-Precision Divide */ \ + V(efddiv, EFDDIV, 0x100002E9) \ + /* Floating-Point Double-Precision Multiply */ \ + V(efdmul, EFDMUL, 0x100002E8) \ + /* Floating-Point Double-Precision Negative Absolute Value */ \ + V(efdnabs, EFDNABS, 0x100002E5) \ + /* Floating-Point Double-Precision Negate */ \ + V(efdneg, EFDNEG, 0x100002E6) \ + /* Floating-Point Double-Precision Subtract */ \ + V(efdsub, EFDSUB, 0x100002E1) \ + /* Floating-Point Double-Precision Test 
Equal */ \ + V(efdtsteq, EFDTSTEQ, 0x100002FE) \ + /* Floating-Point Double-Precision Test Greater Than */ \ + V(efdtstgt, EFDTSTGT, 0x100002FC) \ + /* Floating-Point Double-Precision Test Less Than */ \ + V(efdtstlt, EFDTSTLT, 0x100002FD) \ + /* Floating-Point Single-Precision Convert from Double-Precision */ \ + V(efscfd, EFSCFD, 0x100002CF) \ + /* Floating-Point Absolute Value */ \ + V(efsabs, EFSABS, 0x100002C4) \ + /* Floating-Point Add */ \ + V(efsadd, EFSADD, 0x100002C0) \ + /* Convert Floating-Point from Signed Fraction */ \ + V(efscfsf, EFSCFSF, 0x100002D3) \ + /* Convert Floating-Point from Signed Integer */ \ + V(efscfsi, EFSCFSI, 0x100002D1) \ + /* Convert Floating-Point from Unsigned Fraction */ \ + V(efscfuf, EFSCFUF, 0x100002D2) \ + /* Convert Floating-Point from Unsigned Integer */ \ + V(efscfui, EFSCFUI, 0x100002D0) \ + /* Floating-Point Compare Equal */ \ + V(efscmpeq, EFSCMPEQ, 0x100002CE) \ + /* Floating-Point Compare Greater Than */ \ + V(efscmpgt, EFSCMPGT, 0x100002CC) \ + /* Floating-Point Compare Less Than */ \ + V(efscmplt, EFSCMPLT, 0x100002CD) \ + /* Convert Floating-Point to Signed Fraction */ \ + V(efsctsf, EFSCTSF, 0x100002D7) \ + /* Convert Floating-Point to Signed Integer */ \ + V(efsctsi, EFSCTSI, 0x100002D5) \ + /* Convert Floating-Point to Signed Integer with Round toward Zero */ \ + V(efsctsiz, EFSCTSIZ, 0x100002DA) \ + /* Convert Floating-Point to Unsigned Fraction */ \ + V(efsctuf, EFSCTUF, 0x100002D6) \ + /* Convert Floating-Point to Unsigned Integer */ \ + V(efsctui, EFSCTUI, 0x100002D4) \ + /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \ + V(efsctuiz, EFSCTUIZ, 0x100002D8) \ + /* Floating-Point Divide */ \ + V(efsdiv, EFSDIV, 0x100002C9) \ + /* Floating-Point Multiply */ \ + V(efsmul, EFSMUL, 0x100002C8) \ + /* Floating-Point Negative Absolute Value */ \ + V(efsnabs, EFSNABS, 0x100002C5) \ + /* Floating-Point Negate */ \ + V(efsneg, EFSNEG, 0x100002C6) \ + /* Floating-Point Subtract */ \ + V(efssub, EFSSUB, 0x100002C1) \ + /* Floating-Point Test Equal */ \ + V(efststeq, EFSTSTEQ, 0x100002DE) \ + /* Floating-Point Test Greater Than */ \ + V(efststgt, EFSTSTGT, 0x100002DC) \ + /* Floating-Point Test Less Than */ \ + V(efststlt, EFSTSTLT, 0x100002DD) \ + /* Vector Floating-Point Absolute Value */ \ + V(evfsabs, EVFSABS, 0x10000284) \ + /* Vector Floating-Point Add */ \ + V(evfsadd, EVFSADD, 0x10000280) \ + /* Vector Convert Floating-Point from Signed Fraction */ \ + V(evfscfsf, EVFSCFSF, 0x10000293) \ + /* Vector Convert Floating-Point from Signed Integer */ \ + V(evfscfsi, EVFSCFSI, 0x10000291) \ + /* Vector Convert Floating-Point from Unsigned Fraction */ \ + V(evfscfuf, EVFSCFUF, 0x10000292) \ + /* Vector Convert Floating-Point from Unsigned Integer */ \ + V(evfscfui, EVFSCFUI, 0x10000290) \ + /* Vector Floating-Point Compare Equal */ \ + V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \ + /* Vector Floating-Point Compare Greater Than */ \ + V(evfscmpgt, EVFSCMPGT, 0x1000028C) \ + /* Vector Floating-Point Compare Less Than */ \ + V(evfscmplt, EVFSCMPLT, 0x1000028D) \ + /* Vector Convert Floating-Point to Signed Fraction */ \ + V(evfsctsf, EVFSCTSF, 0x10000297) \ + /* Vector Convert Floating-Point to Signed Integer */ \ + V(evfsctsi, EVFSCTSI, 0x10000295) \ + /* Vector Convert Floating-Point to Signed Integer with Round toward */ \ + /* Zero */ \ + V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \ + /* Vector Convert Floating-Point to Unsigned Fraction */ \ + V(evfsctuf, EVFSCTUF, 0x10000296) \ + /* Vector Convert Floating-Point to Unsigned Integer 
*/ \ + V(evfsctui, EVFSCTUI, 0x10000294) \ + /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \ + /* Zero */ \ + V(evfsctuiz, EVFSCTUIZ, 0x10000298) \ + /* Vector Floating-Point Divide */ \ + V(evfsdiv, EVFSDIV, 0x10000289) \ + /* Vector Floating-Point Multiply */ \ + V(evfsmul, EVFSMUL, 0x10000288) \ + /* Vector Floating-Point Negative Absolute Value */ \ + V(evfsnabs, EVFSNABS, 0x10000285) \ + /* Vector Floating-Point Negate */ \ + V(evfsneg, EVFSNEG, 0x10000286) \ + /* Vector Floating-Point Subtract */ \ + V(evfssub, EVFSSUB, 0x10000281) \ + /* Vector Floating-Point Test Equal */ \ + V(evfststeq, EVFSTSTEQ, 0x1000029E) \ + /* Vector Floating-Point Test Greater Than */ \ + V(evfststgt, EVFSTSTGT, 0x1000029C) \ + /* Vector Floating-Point Test Less Than */ \ + V(evfststlt, EVFSTSTLT, 0x1000029D) + +#define PPC_VC_OPCODE_LIST(V) \ + /* Vector Compare Bounds Single-Precision */ \ + V(vcmpbfp, VCMPBFP, 0x100003C6) \ + /* Vector Compare Equal To Single-Precision */ \ + V(vcmpeqfp, VCMPEQFP, 0x100000C6) \ + /* Vector Compare Equal To Unsigned Byte */ \ + V(vcmpequb, VCMPEQUB, 0x10000006) \ + /* Vector Compare Equal To Unsigned Doubleword */ \ + V(vcmpequd, VCMPEQUD, 0x100000C7) \ + /* Vector Compare Equal To Unsigned Halfword */ \ + V(vcmpequh, VCMPEQUH, 0x10000046) \ + /* Vector Compare Equal To Unsigned Word */ \ + V(vcmpequw, VCMPEQUW, 0x10000086) \ + /* Vector Compare Greater Than or Equal To Single-Precision */ \ + V(vcmpgefp, VCMPGEFP, 0x100001C6) \ + /* Vector Compare Greater Than Single-Precision */ \ + V(vcmpgtfp, VCMPGTFP, 0x100002C6) \ + /* Vector Compare Greater Than Signed Byte */ \ + V(vcmpgtsb, VCMPGTSB, 0x10000306) \ + /* Vector Compare Greater Than Signed Doubleword */ \ + V(vcmpgtsd, VCMPGTSD, 0x100003C7) \ + /* Vector Compare Greater Than Signed Halfword */ \ + V(vcmpgtsh, VCMPGTSH, 0x10000346) \ + /* Vector Compare Greater Than Signed Word */ \ + V(vcmpgtsw, VCMPGTSW, 0x10000386) \ + /* Vector Compare Greater Than Unsigned Byte */ \ + V(vcmpgtub, VCMPGTUB, 0x10000206) \ + /* Vector Compare Greater Than Unsigned Doubleword */ \ + V(vcmpgtud, VCMPGTUD, 0x100002C7) \ + /* Vector Compare Greater Than Unsigned Halfword */ \ + V(vcmpgtuh, VCMPGTUH, 0x10000246) \ + /* Vector Compare Greater Than Unsigned Word */ \ + V(vcmpgtuw, VCMPGTUW, 0x10000286) + +#define PPC_X_OPCODE_LIST(V) \ + /* Bit Permute Doubleword */ \ + V(bpermd, BPERMD, 0x7C0001F8) \ + /* Count Leading Zeros Doubleword */ \ + V(cntlzd, CNTLZDX, 0x7C000074) \ + /* Extend Sign Word */ \ + V(extsw, EXTSW, 0x7C0007B4) \ + /* Load Doubleword And Reserve Indexed */ \ + V(ldarx, LDARX, 0x7C0000A8) \ + /* Load Doubleword Byte-Reverse Indexed */ \ + V(ldbrx, LDBRX, 0x7C000428) \ + /* Load Doubleword with Update Indexed */ \ + V(ldux, LDUX, 0x7C00006A) \ + /* Load Doubleword Indexed */ \ + V(ldx, LDX, 0x7C00002A) \ + /* Load Word Algebraic with Update Indexed */ \ + V(lwaux, LWAUX, 0x7C0002EA) \ + /* Load Word Algebraic Indexed */ \ + V(lwax, LWAX, 0x7C0002AA) \ + /* Modulo Signed Dword */ \ + V(modsd, MODSD, 0x7C000612) \ + /* Modulo Unsigned Dword */ \ + V(modud, MODUD, 0x7C000212) \ + /* Population Count Doubleword */ \ + V(popcntd, POPCNTD, 0x7C0003F4) \ + /* Parity Doubleword */ \ + V(prtyd, PRTYD, 0x7C000174) \ + /* Shift Left Doubleword */ \ + V(sld, SLDX, 0x7C000036) \ + /* Shift Right Algebraic Doubleword */ \ + V(srad, SRAD, 0x7C000634) \ + /* Shift Right Doubleword */ \ + V(srd, SRDX, 0x7C000436) \ + /* Store Doubleword Byte-Reverse Indexed */ \ + V(stdbrx, STDBRX, 0x7C000528) \ + /* Store 
Doubleword Conditional Indexed & record CR0 */ \ + V(stdcx, STDCX, 0x7C0001AD) \ + /* Store Doubleword with Update Indexed */ \ + V(stdux, STDUX, 0x7C00016A) \ + /* Store Doubleword Indexed */ \ + V(stdx, STDX, 0x7C00012A) \ + /* Trap Doubleword */ \ + V(td, TD, 0x7C000088) \ + /* AND */ \ + V(andx, ANDX, 0x7C000038) \ + /* AND with Complement */ \ + V(andc, ANDCX, 0x7C000078) \ + /* Branch Conditional to Branch Target Address Register */ \ + V(bctar, BCTAR, 0x4C000460) \ + /* Compare */ \ + V(cmp, CMP, 0x7C000000) \ + /* Compare Byte */ \ + V(cmpb, CMPB, 0x7C0003F8) \ + /* Compare Logical */ \ + V(cmpl, CMPL, 0x7C000040) \ + /* Count Leading Zeros Word */ \ + V(cntlzw, CNTLZWX, 0x7C000034) \ + /* Data Cache Block Flush */ \ + V(dcbf, DCBF, 0x7C0000AC) \ + /* Data Cache Block Store */ \ + V(dcbst, DCBST, 0x7C00006C) \ + /* Data Cache Block Touch */ \ + V(dcbt, DCBT, 0x7C00022C) \ + /* Data Cache Block Touch for Store */ \ + V(dcbtst, DCBTST, 0x7C0001EC) \ + /* Data Cache Block Zero */ \ + V(dcbz, DCBZ, 0x7C0007EC) \ + /* Equivalent */ \ + V(eqv, EQV, 0x7C000238) \ + /* Extend Sign Byte */ \ + V(extsb, EXTSB, 0x7C000774) \ + /* Extend Sign Halfword */ \ + V(extsh, EXTSH, 0x7C000734) \ + /* Instruction Cache Block Invalidate */ \ + V(icbi, ICBI, 0x7C0007AC) \ + /* Load Byte And Reserve Indexed */ \ + V(lbarx, LBARX, 0x7C000068) \ + /* Load Byte and Zero with Update Indexed */ \ + V(lbzux, LBZUX, 0x7C0000EE) \ + /* Load Byte and Zero Indexed */ \ + V(lbzx, LBZX, 0x7C0000AE) \ + /* Load Halfword And Reserve Indexed Xform */ \ + V(lharx, LHARX, 0x7C0000E8) \ + /* Load Halfword Algebraic with Update Indexed */ \ + V(lhaux, LHAUX, 0x7C0002EE) \ + /* Load Halfword Algebraic Indexed */ \ + V(lhax, LHAX, 0x7C0002AE) \ + /* Load Halfword Byte-Reverse Indexed */ \ + V(lhbrx, LHBRX, 0x7C00062C) \ + /* Load Halfword and Zero with Update Indexed */ \ + V(lhzux, LHZUX, 0x7C00026E) \ + /* Load Halfword and Zero Indexed */ \ + V(lhzx, LHZX, 0x7C00022E) \ + /* Load Word and Reserve Indexed */ \ + V(lwarx, LWARX, 0x7C000028) \ + /* Load Word Byte-Reverse Indexed */ \ + V(lwbrx, LWBRX, 0x7C00042C) \ + /* Load Word and Zero with Update Indexed */ \ + V(lwzux, LWZUX, 0x7C00006E) \ + /* Load Word and Zero Indexed */ \ + V(lwzx, LWZX, 0x7C00002E) \ + /* Modulo Signed Word */ \ + V(mods, MODSW, 0x7C000616) \ + /* Modulo Unsigned Word */ \ + V(moduw, MODUW, 0x7C000216) \ + /* NAND */ \ + V(nand, NAND, 0x7C0003B8) \ + /* NOR */ \ + V(nor, NORX, 0x7C0000F8) \ + /* OR */ \ + V(orx, ORX, 0x7C000378) \ + /* OR with Complement */ \ + V(orc, ORC, 0x7C000338) \ + /* Population Count Byte-wise */ \ + V(popcntb, POPCNTB, 0x7C0000F4) \ + /* Population Count Words */ \ + V(popcntw, POPCNTW, 0x7C0002F4) \ + /* Parity Word */ \ + V(prtyw, PRTYW, 0x7C000134) \ + /* Shift Left Word */ \ + V(slw, SLWX, 0x7C000030) \ + /* Shift Right Algebraic Word */ \ + V(sraw, SRAW, 0x7C000630) \ + /* Shift Right Algebraic Word Immediate */ \ + V(srawi, SRAWIX, 0x7C000670) \ + /* Shift Right Word */ \ + V(srw, SRWX, 0x7C000430) \ + /* Store Byte Conditional Indexed */ \ + V(stbcx, STBCX, 0x7C00056D) \ + /* Store Byte with Update Indexed */ \ + V(stbux, STBUX, 0x7C0001EE) \ + /* Store Byte Indexed */ \ + V(stbx, STBX, 0x7C0001AE) \ + /* Store Halfword Byte-Reverse Indexed */ \ + V(sthbrx, STHBRX, 0x7C00072C) \ + /* Store Halfword Conditional Indexed Xform */ \ + V(sthcx, STHCX, 0x7C0005AD) \ + /* Store Halfword with Update Indexed */ \ + V(sthux, STHUX, 0x7C00036E) \ + /* Store Halfword Indexed */ \ + V(sthx, STHX, 0x7C00032E) \ + /* Store Word 
Byte-Reverse Indexed */ \ + V(stwbrx, STWBRX, 0x7C00052C) \ + /* Store Word Conditional Indexed & record CR0 */ \ + V(stwcx, STWCX, 0x7C00012D) \ + /* Store Word with Update Indexed */ \ + V(stwux, STWUX, 0x7C00016E) \ + /* Store Word Indexed */ \ + V(stwx, STWX, 0x7C00012E) \ + /* Synchronize */ \ + V(sync, SYNC, 0x7C0004AC) \ + /* Trap Word */ \ + V(tw, TW, 0x7C000008) \ + /* ExecuExecuted No Operation */ \ + V(xnop, XNOP, 0x68000000) \ + /* XOR */ \ + V(xorx, XORX, 0x7C000278) \ + /* Convert Binary Coded Decimal To Declets */ \ + V(cbcdtd, CBCDTD, 0x7C000274) \ + /* Convert Declets To Binary Coded Decimal */ \ + V(cdtbcd, CDTBCD, 0x7C000234) \ + /* Decimal Floating Add */ \ + V(dadd, DADD, 0xEC000004) \ + /* Decimal Floating Add Quad */ \ + V(daddq, DADDQ, 0xFC000004) \ + /* Decimal Floating Convert From Fixed */ \ + V(dcffix, DCFFIX, 0xEC000644) \ + /* Decimal Floating Convert From Fixed Quad */ \ + V(dcffixq, DCFFIXQ, 0xFC000644) \ + /* Decimal Floating Compare Ordered */ \ + V(dcmpo, DCMPO, 0xEC000104) \ + /* Decimal Floating Compare Ordered Quad */ \ + V(dcmpoq, DCMPOQ, 0xFC000104) \ + /* Decimal Floating Compare Unordered */ \ + V(dcmpu, DCMPU, 0xEC000504) \ + /* Decimal Floating Compare Unordered Quad */ \ + V(dcmpuq, DCMPUQ, 0xFC000504) \ + /* Decimal Floating Convert To DFP Long */ \ + V(dctdp, DCTDP, 0xEC000204) \ + /* Decimal Floating Convert To Fixed */ \ + V(dctfix, DCTFIX, 0xEC000244) \ + /* Decimal Floating Convert To Fixed Quad */ \ + V(dctfixq, DCTFIXQ, 0xFC000244) \ + /* Decimal Floating Convert To DFP Extended */ \ + V(dctqpq, DCTQPQ, 0xFC000204) \ + /* Decimal Floating Decode DPD To BCD */ \ + V(ddedpd, DDEDPD, 0xEC000284) \ + /* Decimal Floating Decode DPD To BCD Quad */ \ + V(ddedpdq, DDEDPDQ, 0xFC000284) \ + /* Decimal Floating Divide */ \ + V(ddiv, DDIV, 0xEC000444) \ + /* Decimal Floating Divide Quad */ \ + V(ddivq, DDIVQ, 0xFC000444) \ + /* Decimal Floating Encode BCD To DPD */ \ + V(denbcd, DENBCD, 0xEC000684) \ + /* Decimal Floating Encode BCD To DPD Quad */ \ + V(denbcdq, DENBCDQ, 0xFC000684) \ + /* Decimal Floating Insert Exponent */ \ + V(diex, DIEX, 0xEC0006C4) \ + /* Decimal Floating Insert Exponent Quad */ \ + V(diexq, DIEXQ, 0xFC0006C4) \ + /* Decimal Floating Multiply */ \ + V(dmul, DMUL, 0xEC000044) \ + /* Decimal Floating Multiply Quad */ \ + V(dmulq, DMULQ, 0xFC000044) \ + /* Decimal Floating Round To DFP Long */ \ + V(drdpq, DRDPQ, 0xFC000604) \ + /* Decimal Floating Round To DFP Short */ \ + V(drsp, DRSP, 0xEC000604) \ + /* Decimal Floating Subtract */ \ + V(dsub, DSUB, 0xEC000404) \ + /* Decimal Floating Subtract Quad */ \ + V(dsubq, DSUBQ, 0xFC000404) \ + /* Decimal Floating Test Exponent */ \ + V(dtstex, DTSTEX, 0xEC000144) \ + /* Decimal Floating Test Exponent Quad */ \ + V(dtstexq, DTSTEXQ, 0xFC000144) \ + /* Decimal Floating Test Significance */ \ + V(dtstsf, DTSTSF, 0xEC000544) \ + /* Decimal Floating Test Significance Quad */ \ + V(dtstsfq, DTSTSFQ, 0xFC000544) \ + /* Decimal Floating Extract Exponent */ \ + V(dxex, DXEX, 0xEC0002C4) \ + /* Decimal Floating Extract Exponent Quad */ \ + V(dxexq, DXEXQ, 0xFC0002C4) \ + /* Decorated Storage Notify */ \ + V(dsn, DSN, 0x7C0003C6) \ + /* Load Byte with Decoration Indexed */ \ + V(lbdx, LBDX, 0x7C000406) \ + /* Load Doubleword with Decoration Indexed */ \ + V(lddx, LDDX, 0x7C0004C6) \ + /* Load Floating Doubleword with Decoration Indexed */ \ + V(lfddx, LFDDX, 0x7C000646) \ + /* Load Halfword with Decoration Indexed */ \ + V(lhdx, LHDX, 0x7C000446) \ + /* Load Word with Decoration Indexed */ \ + 
V(lwdx, LWDX, 0x7C000486) \ + /* Store Byte with Decoration Indexed */ \ + V(stbdx, STBDX, 0x7C000506) \ + /* Store Doubleword with Decoration Indexed */ \ + V(stddx, STDDX, 0x7C0005C6) \ + /* Store Floating Doubleword with Decoration Indexed */ \ + V(stfddx, STFDDX, 0x7C000746) \ + /* Store Halfword with Decoration Indexed */ \ + V(sthdx, STHDX, 0x7C000546) \ + /* Store Word with Decoration Indexed */ \ + V(stwdx, STWDX, 0x7C000586) \ + /* Data Cache Block Allocate */ \ + V(dcba, DCBA, 0x7C0005EC) \ + /* Data Cache Block Invalidate */ \ + V(dcbi, DCBI, 0x7C0003AC) \ + /* Instruction Cache Block Touch */ \ + V(icbt, ICBT, 0x7C00002C) \ + /* Memory Barrier */ \ + V(mbar, MBAR, 0x7C0006AC) \ + /* Move to Condition Register from XER */ \ + V(mcrxr, MCRXR, 0x7C000400) \ + /* TLB Invalidate Local Indexed */ \ + V(tlbilx, TLBILX, 0x7C000024) \ + /* TLB Invalidate Virtual Address Indexed */ \ + V(tlbivax, TLBIVAX, 0x7C000624) \ + /* TLB Read Entry */ \ + V(tlbre, TLBRE, 0x7C000764) \ + /* TLB Search Indexed */ \ + V(tlbsx, TLBSX, 0x7C000724) \ + /* TLB Write Entry */ \ + V(tlbwe, TLBWE, 0x7C0007A4) \ + /* Write External Enable */ \ + V(wrtee, WRTEE, 0x7C000106) \ + /* Write External Enable Immediate */ \ + V(wrteei, WRTEEI, 0x7C000146) \ + /* Data Cache Read */ \ + V(dcread, DCREAD, 0x7C00028C) \ + /* Instruction Cache Read */ \ + V(icread, ICREAD, 0x7C0007CC) \ + /* Data Cache Invalidate */ \ + V(dci, DCI, 0x7C00038C) \ + /* Instruction Cache Invalidate */ \ + V(ici, ICI, 0x7C00078C) \ + /* Move From Device Control Register User Mode Indexed */ \ + V(mfdcrux, MFDCRUX, 0x7C000246) \ + /* Move From Device Control Register Indexed */ \ + V(mfdcrx, MFDCRX, 0x7C000206) \ + /* Move To Device Control Register User Mode Indexed */ \ + V(mtdcrux, MTDCRUX, 0x7C000346) \ + /* Move To Device Control Register Indexed */ \ + V(mtdcrx, MTDCRX, 0x7C000306) \ + /* Return From Debug Interrupt */ \ + V(rfdi, RFDI, 0x4C00004E) \ + /* Data Cache Block Flush by External PID */ \ + V(dcbfep, DCBFEP, 0x7C0000FE) \ + /* Data Cache Block Store by External PID */ \ + V(dcbstep, DCBSTEP, 0x7C00007E) \ + /* Data Cache Block Touch by External PID */ \ + V(dcbtep, DCBTEP, 0x7C00027E) \ + /* Data Cache Block Touch for Store by External PID */ \ + V(dcbtstep, DCBTSTEP, 0x7C0001FE) \ + /* Data Cache Block Zero by External PID */ \ + V(dcbzep, DCBZEP, 0x7C0007FE) \ + /* Instruction Cache Block Invalidate by External PID */ \ + V(icbiep, ICBIEP, 0x7C0007BE) \ + /* Load Byte and Zero by External PID Indexed */ \ + V(lbepx, LBEPX, 0x7C0000BE) \ + /* Load Floating-Point Double by External PID Indexed */ \ + V(lfdepx, LFDEPX, 0x7C0004BE) \ + /* Load Halfword and Zero by External PID Indexed */ \ + V(lhepx, LHEPX, 0x7C00023E) \ + /* Load Vector by External PID Indexed */ \ + V(lvepx, LVEPX, 0x7C00024E) \ + /* Load Vector by External PID Indexed Last */ \ + V(lvepxl, LVEPXL, 0x7C00020E) \ + /* Load Word and Zero by External PID Indexed */ \ + V(lwepx, LWEPX, 0x7C00003E) \ + /* Store Byte by External PID Indexed */ \ + V(stbepx, STBEPX, 0x7C0001BE) \ + /* Store Floating-Point Double by External PID Indexed */ \ + V(stfdepx, STFDEPX, 0x7C0005BE) \ + /* Store Halfword by External PID Indexed */ \ + V(sthepx, STHEPX, 0x7C00033E) \ + /* Store Vector by External PID Indexed */ \ + V(stvepx, STVEPX, 0x7C00064E) \ + /* Store Vector by External PID Indexed Last */ \ + V(stvepxl, STVEPXL, 0x7C00060E) \ + /* Store Word by External PID Indexed */ \ + V(stwepx, STWEPX, 0x7C00013E) \ + /* Load Doubleword by External PID Indexed */ \ + V(ldepx, LDEPX, 
0x7C00003A) \ + /* Store Doubleword by External PID Indexed */ \ + V(stdepx, STDEPX, 0x7C00013A) \ + /* TLB Search and Reserve Indexed */ \ + V(tlbsrx, TLBSRX, 0x7C0006A5) \ + /* External Control In Word Indexed */ \ + V(eciwx, ECIWX, 0x7C00026C) \ + /* External Control Out Word Indexed */ \ + V(ecowx, ECOWX, 0x7C00036C) \ + /* Data Cache Block Lock Clear */ \ + V(dcblc, DCBLC, 0x7C00030C) \ + /* Data Cache Block Lock Query */ \ + V(dcblq, DCBLQ, 0x7C00034D) \ + /* Data Cache Block Touch and Lock Set */ \ + V(dcbtls, DCBTLS, 0x7C00014C) \ + /* Data Cache Block Touch for Store and Lock Set */ \ + V(dcbtstls, DCBTSTLS, 0x7C00010C) \ + /* Instruction Cache Block Lock Clear */ \ + V(icblc, ICBLC, 0x7C0001CC) \ + /* Instruction Cache Block Lock Query */ \ + V(icblq, ICBLQ, 0x7C00018D) \ + /* Instruction Cache Block Touch and Lock Set */ \ + V(icbtls, ICBTLS, 0x7C0003CC) \ + /* Floating Compare Ordered */ \ + V(fcmpo, FCMPO, 0xFC000040) \ + /* Floating Compare Unordered */ \ + V(fcmpu, FCMPU, 0xFC000000) \ + /* Floating Test for software Divide */ \ + V(ftdiv, FTDIV, 0xFC000100) \ + /* Floating Test for software Square Root */ \ + V(ftsqrt, FTSQRT, 0xFC000140) \ + /* Load Floating-Point Double with Update Indexed */ \ + V(lfdux, LFDUX, 0x7C0004EE) \ + /* Load Floating-Point Double Indexed */ \ + V(lfdx, LFDX, 0x7C0004AE) \ + /* Load Floating-Point as Integer Word Algebraic Indexed */ \ + V(lfiwax, LFIWAX, 0x7C0006AE) \ + /* Load Floating-Point as Integer Word and Zero Indexed */ \ + V(lfiwzx, LFIWZX, 0x7C0006EE) \ + /* Load Floating-Point Single with Update Indexed */ \ + V(lfsux, LFSUX, 0x7C00046E) \ + /* Load Floating-Point Single Indexed */ \ + V(lfsx, LFSX, 0x7C00042E) \ + /* Move To Condition Register from FPSCR */ \ + V(mcrfs, MCRFS, 0xFC000080) \ + /* Store Floating-Point Double with Update Indexed */ \ + V(stfdux, STFDUX, 0x7C0005EE) \ + /* Store Floating-Point Double Indexed */ \ + V(stfdx, STFDX, 0x7C0005AE) \ + /* Store Floating-Point as Integer Word Indexed */ \ + V(stfiwx, STFIWX, 0x7C0007AE) \ + /* Store Floating-Point Single with Update Indexed */ \ + V(stfsux, STFSUX, 0x7C00056E) \ + /* Store Floating-Point Single Indexed */ \ + V(stfsx, STFSX, 0x7C00052E) \ + /* Load Floating-Point Double Pair Indexed */ \ + V(lfdpx, LFDPX, 0x7C00062E) \ + /* Store Floating-Point Double Pair Indexed */ \ + V(stfdpx, STFDPX, 0x7C00072E) \ + /* Floating Absolute Value */ \ + V(fabs, FABS, 0xFC000210) \ + /* Floating Convert From Integer Doubleword */ \ + V(fcfid, FCFID, 0xFC00069C) \ + /* Floating Convert From Integer Doubleword Single */ \ + V(fcfids, FCFIDS, 0xEC00069C) \ + /* Floating Convert From Integer Doubleword Unsigned */ \ + V(fcfidu, FCFIDU, 0xFC00079C) \ + /* Floating Convert From Integer Doubleword Unsigned Single */ \ + V(fcfidus, FCFIDUS, 0xEC00079C) \ + /* Floating Copy Sign */ \ + V(fcpsgn, FCPSGN, 0xFC000010) \ + /* Floating Convert To Integer Doubleword */ \ + V(fctid, FCTID, 0xFC00065C) \ + /* Floating Convert To Integer Doubleword Unsigned */ \ + V(fctidu, FCTIDU, 0xFC00075C) \ + /* Floating Convert To Integer Doubleword Unsigned with round toward */ \ + /* Zero */ \ + V(fctiduz, FCTIDUZ, 0xFC00075E) \ + /* Floating Convert To Integer Doubleword with round toward Zero */ \ + V(fctidz, FCTIDZ, 0xFC00065E) \ + /* Floating Convert To Integer Word */ \ + V(fctiw, FCTIW, 0xFC00001C) \ + /* Floating Convert To Integer Word Unsigned */ \ + V(fctiwu, FCTIWU, 0xFC00011C) \ + /* Floating Convert To Integer Word Unsigned with round toward Zero */ \ + V(fctiwuz, FCTIWUZ, 0xFC00011E) \ + 
/* Floating Convert To Integer Word with round to Zero */ \ + V(fctiwz, FCTIWZ, 0xFC00001E) \ + /* Floating Move Register */ \ + V(fmr, FMR, 0xFC000090) \ + /* Floating Negative Absolute Value */ \ + V(fnabs, FNABS, 0xFC000110) \ + /* Floating Negate */ \ + V(fneg, FNEG, 0xFC000050) \ + /* Floating Round to Single-Precision */ \ + V(frsp, FRSP, 0xFC000018) \ + /* Move From FPSCR */ \ + V(mffs, MFFS, 0xFC00048E) \ + /* Move To FPSCR Bit 0 */ \ + V(mtfsb0, MTFSB0, 0xFC00008C) \ + /* Move To FPSCR Bit 1 */ \ + V(mtfsb1, MTFSB1, 0xFC00004C) \ + /* Move To FPSCR Field Immediate */ \ + V(mtfsfi, MTFSFI, 0xFC00010C) \ + /* Floating Round To Integer Minus */ \ + V(frim, FRIM, 0xFC0003D0) \ + /* Floating Round To Integer Nearest */ \ + V(frin, FRIN, 0xFC000310) \ + /* Floating Round To Integer Plus */ \ + V(frip, FRIP, 0xFC000390) \ + /* Floating Round To Integer toward Zero */ \ + V(friz, FRIZ, 0xFC000350) \ + /* Multiply Cross Halfword to Word Signed */ \ + V(mulchw, MULCHW, 0x10000150) \ + /* Multiply Cross Halfword to Word Unsigned */ \ + V(mulchwu, MULCHWU, 0x10000110) \ + /* Multiply High Halfword to Word Signed */ \ + V(mulhhw, MULHHW, 0x10000050) \ + /* Multiply High Halfword to Word Unsigned */ \ + V(mulhhwu, MULHHWU, 0x10000010) \ + /* Multiply Low Halfword to Word Signed */ \ + V(mullhw, MULLHW, 0x10000350) \ + /* Multiply Low Halfword to Word Unsigned */ \ + V(mullhwu, MULLHWU, 0x10000310) \ + /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \ + V(dlmzb, DLMZB, 0x7C00009C) \ + /* Load Quadword And Reserve Indexed */ \ + V(lqarx, LQARX, 0x7C000228) \ + /* Store Quadword Conditional Indexed and record CR0 */ \ + V(stqcx, STQCX, 0x7C00016D) \ + /* Load String Word Immediate */ \ + V(lswi, LSWI, 0x7C0004AA) \ + /* Load String Word Indexed */ \ + V(lswx, LSWX, 0x7C00042A) \ + /* Store String Word Immediate */ \ + V(stswi, STSWI, 0x7C0005AA) \ + /* Store String Word Indexed */ \ + V(stswx, STSWX, 0x7C00052A) \ + /* Clear BHRB */ \ + V(clrbhrb, CLRBHRB, 0x7C00035C) \ + /* Enforce In-order Execution of I/O */ \ + V(eieio, EIEIO, 0x7C0006AC) \ + /* Load Byte and Zero Caching Inhibited Indexed */ \ + V(lbzcix, LBZCIX, 0x7C0006AA) \ + /* Load Doubleword Caching Inhibited Indexed */ \ + V(ldcix, LDCIX, 0x7C0006EA) \ + /* Load Halfword and Zero Caching Inhibited Indexed */ \ + V(lhzcix, LHZCIX, 0x7C00066A) \ + /* Load Word and Zero Caching Inhibited Indexed */ \ + V(lwzcix, LWZCIX, 0x7C00062A) \ + /* Move From Segment Register */ \ + V(mfsr, MFSR, 0x7C0004A6) \ + /* Move From Segment Register Indirect */ \ + V(mfsrin, MFSRIN, 0x7C000526) \ + /* Move To Machine State Register Doubleword */ \ + V(mtmsrd, MTMSRD, 0x7C000164) \ + /* Move To Split Little Endian */ \ + V(mtsle, MTSLE, 0x7C000126) \ + /* Move To Segment Register */ \ + V(mtsr, MTSR, 0x7C0001A4) \ + /* Move To Segment Register Indirect */ \ + V(mtsrin, MTSRIN, 0x7C0001E4) \ + /* SLB Find Entry ESID */ \ + V(slbfee, SLBFEE, 0x7C0007A7) \ + /* SLB Invalidate All */ \ + V(slbia, SLBIA, 0x7C0003E4) \ + /* SLB Invalidate Entry */ \ + V(slbie, SLBIE, 0x7C000364) \ + /* SLB Move From Entry ESID */ \ + V(slbmfee, SLBMFEE, 0x7C000726) \ + /* SLB Move From Entry VSID */ \ + V(slbmfev, SLBMFEV, 0x7C0006A6) \ + /* SLB Move To Entry */ \ + V(slbmte, SLBMTE, 0x7C000324) \ + /* Store Byte Caching Inhibited Indexed */ \ + V(stbcix, STBCIX, 0x7C0007AA) \ + /* Store Doubleword Caching Inhibited Indexed */ \ + V(stdcix, STDCIX, 0x7C0007EA) \ + /* Store Halfword and Zero Caching Inhibited Indexed */ \ + V(sthcix, STHCIX, 0x7C00076A) 
\ + /* Store Word and Zero Caching Inhibited Indexed */ \ + V(stwcix, STWCIX, 0x7C00072A) \ + /* TLB Invalidate All */ \ + V(tlbia, TLBIA, 0x7C0002E4) \ + /* TLB Invalidate Entry */ \ + V(tlbie, TLBIE, 0x7C000264) \ + /* TLB Invalidate Entry Local */ \ + V(tlbiel, TLBIEL, 0x7C000224) \ + /* Message Clear Privileged */ \ + V(msgclrp, MSGCLRP, 0x7C00015C) \ + /* Message Send Privileged */ \ + V(msgsndp, MSGSNDP, 0x7C00011C) \ + /* Message Clear */ \ + V(msgclr, MSGCLR, 0x7C0001DC) \ + /* Message Send */ \ + V(msgsnd, MSGSND, 0x7C00019C) \ + /* Move From Machine State Register */ \ + V(mfmsr, MFMSR, 0x7C0000A6) \ + /* Move To Machine State Register */ \ + V(mtmsr, MTMSR, 0x7C000124) \ + /* TLB Synchronize */ \ + V(tlbsync, TLBSYNC, 0x7C00046C) \ + /* Transaction Abort */ \ + V(tabort, TABORT, 0x7C00071D) \ + /* Transaction Abort Doubleword Conditional */ \ + V(tabortdc, TABORTDC, 0x7C00065D) \ + /* Transaction Abort Doubleword Conditional Immediate */ \ + V(tabortdci, TABORTDCI, 0x7C0006DD) \ + /* Transaction Abort Word Conditional */ \ + V(tabortwc, TABORTWC, 0x7C00061D) \ + /* Transaction Abort Word Conditional Immediate */ \ + V(tabortwci, TABORTWCI, 0x7C00069D) \ + /* Transaction Begin */ \ + V(tbegin, TBEGIN, 0x7C00051D) \ + /* Transaction Check */ \ + V(tcheck, TCHECK, 0x7C00059C) \ + /* Transaction End */ \ + V(tend, TEND, 0x7C00055C) \ + /* Transaction Recheckpoint */ \ + V(trechkpt, TRECHKPT, 0x7C0007DD) \ + /* Transaction Reclaim */ \ + V(treclaim, TRECLAIM, 0x7C00075D) \ + /* Transaction Suspend or Resume */ \ + V(tsr, TSR, 0x7C0005DC) \ + /* Load Vector Element Byte Indexed */ \ + V(lvebx, LVEBX, 0x7C00000E) \ + /* Load Vector Element Halfword Indexed */ \ + V(lvehx, LVEHX, 0x7C00004E) \ + /* Load Vector Element Word Indexed */ \ + V(lvewx, LVEWX, 0x7C00008E) \ + /* Load Vector for Shift Left */ \ + V(lvsl, LVSL, 0x7C00000C) \ + /* Load Vector for Shift Right */ \ + V(lvsr, LVSR, 0x7C00004C) \ + /* Load Vector Indexed */ \ + V(lvx, LVX, 0x7C0000CE) \ + /* Load Vector Indexed Last */ \ + V(lvxl, LVXL, 0x7C0002CE) \ + /* Store Vector Element Byte Indexed */ \ + V(stvebx, STVEBX, 0x7C00010E) \ + /* Store Vector Element Halfword Indexed */ \ + V(stvehx, STVEHX, 0x7C00014E) \ + /* Store Vector Element Word Indexed */ \ + V(stvewx, STVEWX, 0x7C00018E) \ + /* Store Vector Indexed */ \ + V(stvx, STVX, 0x7C0001CE) \ + /* Store Vector Indexed Last */ \ + V(stvxl, STVXL, 0x7C0003CE) \ + /* Vector Minimum Signed Doubleword */ \ + V(vminsd, VMINSD, 0x100003C2) \ + /* Floating Merge Even Word */ \ + V(fmrgew, FMRGEW, 0xFC00078C) \ + /* Floating Merge Odd Word */ \ + V(fmrgow, FMRGOW, 0xFC00068C) \ + /* Wait for Interrupt */ \ + V(wait, WAIT, 0x7C00007C) + +#define PPC_EVS_OPCODE_LIST(V) \ + /* Vector Select */ \ + V(evsel, EVSEL, 0x10000278) + +#define PPC_DS_OPCODE_LIST(V) \ + /* Load Doubleword */ \ + V(ld, LD, 0xE8000000) \ + /* Load Doubleword with Update */ \ + V(ldu, LDU, 0xE8000001) \ + /* Load Word Algebraic */ \ + V(lwa, LWA, 0xE8000002) \ + /* Store Doubleword */ \ + V(std, STD, 0xF8000000) \ + /* Store Doubleword with Update */ \ + V(stdu, STDU, 0xF8000001) \ + /* Load Floating-Point Double Pair */ \ + V(lfdp, LFDP, 0xE4000000) \ + /* Store Floating-Point Double Pair */ \ + V(stfdp, STFDP, 0xF4000000) \ + /* Store Quadword */ \ + V(stq, STQ, 0xF8000002) + +#define PPC_D_OPCODE_LIST(V) \ + /* Trap Doubleword Immediate */ \ + V(tdi, TDI, 0x08000000) \ + /* Add Immediate */ \ + V(addi, ADDI, 0x38000000) \ + /* Add Immediate Carrying */ \ + V(addic, ADDIC, 0x30000000) \ + /* Add 
Immediate Carrying & record CR0 */ \ + V(addicx, ADDICx, 0x34000000) \ + /* Add Immediate Shifted */ \ + V(addis, ADDIS, 0x3C000000) \ + /* AND Immediate & record CR0 */ \ + V(andix, ANDIx, 0x70000000) \ + /* AND Immediate Shifted & record CR0 */ \ + V(andisx, ANDISx, 0x74000000) \ + /* Compare Immediate */ \ + V(cmpi, CMPI, 0x2C000000) \ + /* Compare Logical Immediate */ \ + V(cmpli, CMPLI, 0x28000000) \ + /* Load Byte and Zero */ \ + V(lbz, LBZ, 0x88000000) \ + /* Load Byte and Zero with Update */ \ + V(lbzu, LBZU, 0x8C000000) \ + /* Load Halfword Algebraic */ \ + V(lha, LHA, 0xA8000000) \ + /* Load Halfword Algebraic with Update */ \ + V(lhau, LHAU, 0xAC000000) \ + /* Load Halfword and Zero */ \ + V(lhz, LHZ, 0xA0000000) \ + /* Load Halfword and Zero with Update */ \ + V(lhzu, LHZU, 0xA4000000) \ + /* Load Multiple Word */ \ + V(lmw, LMW, 0xB8000000) \ + /* Load Word and Zero */ \ + V(lwz, LWZ, 0x80000000) \ + /* Load Word and Zero with Update */ \ + V(lwzu, LWZU, 0x84000000) \ + /* Multiply Low Immediate */ \ + V(mulli, MULLI, 0x1C000000) \ + /* OR Immediate */ \ + V(ori, ORI, 0x60000000) \ + /* OR Immediate Shifted */ \ + V(oris, ORIS, 0x64000000) \ + /* Store Byte */ \ + V(stb, STB, 0x98000000) \ + /* Store Byte with Update */ \ + V(stbu, STBU, 0x9C000000) \ + /* Store Halfword */ \ + V(sth, STH, 0xB0000000) \ + /* Store Halfword with Update */ \ + V(sthu, STHU, 0xB4000000) \ + /* Store Multiple Word */ \ + V(stmw, STMW, 0xBC000000) \ + /* Store Word */ \ + V(stw, STW, 0x90000000) \ + /* Store Word with Update */ \ + V(stwu, STWU, 0x94000000) \ + /* Subtract From Immediate Carrying */ \ + V(subfic, SUBFIC, 0x20000000) \ + /* Trap Word Immediate */ \ + V(twi, TWI, 0x0C000000) \ + /* XOR Immediate */ \ + V(xori, XORI, 0x68000000) \ + /* XOR Immediate Shifted */ \ + V(xoris, XORIS, 0x6C000000) \ + /* Load Floating-Point Double */ \ + V(lfd, LFD, 0xC8000000) \ + /* Load Floating-Point Double with Update */ \ + V(lfdu, LFDU, 0xCC000000) \ + /* Load Floating-Point Single */ \ + V(lfs, LFS, 0xC0000000) \ + /* Load Floating-Point Single with Update */ \ + V(lfsu, LFSU, 0xC4000000) \ + /* Store Floating-Point Double */ \ + V(stfd, STFD, 0xD8000000) \ + /* Store Floating-Point Double with Update */ \ + V(stfdu, STFDU, 0xDC000000) \ + /* Store Floating-Point Single */ \ + V(stfs, STFS, 0xD0000000) \ + /* Store Floating-Point Single with Update */ \ + V(stfsu, STFSU, 0xD4000000) + +#define PPC_XFL_OPCODE_LIST(V) \ + /* Move To FPSCR Fields */ \ + V(mtfsf, MTFSF, 0xFC00058E) + +#define PPC_XFX_OPCODE_LIST(V) \ + /* Move From Condition Register */ \ + V(mfcr, MFCR, 0x7C000026) \ + /* Move From One Condition Register Field */ \ + V(mfocrf, MFOCRF, 0x7C100026) \ + /* Move From Special Purpose Register */ \ + V(mfspr, MFSPR, 0x7C0002A6) \ + /* Move To Condition Register Fields */ \ + V(mtcrf, MTCRF, 0x7C000120) \ + /* Move To One Condition Register Field */ \ + V(mtocrf, MTOCRF, 0x7C100120) \ + /* Move To Special Purpose Register */ \ + V(mtspr, MTSPR, 0x7C0003A6) \ + /* Debugger Notify Halt */ \ + V(dnh, DNH, 0x4C00018C) \ + /* Move From Device Control Register */ \ + V(mfdcr, MFDCR, 0x7C000286) \ + /* Move To Device Control Register */ \ + V(mtdcr, MTDCR, 0x7C000386) \ + /* Move from Performance Monitor Register */ \ + V(mfpmr, MFPMR, 0x7C00029C) \ + /* Move To Performance Monitor Register */ \ + V(mtpmr, MTPMR, 0x7C00039C) \ + /* Move From Branch History Rolling Buffer */ \ + V(mfbhrbe, MFBHRBE, 0x7C00025C) \ + /* Move From Time Base */ \ + V(mftb, MFTB, 0x7C0002E6) + +#define 
PPC_MDS_OPCODE_LIST(V) \ + /* Rotate Left Doubleword then Clear Left */ \ + V(rldcl, RLDCL, 0x78000010) \ + /* Rotate Left Doubleword then Clear Right */ \ + V(rldcr, RLDCR, 0x78000012) + +#define PPC_A_OPCODE_LIST(V) \ + /* Integer Select */ \ + V(isel, ISEL, 0x7C00001E) \ + /* Floating Add */ \ + V(fadd, FADD, 0xFC00002A) \ + /* Floating Add Single */ \ + V(fadds, FADDS, 0xEC00002A) \ + /* Floating Divide */ \ + V(fdiv, FDIV, 0xFC000024) \ + /* Floating Divide Single */ \ + V(fdivs, FDIVS, 0xEC000024) \ + /* Floating Multiply-Add */ \ + V(fmadd, FMADD, 0xFC00003A) \ + /* Floating Multiply-Add Single */ \ + V(fmadds, FMADDS, 0xEC00003A) \ + /* Floating Multiply-Subtract */ \ + V(fmsub, FMSUB, 0xFC000038) \ + /* Floating Multiply-Subtract Single */ \ + V(fmsubs, FMSUBS, 0xEC000038) \ + /* Floating Multiply */ \ + V(fmul, FMUL, 0xFC000032) \ + /* Floating Multiply Single */ \ + V(fmuls, FMULS, 0xEC000032) \ + /* Floating Negative Multiply-Add */ \ + V(fnmadd, FNMADD, 0xFC00003E) \ + /* Floating Negative Multiply-Add Single */ \ + V(fnmadds, FNMADDS, 0xEC00003E) \ + /* Floating Negative Multiply-Subtract */ \ + V(fnmsub, FNMSUB, 0xFC00003C) \ + /* Floating Negative Multiply-Subtract Single */ \ + V(fnmsubs, FNMSUBS, 0xEC00003C) \ + /* Floating Reciprocal Estimate Single */ \ + V(fres, FRES, 0xEC000030) \ + /* Floating Reciprocal Square Root Estimate */ \ + V(frsqrte, FRSQRTE, 0xFC000034) \ + /* Floating Select */ \ + V(fsel, FSEL, 0xFC00002E) \ + /* Floating Square Root */ \ + V(fsqrt, FSQRT, 0xFC00002C) \ + /* Floating Square Root Single */ \ + V(fsqrts, FSQRTS, 0xEC00002C) \ + /* Floating Subtract */ \ + V(fsub, FSUB, 0xFC000028) \ + /* Floating Subtract Single */ \ + V(fsubs, FSUBS, 0xEC000028) \ + /* Floating Reciprocal Estimate */ \ + V(fre, FRE, 0xFC000030) \ + /* Floating Reciprocal Square Root Estimate Single */ \ + V(frsqrtes, FRSQRTES, 0xEC000034) + +#define PPC_VA_OPCODE_LIST(V) \ + /* Vector Add Extended & write Carry Unsigned Quadword */ \ + V(vaddecuq, VADDECUQ, 0x1000003D) \ + /* Vector Add Extended Unsigned Quadword Modulo */ \ + V(vaddeuqm, VADDEUQM, 0x1000003C) \ + /* Vector Multiply-Add Single-Precision */ \ + V(vmaddfp, VMADDFP, 0x1000002E) \ + /* Vector Multiply-High-Add Signed Halfword Saturate */ \ + V(vmhaddshs, VMHADDSHS, 0x10000020) \ + /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \ + V(vmhraddshs, VMHRADDSHS, 0x10000021) \ + /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \ + V(vmladduhm, VMLADDUHM, 0x10000022) \ + /* Vector Multiply-Sum Mixed Byte Modulo */ \ + V(vmsummbm, VMSUMMBM, 0x10000025) \ + /* Vector Multiply-Sum Signed Halfword Modulo */ \ + V(vmsumshm, VMSUMSHM, 0x10000028) \ + /* Vector Multiply-Sum Signed Halfword Saturate */ \ + V(vmsumshs, VMSUMSHS, 0x10000029) \ + /* Vector Multiply-Sum Unsigned Byte Modulo */ \ + V(vmsumubm, VMSUMUBM, 0x10000024) \ + /* Vector Multiply-Sum Unsigned Halfword Modulo */ \ + V(vmsumuhm, VMSUMUHM, 0x10000026) \ + /* Vector Multiply-Sum Unsigned Halfword Saturate */ \ + V(vmsumuhs, VMSUMUHS, 0x10000027) \ + /* Vector Negative Multiply-Subtract Single-Precision */ \ + V(vnmsubfp, VNMSUBFP, 0x1000002F) \ + /* Vector Permute */ \ + V(vperm, VPERM, 0x1000002B) \ + /* Vector Select */ \ + V(vsel, VSEL, 0x1000002A) \ + /* Vector Shift Left Double by Octet Immediate */ \ + V(vsldoi, VSLDOI, 0x1000002C) \ + /* Vector Subtract Extended & write Carry Unsigned Quadword */ \ + V(vsubecuq, VSUBECUQ, 0x1000003F) \ + /* Vector Subtract Extended Unsigned Quadword Modulo */ \ + V(vsubeuqm, VSUBEUQM, 0x1000003E) \ 
+ /* Vector Permute and Exclusive-OR */ \ + V(vpermxor, VPERMXOR, 0x1000002D) + +#define PPC_XX1_OPCODE_LIST(V) \ + /* Load VSR Scalar Doubleword Indexed */ \ + V(lxsdx, LXSDX, 0x7C000498) \ + /* Load VSX Scalar as Integer Word Algebraic Indexed */ \ + V(lxsiwax, LXSIWAX, 0x7C000098) \ + /* Load VSX Scalar as Integer Word and Zero Indexed */ \ + V(lxsiwzx, LXSIWZX, 0x7C000018) \ + /* Load VSX Scalar Single-Precision Indexed */ \ + V(lxsspx, LXSSPX, 0x7C000418) \ + /* Load VSR Vector Doubleword*2 Indexed */ \ + V(lxvd, LXVD, 0x7C000698) \ + /* Load VSR Vector Doubleword & Splat Indexed */ \ + V(lxvdsx, LXVDSX, 0x7C000298) \ + /* Load VSR Vector Word*4 Indexed */ \ + V(lxvw, LXVW, 0x7C000618) \ + /* Move From VSR Doubleword */ \ + V(mfvsrd, MFVSRD, 0x7C000066) \ + /* Move From VSR Word and Zero */ \ + V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \ + /* Store VSR Scalar Doubleword Indexed */ \ + V(stxsdx, STXSDX, 0x7C000598) \ + /* Store VSX Scalar as Integer Word Indexed */ \ + V(stxsiwx, STXSIWX, 0x7C000118) \ + /* Store VSR Scalar Word Indexed */ \ + V(stxsspx, STXSSPX, 0x7C000518) \ + /* Store VSR Vector Doubleword*2 Indexed */ \ + V(stxvd, STXVD, 0x7C000798) \ + /* Store VSR Vector Word*4 Indexed */ \ + V(stxvw, STXVW, 0x7C000718) + +#define PPC_B_OPCODE_LIST(V) \ + /* Branch Conditional */ \ + V(bc, BCX, 0x40000000) + +#define PPC_XO_OPCODE_LIST(V) \ + /* Divide Doubleword */ \ + V(divd, DIVD, 0x7C0003D2) \ + /* Divide Doubleword Extended */ \ + V(divde, DIVDE, 0x7C000352) \ + /* Divide Doubleword Extended & record OV */ \ + V(divdeo, DIVDEO, 0x7C000752) \ + /* Divide Doubleword Extended Unsigned */ \ + V(divdeu, DIVDEU, 0x7C000312) \ + /* Divide Doubleword Extended Unsigned & record OV */ \ + V(divdeuo, DIVDEUO, 0x7C000712) \ + /* Divide Doubleword & record OV */ \ + V(divdo, DIVDO, 0x7C0007D2) \ + /* Divide Doubleword Unsigned */ \ + V(divdu, DIVDU, 0x7C000392) \ + /* Divide Doubleword Unsigned & record OV */ \ + V(divduo, DIVDUO, 0x7C000792) \ + /* Multiply High Doubleword */ \ + V(mulhd, MULHD, 0x7C000092) \ + /* Multiply High Doubleword Unsigned */ \ + V(mulhdu, MULHDU, 0x7C000012) \ + /* Multiply Low Doubleword */ \ + V(mulld, MULLD, 0x7C0001D2) \ + /* Multiply Low Doubleword & record OV */ \ + V(mulldo, MULLDO, 0x7C0005D2) \ + /* Add */ \ + V(add, ADDX, 0x7C000214) \ + /* Add Carrying */ \ + V(addc, ADDCX, 0x7C000014) \ + /* Add Carrying & record OV */ \ + V(addco, ADDCO, 0x7C000414) \ + /* Add Extended */ \ + V(adde, ADDEX, 0x7C000114) \ + /* Add Extended & record OV & record OV */ \ + V(addeo, ADDEO, 0x7C000514) \ + /* Add to Minus One Extended */ \ + V(addme, ADDME, 0x7C0001D4) \ + /* Add to Minus One Extended & record OV */ \ + V(addmeo, ADDMEO, 0x7C0005D4) \ + /* Add & record OV */ \ + V(addo, ADDO, 0x7C000614) \ + /* Add to Zero Extended */ \ + V(addze, ADDZEX, 0x7C000194) \ + /* Add to Zero Extended & record OV */ \ + V(addzeo, ADDZEO, 0x7C000594) \ + /* Divide Word Format */ \ + V(divw, DIVW, 0x7C0003D6) \ + /* Divide Word Extended */ \ + V(divwe, DIVWE, 0x7C000356) \ + /* Divide Word Extended & record OV */ \ + V(divweo, DIVWEO, 0x7C000756) \ + /* Divide Word Extended Unsigned */ \ + V(divweu, DIVWEU, 0x7C000316) \ + /* Divide Word Extended Unsigned & record OV */ \ + V(divweuo, DIVWEUO, 0x7C000716) \ + /* Divide Word & record OV */ \ + V(divwo, DIVWO, 0x7C0007D6) \ + /* Divide Word Unsigned */ \ + V(divwu, DIVWU, 0x7C000396) \ + /* Divide Word Unsigned & record OV */ \ + V(divwuo, DIVWUO, 0x7C000796) \ + /* Multiply High Word */ \ + V(mulhw, MULHWX, 0x7C000096) \ + /* Multiply High 
Word Unsigned */ \ + V(mulhwu, MULHWUX, 0x7C000016) \ + /* Multiply Low Word */ \ + V(mullw, MULLW, 0x7C0001D6) \ + /* Multiply Low Word & record OV */ \ + V(mullwo, MULLWO, 0x7C0005D6) \ + /* Negate */ \ + V(neg, NEGX, 0x7C0000D0) \ + /* Negate & record OV */ \ + V(nego, NEGO, 0x7C0004D0) \ + /* Subtract From */ \ + V(subf, SUBFX, 0x7C000050) \ + /* Subtract From Carrying */ \ + V(subfc, SUBFCX, 0x7C000010) \ + /* Subtract From Carrying & record OV */ \ + V(subfco, SUBFCO, 0x7C000410) \ + /* Subtract From Extended */ \ + V(subfe, SUBFEX, 0x7C000110) \ + /* Subtract From Extended & record OV */ \ + V(subfeo, SUBFEO, 0x7C000510) \ + /* Subtract From Minus One Extended */ \ + V(subfme, SUBFME, 0x7C0001D0) \ + /* Subtract From Minus One Extended & record OV */ \ + V(subfmeo, SUBFMEO, 0x7C0005D0) \ + /* Subtract From & record OV */ \ + V(subfo, SUBFO, 0x7C000450) \ + /* Subtract From Zero Extended */ \ + V(subfze, SUBFZE, 0x7C000190) \ + /* Subtract From Zero Extended & record OV */ \ + V(subfzeo, SUBFZEO, 0x7C000590) \ + /* Add and Generate Sixes */ \ + V(addg, ADDG, 0x7C000094) \ + /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \ + V(macchw, MACCHW, 0x10000158) \ + /* Multiply Accumulate Cross Halfword to Word Modulo Signed & record OV */ \ + V(macchwo, MACCHWO, 0x10000158) \ + /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \ + V(macchws, MACCHWS, 0x100001D8) \ + /* Multiply Accumulate Cross Halfword to Word Saturate Signed & record */ \ + /* OV */ \ + V(macchwso, MACCHWSO, 0x100001D8) \ + /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \ + V(macchwsu, MACCHWSU, 0x10000198) \ + /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned & record */ \ + /* OV */ \ + V(macchwsuo, MACCHWSUO, 0x10000198) \ + /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \ + V(macchwu, MACCHWU, 0x10000118) \ + /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned & record */ \ + /* OV */ \ + V(macchwuo, MACCHWUO, 0x10000118) \ + /* Multiply Accumulate High Halfword to Word Modulo Signed */ \ + V(machhw, MACHHW, 0x10000058) \ + /* Multiply Accumulate High Halfword to Word Modulo Signed & record OV */ \ + V(machhwo, MACHHWO, 0x10000058) \ + /* Multiply Accumulate High Halfword to Word Saturate Signed */ \ + V(machhws, MACHHWS, 0x100000D8) \ + /* Multiply Accumulate High Halfword to Word Saturate Signed & record OV */ \ + V(machhwso, MACHHWSO, 0x100000D8) \ + /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \ + V(machhwsu, MACHHWSU, 0x10000098) \ + /* Multiply Accumulate High Halfword to Word Saturate Unsigned & record */ \ + /* OV */ \ + V(machhwsuo, MACHHWSUO, 0x10000098) \ + /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \ + V(machhwu, MACHHWU, 0x10000018) \ + /* Multiply Accumulate High Halfword to Word Modulo Unsigned & record OV */ \ + V(machhwuo, MACHHWUO, 0x10000018) \ + /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \ + V(maclhw, MACLHW, 0x10000358) \ + /* Multiply Accumulate Low Halfword to Word Modulo Signed & record OV */ \ + V(maclhwo, MACLHWO, 0x10000358) \ + /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \ + V(maclhws, MACLHWS, 0x100003D8) \ + /* Multiply Accumulate Low Halfword to Word Saturate Signed & record OV */ \ + V(maclhwso, MACLHWSO, 0x100003D8) \ + /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \ + V(maclhwsu, MACLHWSU, 0x10000398) \ + /* Multiply Accumulate Low Halfword to Word Saturate Unsigned & record */ \ + /* OV */ \ + 
V(maclhwsuo, MACLHWSUO, 0x10000398) \ + /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \ + V(maclhwu, MACLHWU, 0x10000318) \ + /* Multiply Accumulate Low Halfword to Word Modulo Unsigned & record OV */ \ + V(maclhwuo, MACLHWUO, 0x10000318) \ + /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \ + V(nmacchw, NMACCHW, 0x1000015C) \ + /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed & */ \ + /* record OV */ \ + V(nmacchwo, NMACCHWO, 0x1000015C) \ + /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \ + V(nmacchws, NMACCHWS, 0x100001DC) \ + /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed & */ \ + /* record OV */ \ + V(nmacchwso, NMACCHWSO, 0x100001DC) \ + /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \ + V(nmachhw, NMACHHW, 0x1000005C) \ + /* Negative Multiply Accumulate High Halfword to Word Modulo Signed & */ \ + /* record OV */ \ + V(nmachhwo, NMACHHWO, 0x1000005C) \ + /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \ + V(nmachhws, NMACHHWS, 0x100000DC) \ + /* Negative Multiply Accumulate High Halfword to Word Saturate Signed & */ \ + /* record OV */ \ + V(nmachhwso, NMACHHWSO, 0x100000DC) \ + /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \ + V(nmaclhw, NMACLHW, 0x1000035C) \ + /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed & */ \ + /* record OV */ \ + V(nmaclhwo, NMACLHWO, 0x1000035C) \ + /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \ + V(nmaclhws, NMACLHWS, 0x100003DC) \ + /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed & */ \ + /* record OV */ \ + V(nmaclhwso, NMACLHWSO, 0x100003DC) + +#define PPC_XL_OPCODE_LIST(V) \ + /* Branch Conditional to Count Register */ \ + V(bcctr, BCCTRX, 0x4C000420) \ + /* Branch Conditional to Link Register */ \ + V(bclr, BCLRX, 0x4C000020) \ + /* Condition Register AND */ \ + V(crand, CRAND, 0x4C000202) \ + /* Condition Register AND with Complement */ \ + V(crandc, CRANDC, 0x4C000102) \ + /* Condition Register Equivalent */ \ + V(creqv, CREQV, 0x4C000242) \ + /* Condition Register NAND */ \ + V(crnand, CRNAND, 0x4C0001C2) \ + /* Condition Register NOR */ \ + V(crnor, CRNOR, 0x4C000042) \ + /* Condition Register OR */ \ + V(cror, CROR, 0x4C000382) \ + /* Condition Register OR with Complement */ \ + V(crorc, CRORC, 0x4C000342) \ + /* Condition Register XOR */ \ + V(crxor, CRXOR, 0x4C000182) \ + /* Instruction Synchronize */ \ + V(isync, ISYNC, 0x4C00012C) \ + /* Move Condition Register Field */ \ + V(mcrf, MCRF, 0x4C000000) \ + /* Return From Critical Interrupt */ \ + V(rfci, RFCI, 0x4C000066) \ + /* Return From Interrupt */ \ + V(rfi, RFI, 0x4C000064) \ + /* Return From Machine Check Interrupt */ \ + V(rfmci, RFMCI, 0x4C00004C) \ + /* Embedded Hypervisor Privilege */ \ + V(ehpriv, EHPRIV, 0x7C00021C) \ + /* Return From Guest Interrupt */ \ + V(rfgi, RFGI, 0x4C0000CC) \ + /* Doze */ \ + V(doze, DOZE, 0x4C000324) \ + /* Return From Interrupt Doubleword Hypervisor */ \ + V(hrfid, HRFID, 0x4C000224) \ + /* Nap */ \ + V(nap, NAP, 0x4C000364) \ + /* Return from Event Based Branch */ \ + V(rfebb, RFEBB, 0x4C000124) \ + /* Return from Interrupt Doubleword */ \ + V(rfid, RFID, 0x4C000024) \ + /* Rip Van Winkle */ \ + V(rvwinkle, RVWINKLE, 0x4C0003E4) \ + /* Sleep */ \ + V(sleep, SLEEP, 0x4C0003A4) + +#define PPC_XX4_OPCODE_LIST(V) \ + /* VSX Select */ \ + V(xxsel, XXSEL, 0xF0000030) + +#define PPC_I_OPCODE_LIST(V) \ + /* 
Branch */ \ + V(b, BX, 0x48000000) + +#define PPC_M_OPCODE_LIST(V) \ + /* Rotate Left Word Immediate then Mask Insert */ \ + V(rlwimi, RLWIMIX, 0x50000000) \ + /* Rotate Left Word Immediate then AND with Mask */ \ + V(rlwinm, RLWINMX, 0x54000000) \ + /* Rotate Left Word then AND with Mask */ \ + V(rlwnm, RLWNMX, 0x5C000000) + +#define PPC_VX_OPCODE_LIST(V) \ + /* Decimal Add Modulo */ \ + V(bcdadd, BCDADD, 0xF0000400) \ + /* Decimal Subtract Modulo */ \ + V(bcdsub, BCDSUB, 0xF0000440) \ + /* Move From Vector Status and Control Register */ \ + V(mfvscr, MFVSCR, 0x10000604) \ + /* Move To Vector Status and Control Register */ \ + V(mtvscr, MTVSCR, 0x10000644) \ + /* Vector Add & write Carry Unsigned Quadword */ \ + V(vaddcuq, VADDCUQ, 0x10000140) \ + /* Vector Add and Write Carry-Out Unsigned Word */ \ + V(vaddcuw, VADDCUW, 0x10000180) \ + /* Vector Add Single-Precision */ \ + V(vaddfp, VADDFP, 0x1000000A) \ + /* Vector Add Signed Byte Saturate */ \ + V(vaddsbs, VADDSBS, 0x10000300) \ + /* Vector Add Signed Halfword Saturate */ \ + V(vaddshs, VADDSHS, 0x10000340) \ + /* Vector Add Signed Word Saturate */ \ + V(vaddsws, VADDSWS, 0x10000380) \ + /* Vector Add Unsigned Byte Modulo */ \ + V(vaddubm, VADDUBM, 0x10000000) \ + /* Vector Add Unsigned Byte Saturate */ \ + V(vaddubs, VADDUBS, 0x10000200) \ + /* Vector Add Unsigned Doubleword Modulo */ \ + V(vaddudm, VADDUDM, 0x100000C0) \ + /* Vector Add Unsigned Halfword Modulo */ \ + V(vadduhm, VADDUHM, 0x10000040) \ + /* Vector Add Unsigned Halfword Saturate */ \ + V(vadduhs, VADDUHS, 0x10000240) \ + /* Vector Add Unsigned Quadword Modulo */ \ + V(vadduqm, VADDUQM, 0x10000100) \ + /* Vector Add Unsigned Word Modulo */ \ + V(vadduwm, VADDUWM, 0x10000080) \ + /* Vector Add Unsigned Word Saturate */ \ + V(vadduws, VADDUWS, 0x10000280) \ + /* Vector Logical AND */ \ + V(vand, VAND, 0x10000404) \ + /* Vector Logical AND with Complement */ \ + V(vandc, VANDC, 0x10000444) \ + /* Vector Average Signed Byte */ \ + V(vavgsb, VAVGSB, 0x10000502) \ + /* Vector Average Signed Halfword */ \ + V(vavgsh, VAVGSH, 0x10000542) \ + /* Vector Average Signed Word */ \ + V(vavgsw, VAVGSW, 0x10000582) \ + /* Vector Average Unsigned Byte */ \ + V(vavgub, VAVGUB, 0x10000402) \ + /* Vector Average Unsigned Halfword */ \ + V(vavguh, VAVGUH, 0x10000442) \ + /* Vector Average Unsigned Word */ \ + V(vavguw, VAVGUW, 0x10000482) \ + /* Vector Bit Permute Quadword */ \ + V(vbpermq, VBPERMQ, 0x1000054C) \ + /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \ + V(vcfsx, VCFSX, 0x1000034A) \ + /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \ + V(vcfux, VCFUX, 0x1000030A) \ + /* Vector Count Leading Zeros Byte */ \ + V(vclzb, VCLZB, 0x10000702) \ + /* Vector Count Leading Zeros Doubleword */ \ + V(vclzd, VCLZD, 0x100007C2) \ + /* Vector Count Leading Zeros Halfword */ \ + V(vclzh, VCLZH, 0x10000742) \ + /* Vector Count Leading Zeros Word */ \ + V(vclzw, VCLZW, 0x10000782) \ + /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \ + /* Saturate */ \ + V(vctsxs, VCTSXS, 0x100003CA) \ + /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \ + /* Saturate */ \ + V(vctuxs, VCTUXS, 0x1000038A) \ + /* Vector Equivalence */ \ + V(veqv, VEQV, 0x10000684) \ + /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \ + V(vexptefp, VEXPTEFP, 0x1000018A) \ + /* Vector Gather Bits by Byte by Doubleword */ \ + V(vgbbd, VGBBD, 0x1000050C) \ + /* Vector Log Base 2 Estimate Single-Precision */ \ + V(vlogefp, 
VLOGEFP, 0x100001CA) \ + /* Vector Maximum Single-Precision */ \ + V(vmaxfp, VMAXFP, 0x1000040A) \ + /* Vector Maximum Signed Byte */ \ + V(vmaxsb, VMAXSB, 0x10000102) \ + /* Vector Maximum Signed Doubleword */ \ + V(vmaxsd, VMAXSD, 0x100001C2) \ + /* Vector Maximum Signed Halfword */ \ + V(vmaxsh, VMAXSH, 0x10000142) \ + /* Vector Maximum Signed Word */ \ + V(vmaxsw, VMAXSW, 0x10000182) \ + /* Vector Maximum Unsigned Byte */ \ + V(vmaxub, VMAXUB, 0x10000002) \ + /* Vector Maximum Unsigned Doubleword */ \ + V(vmaxud, VMAXUD, 0x100000C2) \ + /* Vector Maximum Unsigned Halfword */ \ + V(vmaxuh, VMAXUH, 0x10000042) \ + /* Vector Maximum Unsigned Word */ \ + V(vmaxuw, VMAXUW, 0x10000082) \ + /* Vector Minimum Single-Precision */ \ + V(vminfp, VMINFP, 0x1000044A) \ + /* Vector Minimum Signed Byte */ \ + V(vminsb, VMINSB, 0x10000302) \ + /* Vector Minimum Signed Halfword */ \ + V(vminsh, VMINSH, 0x10000342) \ + /* Vector Minimum Signed Word */ \ + V(vminsw, VMINSW, 0x10000382) \ + /* Vector Minimum Unsigned Byte */ \ + V(vminub, VMINUB, 0x10000202) \ + /* Vector Minimum Unsigned Doubleword */ \ + V(vminud, VMINUD, 0x100002C2) \ + /* Vector Minimum Unsigned Halfword */ \ + V(vminuh, VMINUH, 0x10000242) \ + /* Vector Minimum Unsigned Word */ \ + V(vminuw, VMINUW, 0x10000282) \ + /* Vector Merge High Byte */ \ + V(vmrghb, VMRGHB, 0x1000000C) \ + /* Vector Merge High Halfword */ \ + V(vmrghh, VMRGHH, 0x1000004C) \ + /* Vector Merge High Word */ \ + V(vmrghw, VMRGHW, 0x1000008C) \ + /* Vector Merge Low Byte */ \ + V(vmrglb, VMRGLB, 0x1000010C) \ + /* Vector Merge Low Halfword */ \ + V(vmrglh, VMRGLH, 0x1000014C) \ + /* Vector Merge Low Word */ \ + V(vmrglw, VMRGLW, 0x1000018C) \ + /* Vector Multiply Even Signed Byte */ \ + V(vmulesb, VMULESB, 0x10000308) \ + /* Vector Multiply Even Signed Halfword */ \ + V(vmulesh, VMULESH, 0x10000348) \ + /* Vector Multiply Even Signed Word */ \ + V(vmulesw, VMULESW, 0x10000388) \ + /* Vector Multiply Even Unsigned Byte */ \ + V(vmuleub, VMULEUB, 0x10000208) \ + /* Vector Multiply Even Unsigned Halfword */ \ + V(vmuleuh, VMULEUH, 0x10000248) \ + /* Vector Multiply Even Unsigned Word */ \ + V(vmuleuw, VMULEUW, 0x10000288) \ + /* Vector Multiply Odd Signed Byte */ \ + V(vmulosb, VMULOSB, 0x10000108) \ + /* Vector Multiply Odd Signed Halfword */ \ + V(vmulosh, VMULOSH, 0x10000148) \ + /* Vector Multiply Odd Signed Word */ \ + V(vmulosw, VMULOSW, 0x10000188) \ + /* Vector Multiply Odd Unsigned Byte */ \ + V(vmuloub, VMULOUB, 0x10000008) \ + /* Vector Multiply Odd Unsigned Halfword */ \ + V(vmulouh, VMULOUH, 0x10000048) \ + /* Vector Multiply Odd Unsigned Word */ \ + V(vmulouw, VMULOUW, 0x10000088) \ + /* Vector Multiply Unsigned Word Modulo */ \ + V(vmuluwm, VMULUWM, 0x10000089) \ + /* Vector NAND */ \ + V(vnand, VNAND, 0x10000584) \ + /* Vector Logical NOR */ \ + V(vnor, VNOR, 0x10000504) \ + /* Vector Logical OR */ \ + V(vor, VOR, 0x10000484) \ + /* Vector OR with Complement */ \ + V(vorc, VORC, 0x10000544) \ + /* Vector Pack Pixel */ \ + V(vpkpx, VPKPX, 0x1000030E) \ + /* Vector Pack Signed Doubleword Signed Saturate */ \ + V(vpksdss, VPKSDSS, 0x100005CE) \ + /* Vector Pack Signed Doubleword Unsigned Saturate */ \ + V(vpksdus, VPKSDUS, 0x1000054E) \ + /* Vector Pack Signed Halfword Signed Saturate */ \ + V(vpkshss, VPKSHSS, 0x1000018E) \ + /* Vector Pack Signed Halfword Unsigned Saturate */ \ + V(vpkshus, VPKSHUS, 0x1000010E) \ + /* Vector Pack Signed Word Signed Saturate */ \ + V(vpkswss, VPKSWSS, 0x100001CE) \ + /* Vector Pack Signed Word Unsigned Saturate */ \ + 
V(vpkswus, VPKSWUS, 0x1000014E) \ + /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \ + V(vpkudum, VPKUDUM, 0x1000044E) \ + /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \ + V(vpkudus, VPKUDUS, 0x100004CE) \ + /* Vector Pack Unsigned Halfword Unsigned Modulo */ \ + V(vpkuhum, VPKUHUM, 0x1000000E) \ + /* Vector Pack Unsigned Halfword Unsigned Saturate */ \ + V(vpkuhus, VPKUHUS, 0x1000008E) \ + /* Vector Pack Unsigned Word Unsigned Modulo */ \ + V(vpkuwum, VPKUWUM, 0x1000004E) \ + /* Vector Pack Unsigned Word Unsigned Saturate */ \ + V(vpkuwus, VPKUWUS, 0x100000CE) \ + /* Vector Polynomial Multiply-Sum Byte */ \ + V(vpmsumb, VPMSUMB, 0x10000408) \ + /* Vector Polynomial Multiply-Sum Doubleword */ \ + V(vpmsumd, VPMSUMD, 0x100004C8) \ + /* Vector Polynomial Multiply-Sum Halfword */ \ + V(vpmsumh, VPMSUMH, 0x10000448) \ + /* Vector Polynomial Multiply-Sum Word */ \ + V(vpmsumw, VPMSUMW, 0x10000488) \ + /* Vector Population Count Byte */ \ + V(vpopcntb, VPOPCNTB, 0x10000703) \ + /* Vector Population Count Doubleword */ \ + V(vpopcntd, VPOPCNTD, 0x100007C3) \ + /* Vector Population Count Halfword */ \ + V(vpopcnth, VPOPCNTH, 0x10000743) \ + /* Vector Population Count Word */ \ + V(vpopcntw, VPOPCNTW, 0x10000783) \ + /* Vector Reciprocal Estimate Single-Precision */ \ + V(vrefp, VREFP, 0x1000010A) \ + /* Vector Round to Single-Precision Integer toward -Infinity */ \ + V(vrfim, VRFIM, 0x100002CA) \ + /* Vector Round to Single-Precision Integer Nearest */ \ + V(vrfin, VRFIN, 0x1000020A) \ + /* Vector Round to Single-Precision Integer toward +Infinity */ \ + V(vrfip, VRFIP, 0x1000028A) \ + /* Vector Round to Single-Precision Integer toward Zero */ \ + V(vrfiz, VRFIZ, 0x1000024A) \ + /* Vector Rotate Left Byte */ \ + V(vrlb, VRLB, 0x10000004) \ + /* Vector Rotate Left Doubleword */ \ + V(vrld, VRLD, 0x100000C4) \ + /* Vector Rotate Left Halfword */ \ + V(vrlh, VRLH, 0x10000044) \ + /* Vector Rotate Left Word */ \ + V(vrlw, VRLW, 0x10000084) \ + /* Vector Reciprocal Square Root Estimate Single-Precision */ \ + V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \ + /* Vector Shift Left */ \ + V(vsl, VSL, 0x100001C4) \ + /* Vector Shift Left Byte */ \ + V(vslb, VSLB, 0x10000104) \ + /* Vector Shift Left Doubleword */ \ + V(vsld, VSLD, 0x100005C4) \ + /* Vector Shift Left Halfword */ \ + V(vslh, VSLH, 0x10000144) \ + /* Vector Shift Left by Octet */ \ + V(vslo, VSLO, 0x1000040C) \ + /* Vector Shift Left Word */ \ + V(vslw, VSLW, 0x10000184) \ + /* Vector Splat Byte */ \ + V(vspltb, VSPLTB, 0x1000020C) \ + /* Vector Splat Halfword */ \ + V(vsplth, VSPLTH, 0x1000024C) \ + /* Vector Splat Immediate Signed Byte */ \ + V(vspltisb, VSPLTISB, 0x1000030C) \ + /* Vector Splat Immediate Signed Halfword */ \ + V(vspltish, VSPLTISH, 0x1000034C) \ + /* Vector Splat Immediate Signed Word */ \ + V(vspltisw, VSPLTISW, 0x1000038C) \ + /* Vector Splat Word */ \ + V(vspltw, VSPLTW, 0x1000028C) \ + /* Vector Shift Right */ \ + V(vsr, VSR, 0x100002C4) \ + /* Vector Shift Right Algebraic Byte */ \ + V(vsrab, VSRAB, 0x10000304) \ + /* Vector Shift Right Algebraic Doubleword */ \ + V(vsrad, VSRAD, 0x100003C4) \ + /* Vector Shift Right Algebraic Halfword */ \ + V(vsrah, VSRAH, 0x10000344) \ + /* Vector Shift Right Algebraic Word */ \ + V(vsraw, VSRAW, 0x10000384) \ + /* Vector Shift Right Byte */ \ + V(vsrb, VSRB, 0x10000204) \ + /* Vector Shift Right Doubleword */ \ + V(vsrd, VSRD, 0x100006C4) \ + /* Vector Shift Right Halfword */ \ + V(vsrh, VSRH, 0x10000244) \ + /* Vector Shift Right by Octet */ \ + V(vsro, VSRO, 0x1000044C) 
\ + /* Vector Shift Right Word */ \ + V(vsrw, VSRW, 0x10000284) \ + /* Vector Subtract & write Carry Unsigned Quadword */ \ + V(vsubcuq, VSUBCUQ, 0x10000540) \ + /* Vector Subtract and Write Carry-Out Unsigned Word */ \ + V(vsubcuw, VSUBCUW, 0x10000580) \ + /* Vector Subtract Single-Precision */ \ + V(vsubfp, VSUBFP, 0x1000004A) \ + /* Vector Subtract Signed Byte Saturate */ \ + V(vsubsbs, VSUBSBS, 0x10000700) \ + /* Vector Subtract Signed Halfword Saturate */ \ + V(vsubshs, VSUBSHS, 0x10000740) \ + /* Vector Subtract Signed Word Saturate */ \ + V(vsubsws, VSUBSWS, 0x10000780) \ + /* Vector Subtract Unsigned Byte Modulo */ \ + V(vsububm, VSUBUBM, 0x10000400) \ + /* Vector Subtract Unsigned Byte Saturate */ \ + V(vsububs, VSUBUBS, 0x10000600) \ + /* Vector Subtract Unsigned Doubleword Modulo */ \ + V(vsubudm, VSUBUDM, 0x100004C0) \ + /* Vector Subtract Unsigned Halfword Modulo */ \ + V(vsubuhm, VSUBUHM, 0x10000440) \ + /* Vector Subtract Unsigned Halfword Saturate */ \ + V(vsubuhs, VSUBUHS, 0x10000640) \ + /* Vector Subtract Unsigned Quadword Modulo */ \ + V(vsubuqm, VSUBUQM, 0x10000500) \ + /* Vector Subtract Unsigned Word Modulo */ \ + V(vsubuwm, VSUBUWM, 0x10000480) \ + /* Vector Subtract Unsigned Word Saturate */ \ + V(vsubuws, VSUBUWS, 0x10000680) \ + /* Vector Sum across Half Signed Word Saturate */ \ + V(vsum2sws, VSUM2SWS, 0x10000688) \ + /* Vector Sum across Quarter Signed Byte Saturate */ \ + V(vsum4sbs, VSUM4SBS, 0x10000708) \ + /* Vector Sum across Quarter Signed Halfword Saturate */ \ + V(vsum4shs, VSUM4SHS, 0x10000648) \ + /* Vector Sum across Quarter Unsigned Byte Saturate */ \ + V(vsum4bus, VSUM4BUS, 0x10000608) \ + /* Vector Sum across Signed Word Saturate */ \ + V(vsumsws, VSUMSWS, 0x10000788) \ + /* Vector Unpack High Pixel */ \ + V(vupkhpx, VUPKHPX, 0x1000034E) \ + /* Vector Unpack High Signed Byte */ \ + V(vupkhsb, VUPKHSB, 0x1000020E) \ + /* Vector Unpack High Signed Halfword */ \ + V(vupkhsh, VUPKHSH, 0x1000024E) \ + /* Vector Unpack High Signed Word */ \ + V(vupkhsw, VUPKHSW, 0x1000064E) \ + /* Vector Unpack Low Pixel */ \ + V(vupklpx, VUPKLPX, 0x100003CE) \ + /* Vector Unpack Low Signed Byte */ \ + V(vupklsb, VUPKLSB, 0x1000028E) \ + /* Vector Unpack Low Signed Halfword */ \ + V(vupklsh, VUPKLSH, 0x100002CE) \ + /* Vector Unpack Low Signed Word */ \ + V(vupklsw, VUPKLSW, 0x100006CE) \ + /* Vector Logical XOR */ \ + V(vxor, VXOR, 0x100004C4) \ + /* Vector AES Cipher */ \ + V(vcipher, VCIPHER, 0x10000508) \ + /* Vector AES Cipher Last */ \ + V(vcipherlast, VCIPHERLAST, 0x10000509) \ + /* Vector AES Inverse Cipher */ \ + V(vncipher, VNCIPHER, 0x10000548) \ + /* Vector AES Inverse Cipher Last */ \ + V(vncipherlast, VNCIPHERLAST, 0x10000549) \ + /* Vector AES S-Box */ \ + V(vsbox, VSBOX, 0x100005C8) \ + /* Vector SHA-512 Sigma Doubleword */ \ + V(vshasigmad, VSHASIGMAD, 0x100006C2) \ + /* Vector SHA-256 Sigma Word */ \ + V(vshasigmaw, VSHASIGMAW, 0x10000682) \ + /* Vector Merge Even Word */ \ + V(vmrgew, VMRGEW, 0x1000078C) \ + /* Vector Merge Odd Word */ \ + V(vmrgow, VMRGOW, 0x1000068C) + +#define PPC_XS_OPCODE_LIST(V) \ + /* Shift Right Algebraic Doubleword Immediate */ \ + V(sradi, SRADIX, 0x7C000674) + +#define PPC_MD_OPCODE_LIST(V) \ + /* Rotate Left Doubleword Immediate then Clear */ \ + V(rldic, RLDIC, 0x78000008) \ + /* Rotate Left Doubleword Immediate then Clear Left */ \ + V(rldicl, RLDICL, 0x78000000) \ + /* Rotate Left Doubleword Immediate then Clear Right */ \ + V(rldicr, RLDICR, 0x78000004) \ + /* Rotate Left Doubleword Immediate then Mask Insert */ \ + 
V(rldimi, RLDIMI, 0x7800000C) + +#define PPC_SC_OPCODE_LIST(V) \ + /* System Call */ \ + V(sc, SC, 0x44000002) + + +#define PPC_OPCODE_LIST(V) \ + PPC_X_OPCODE_LIST(V) \ + PPC_XO_OPCODE_LIST(V) \ + PPC_DS_OPCODE_LIST(V) \ + PPC_MDS_OPCODE_LIST(V) \ + PPC_MD_OPCODE_LIST(V) \ + PPC_XS_OPCODE_LIST(V) \ + PPC_D_OPCODE_LIST(V) \ + PPC_I_OPCODE_LIST(V) \ + PPC_B_OPCODE_LIST(V) \ + PPC_XL_OPCODE_LIST(V) \ + PPC_A_OPCODE_LIST(V) \ + PPC_XFX_OPCODE_LIST(V) \ + PPC_M_OPCODE_LIST(V) \ + PPC_SC_OPCODE_LIST(V) \ + PPC_Z23_OPCODE_LIST(V) \ + PPC_Z22_OPCODE_LIST(V) \ + PPC_EVX_OPCODE_LIST(V) \ + PPC_XFL_OPCODE_LIST(V) \ + PPC_EVS_OPCODE_LIST(V) \ + PPC_VX_OPCODE_LIST(V) \ + PPC_VA_OPCODE_LIST(V) \ + PPC_VC_OPCODE_LIST(V) \ + PPC_XX1_OPCODE_LIST(V) \ + PPC_XX2_OPCODE_LIST(V) \ + PPC_XX3_OPCODE_LIST(V) \ + PPC_XX4_OPCODE_LIST(V) + + +enum Opcode : uint32_t { +#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \ opcode_name = opcode_value, - XX3_OPCODE_LIST(DECLARE_OPCODES) XX2_OPCODE_LIST(DECLARE_OPCODES) -#undef DECLARE_OPCODES + PPC_OPCODE_LIST(DECLARE_INSTRUCTION) +#undef DECLARE_INSTRUCTION + EXT1 = 0x4C000000, // Extended code set 1 + EXT2 = 0x7C000000, // Extended code set 2 + EXT3 = 0xEC000000, // Extended code set 3 + EXT4 = 0xFC000000, // Extended code set 4 + EXT5 = 0x78000000, // Extended code set 5 - 64bit only + EXT6 = 0xF0000000, // Extended code set 6 }; // Instruction encoding bits and masks. @@ -556,7 +2787,7 @@ class Instruction { } // Read a bit field out of the instruction bits. - inline int BitField(int hi, int lo) const { + inline uint32_t BitField(int hi, int lo) const { return InstructionBits() & (((2 << (hi - lo)) - 1) << lo); } @@ -572,7 +2803,7 @@ class Instruction { // Read a bit field out of the instruction bits. - static inline int BitField(Instr instr, int hi, int lo) { + static inline uint32_t BitField(Instr instr, int hi, int lo) { return instr & (((2 << (hi - lo)) - 1) << lo); } @@ -587,7 +2818,7 @@ class Instruction { inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); } inline Opcode OpcodeField() const { - return static_cast<Opcode>(BitField(24, 21)); + return static_cast<Opcode>(BitField(31, 26)); } // Fields used in Software interrupt instructions diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc index 39102a1ef0..b96dc6fece 100644 --- a/deps/v8/src/ppc/deoptimizer-ppc.cc +++ b/deps/v8/src/ppc/deoptimizer-ppc.cc @@ -101,7 +101,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) { - double double_value = input_->GetDoubleRegister(i); + Float64 double_value = input_->GetDoubleRegister(i); output_frame->SetDoubleRegister(i, double_value); } } diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc index 5da45f27f0..6baf3d0c7f 100644 --- a/deps/v8/src/ppc/disasm-ppc.cc +++ b/deps/v8/src/ppc/disasm-ppc.cc @@ -371,13 +371,13 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) { void Decoder::DecodeExt1(Instruction* instr) { - switch (instr->Bits(10, 1) << 1) { + switch (EXT1 | (instr->BitField(10, 1))) { case MCRF: { UnknownFormat(instr, "mcrf"); // not used by V8 break; } case BCLRX: { - int bo = instr->Bits(25, 21) << 21; + int bo = instr->BitField(25, 21); int bi = instr->Bits(20, 16); CRBit cond = static_cast<CRBit>(bi & (CRWIDTH - 1)); switch (bo) { @@ -447,7 +447,7 @@ void Decoder::DecodeExt1(Instruction* instr) { break; } case BCCTRX: 
{ - switch (instr->Bits(25, 21) << 21) { + switch (instr->BitField(25, 21)) { case DCBNZF: { UnknownFormat(instr, "bcctrx-dcbnzf"); break; @@ -541,7 +541,7 @@ void Decoder::DecodeExt1(Instruction* instr) { void Decoder::DecodeExt2(Instruction* instr) { // Some encodings are 10-1 bits, handle those first - switch (instr->Bits(10, 1) << 1) { + switch (EXT2 | (instr->BitField(10, 1))) { case SRWX: { Format(instr, "srw'. 'ra, 'rs, 'rb"); return; @@ -642,7 +642,7 @@ void Decoder::DecodeExt2(Instruction* instr) { #endif } - switch (instr->Bits(10, 2) << 2) { + switch (EXT2 | (instr->BitField(10, 2))) { case SRADIX: { Format(instr, "sradi'. 'ra,'rs,'sh"); return; @@ -650,7 +650,7 @@ void Decoder::DecodeExt2(Instruction* instr) { } // ?? are all of these xo_form? - switch (instr->Bits(9, 1) << 1) { + switch (EXT2 | (instr->BitField(9, 1))) { case CMP: { #if V8_TARGET_ARCH_PPC64 if (instr->Bit(21)) { @@ -899,7 +899,7 @@ void Decoder::DecodeExt2(Instruction* instr) { #endif } - switch (instr->Bits(5, 1) << 1) { + switch (EXT2 | (instr->BitField(5, 1))) { case ISEL: { Format(instr, "isel 'rt, 'ra, 'rb"); return; @@ -912,7 +912,7 @@ void Decoder::DecodeExt2(Instruction* instr) { void Decoder::DecodeExt3(Instruction* instr) { - switch (instr->Bits(10, 1) << 1) { + switch (EXT3 | (instr->BitField(10, 1))) { case FCFID: { Format(instr, "fcfids'. 'Dt, 'Db"); break; @@ -929,7 +929,7 @@ void Decoder::DecodeExt3(Instruction* instr) { void Decoder::DecodeExt4(Instruction* instr) { - switch (instr->Bits(5, 1) << 1) { + switch (EXT4 | (instr->BitField(5, 1))) { case FDIV: { Format(instr, "fdiv'. 'Dt, 'Da, 'Db"); return; @@ -964,7 +964,7 @@ void Decoder::DecodeExt4(Instruction* instr) { } } - switch (instr->Bits(10, 1) << 1) { + switch (EXT4 | (instr->BitField(10, 1))) { case FCMPU: { Format(instr, "fcmpu 'Da, 'Db"); break; @@ -1065,7 +1065,7 @@ void Decoder::DecodeExt4(Instruction* instr) { void Decoder::DecodeExt5(Instruction* instr) { - switch (instr->Bits(4, 2) << 2) { + switch (EXT5 | (instr->BitField(4, 2))) { case RLDICL: { Format(instr, "rldicl'. 'ra, 'rs, 'sh, 'mb"); return; @@ -1083,7 +1083,7 @@ void Decoder::DecodeExt5(Instruction* instr) { return; } } - switch (instr->Bits(4, 1) << 1) { + switch (EXT5 | (instr->BitField(4, 1))) { case RLDCL: { Format(instr, "rldcl'. 
'ra, 'rs, 'sb, 'mb"); return; @@ -1093,22 +1093,22 @@ void Decoder::DecodeExt5(Instruction* instr) { } void Decoder::DecodeExt6(Instruction* instr) { - switch (instr->Bits(10, 3) << 3) { + switch (EXT6 | (instr->BitField(10, 3))) { #define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \ case opcode_name: { \ Format(instr, #name" 'Dt, 'Da, 'Db"); \ return; \ } - XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS) + PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS) #undef DECODE_XX3_INSTRUCTIONS } - switch (instr->Bits(10, 2) << 2) { + switch (EXT6 | (instr->BitField(10, 2))) { #define DECODE_XX2_INSTRUCTIONS(name, opcode_name, opcode_value) \ case opcode_name: { \ Format(instr, #name" 'Dt, 'Db"); \ return; \ } - XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS) + PPC_XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS) } #undef DECODE_XX3_INSTRUCTIONS Unknown(instr); // not used by V8 @@ -1130,7 +1130,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) { return Instruction::kInstrSize; } - switch (instr->OpcodeValue() << 26) { + uint32_t opcode = instr->OpcodeValue() << 26; + switch (opcode) { case TWI: { PrintSoftwareInterrupt(instr->SvcValue()); break; diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc index 4ff59bbaf1..ed03094dbd 100644 --- a/deps/v8/src/ppc/interface-descriptors-ppc.cc +++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc @@ -67,24 +67,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void FastNewRestParameterDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r4}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r4}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r4}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - // static const Register TypeConversionDescriptor::ArgumentRegister() { return r3; } @@ -137,15 +119,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } - -void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific( +void CallICTrampolineDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { - Register registers[] = {r4, r6}; + Register registers[] = {r4, r3, r6}; data->InitializePlatformSpecific(arraysize(registers), registers); } - -void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific( +void CallICDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {r4, r3, r6, r5}; data->InitializePlatformSpecific(arraysize(registers), registers); @@ -174,6 +154,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void CallForwardVarargsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r5 : start index (to support rest parameters) + // r4 : the target to call + Register registers[] = {r4, r5}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} void ConstructStubDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { @@ -208,13 +195,12 @@ 
void AllocateHeapNumberDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(0, nullptr, nullptr); } -#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \ - void Allocate##Type##Descriptor::InitializePlatformSpecific( \ - CallInterfaceDescriptorData* data) { \ - data->InitializePlatformSpecific(0, nullptr, nullptr); \ - } -SIMD128_TYPES(SIMD128_ALLOC_DESC) -#undef SIMD128_ALLOC_DESC +void ArrayConstructorDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite + Register registers[] = {r4, r6, r3, r5}; + data->InitializePlatformSpecific(arraysize(registers), registers, NULL); +} void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { @@ -405,6 +391,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific( }; data->InitializePlatformSpecific(arraysize(registers), registers); } + +void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + Register registers[] = { + r4, // loaded new FP + }; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc index a3e7641eb7..f2aa2e06f4 100644 --- a/deps/v8/src/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/ppc/macro-assembler-ppc.cc @@ -969,7 +969,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base, int prologue_offset) { { ConstantPoolUnavailableScope constant_pool_unavailable(this); - LoadSmiLiteral(r11, Smi::FromInt(type)); + mov(r11, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(r11); } if (FLAG_enable_embedded_constant_pool) { @@ -1022,8 +1022,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base, void MacroAssembler::EmitLoadFeedbackVector(Register vector) { LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset)); - LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset)); + LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset)); + LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset)); } @@ -1034,10 +1034,10 @@ void MacroAssembler::EnterFrame(StackFrame::Type type, // This path cannot rely on ip containing code entry. PushCommonFrame(); LoadConstantPoolPointerRegister(); - LoadSmiLiteral(ip, Smi::FromInt(type)); + mov(ip, Operand(StackFrame::TypeToMarker(type))); push(ip); } else { - LoadSmiLiteral(ip, Smi::FromInt(type)); + mov(ip, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(ip); } if (type == StackFrame::INTERNAL) { @@ -1143,7 +1143,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, // all of the pushes that have happened inside of V8 // since we were called from C code - LoadSmiLiteral(ip, Smi::FromInt(frame_type)); + mov(ip, Operand(StackFrame::TypeToMarker(frame_type))); PushCommonFrame(ip); // Reserve room for saved entry sp and code object. 
subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp)); @@ -1192,19 +1192,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); } - -void MacroAssembler::InitializeNewString(Register string, Register length, - Heap::RootListIndex map_index, - Register scratch1, Register scratch2) { - SmiTag(scratch1, length); - LoadRoot(scratch2, map_index); - StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0); - li(scratch1, Operand(String::kEmptyHashField)); - StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0); - StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0); -} - - int MacroAssembler::ActivationFrameAlignment() { #if !defined(USE_SIMULATOR) // Running on the real platform. Use the alignment as mandated by the local @@ -1566,16 +1553,17 @@ void MacroAssembler::IsObjectNameType(Register object, Register scratch, } -void MacroAssembler::DebugBreak() { - li(r3, Operand::Zero()); - mov(r4, - Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate()))); - CEntryStub ces(isolate(), 1); - DCHECK(AllowThisStubCall(&ces)); - Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT); +void MacroAssembler::MaybeDropFrames() { + // Check whether we need to drop frames to restart a function on the stack. + ExternalReference restart_fp = + ExternalReference::debug_restart_fp_address(isolate()); + mov(r4, Operand(restart_fp)); + LoadWordArith(r4, MemOperand(r4)); + cmpi(r4, Operand::Zero()); + Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET, + ne); } - void MacroAssembler::PushStackHandler() { // Adjust this code if not the case. STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); @@ -2150,33 +2138,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map, bind(&done); } - -void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, - Register scratch, Label* miss) { - // Get the prototype or initial map from the function. - LoadP(result, - FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - - // If the prototype or initial map is the hole, don't return it and - // simply miss the cache instead. This will allow us to allocate a - // prototype object on-demand in the runtime system. - LoadRoot(r0, Heap::kTheHoleValueRootIndex); - cmp(result, r0); - beq(miss); - - // If the function does not have an initial map, we're done. - Label done; - CompareObjectType(result, scratch, scratch, MAP_TYPE); - bne(&done); - - // Get the prototype from the initial map. - LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); - - // All done. - bind(&done); -} - - void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h index b43dab711a..a1d2932f43 100644 --- a/deps/v8/src/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/ppc/macro-assembler-ppc.h @@ -635,12 +635,10 @@ class MacroAssembler : public Assembler { void IsObjectNameType(Register object, Register scratch, Label* fail); - // --------------------------------------------------------------------------- - // Debugger Support - void DebugBreak(); + // Frame restart support + void MaybeDropFrames(); - // --------------------------------------------------------------------------- // Exception handling // Push a new stack handler and link into stack handler chain. @@ -751,14 +749,6 @@ class MacroAssembler : public Assembler { void GetMapConstructor(Register result, Register map, Register temp, Register temp2); - // Try to get function prototype of a function and puts the value in - // the result register. Checks that the function really is a - // function and jumps to the miss label if the fast checks fail. The - // function register will be untouched; the other registers may be - // clobbered. - void TryGetFunctionPrototype(Register function, Register result, - Register scratch, Label* miss); - // Compare object type for heap object. heap_object contains a non-Smi // whose object type should be compared with the given type. This both // sets the flags and leaves the object type in the type_reg register. @@ -1503,10 +1493,6 @@ class MacroAssembler : public Assembler { bool* definitely_mismatches, InvokeFlag flag, const CallWrapper& call_wrapper); - void InitializeNewString(Register string, Register length, - Heap::RootListIndex map_index, Register scratch1, - Register scratch2); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. void InNewSpace(Register object, Register scratch, Condition cond, // eq for new space, ne otherwise. diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc index e3761579b0..058632847d 100644 --- a/deps/v8/src/ppc/simulator-ppc.cc +++ b/deps/v8/src/ppc/simulator-ppc.cc @@ -1623,7 +1623,8 @@ void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) { // Handle execution based on instruction types. void Simulator::ExecuteExt1(Instruction* instr) { - switch (instr->Bits(10, 1) << 1) { + uint32_t opcode = EXT1 | instr->BitField(10, 1); + switch (opcode) { case MCRF: UNIMPLEMENTED(); // Not used by V8. 
case BCLRX: @@ -1678,7 +1679,7 @@ void Simulator::ExecuteExt1(Instruction* instr) { bool Simulator::ExecuteExt2_10bit(Instruction* instr) { bool found = true; - int opcode = instr->Bits(10, 1) << 1; + uint32_t opcode = EXT2 | instr->BitField(10, 1); switch (opcode) { case SRWX: { int rs = instr->RSValue(); @@ -1949,7 +1950,7 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) { if (found) return found; found = true; - opcode = instr->Bits(10, 2) << 2; + opcode = EXT2 | instr->BitField(10, 2); switch (opcode) { case SRADIX: { int ra = instr->RAValue(); @@ -1976,7 +1977,7 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) { bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) { bool found = true; - int opcode = instr->Bits(9, 1) << 1; + uint32_t opcode = EXT2 | instr->BitField(9, 1); switch (opcode) { case TW: { // used for call redirection in simulation mode @@ -2234,7 +2235,7 @@ bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) { bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) { bool found = true; - int opcode = instr->Bits(9, 1) << 1; + uint32_t opcode = EXT2 | instr->BitField(9, 1); switch (opcode) { case CNTLZWX: { int rs = instr->RSValue(); @@ -2752,7 +2753,7 @@ bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) { void Simulator::ExecuteExt2_5bit(Instruction* instr) { - int opcode = instr->Bits(5, 1) << 1; + uint32_t opcode = EXT2 | instr->BitField(5, 1); switch (opcode) { case ISEL: { int rt = instr->RTValue(); @@ -2785,9 +2786,9 @@ void Simulator::ExecuteExt2(Instruction* instr) { void Simulator::ExecuteExt3(Instruction* instr) { - int opcode = instr->Bits(10, 1) << 1; + uint32_t opcode = EXT3 | instr->BitField(10, 1); switch (opcode) { - case FCFID: { + case FCFIDS: { // fcfids int frt = instr->RTValue(); int frb = instr->RBValue(); @@ -2796,7 +2797,7 @@ void Simulator::ExecuteExt3(Instruction* instr) { set_d_register_from_double(frt, frt_val); return; } - case FCFIDU: { + case FCFIDUS: { // fcfidus int frt = instr->RTValue(); int frb = instr->RBValue(); @@ -2811,7 +2812,8 @@ void Simulator::ExecuteExt3(Instruction* instr) { void Simulator::ExecuteExt4(Instruction* instr) { - switch (instr->Bits(5, 1) << 1) { + uint32_t opcode = EXT4 | instr->BitField(5, 1); + switch (opcode) { case FDIV: { int frt = instr->RTValue(); int fra = instr->RAValue(); @@ -2898,7 +2900,7 @@ void Simulator::ExecuteExt4(Instruction* instr) { return; } } - int opcode = instr->Bits(10, 1) << 1; + opcode = EXT4 | instr->BitField(10, 1); switch (opcode) { case FCMPU: { int fra = instr->RAValue(); @@ -3236,7 +3238,8 @@ void Simulator::ExecuteExt4(Instruction* instr) { #if V8_TARGET_ARCH_PPC64 void Simulator::ExecuteExt5(Instruction* instr) { - switch (instr->Bits(4, 2) << 2) { + uint32_t opcode = EXT5 | instr->BitField(4, 2); + switch (opcode) { case RLDICL: { int ra = instr->RAValue(); int rs = instr->RSValue(); @@ -3324,7 +3327,8 @@ void Simulator::ExecuteExt5(Instruction* instr) { return; } } - switch (instr->Bits(4, 1) << 1) { + opcode = EXT5 | instr->BitField(4, 1); + switch (opcode) { case RLDCL: { int ra = instr->RAValue(); int rs = instr->RSValue(); @@ -3350,7 +3354,8 @@ void Simulator::ExecuteExt5(Instruction* instr) { #endif void Simulator::ExecuteExt6(Instruction* instr) { - switch (instr->Bits(10, 3) << 3) { + uint32_t opcode = EXT6 | instr->BitField(10, 3); + switch (opcode) { case XSADDDP: { int frt = instr->RTValue(); int fra = instr->RAValue(); @@ -3396,7 +3401,7 @@ void Simulator::ExecuteExt6(Instruction* instr) { } void 
 Simulator::ExecuteGeneric(Instruction* instr) {
-  int opcode = instr->OpcodeValue() << 26;
+  uint32_t opcode = instr->OpcodeField();
   switch (opcode) {
     case SUBFIC: {
       int rt = instr->RTValue();
@@ -3963,7 +3968,7 @@ void Simulator::ExecuteInstruction(Instruction* instr) {
   if (::v8::internal::FLAG_trace_sim) {
     Trace(instr);
   }
-  int opcode = instr->OpcodeValue() << 26;
+  uint32_t opcode = instr->OpcodeField();
   if (opcode == TWI) {
     SoftwareInterrupt(instr);
   } else {
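The recurring change across the disasm-ppc.cc and simulator-ppc.cc hunks above is that decode switches now form a full 32-bit key — one of the EXT1..EXT6 prefixes OR'ed with the relevant BitField — instead of switching on a shifted bit slice, so case labels can be the new uint32_t Opcode enumerators directly. A minimal standalone sketch of that pattern follows; it is not V8's actual Instruction class, the names (sketch::BitField, sketch::DecodeExt2) are hypothetical, and the two opcode values are copied from the XO-form list above.

// Sketch: combine the primary opcode (bits 31..26 are part of the listed
// pattern) with an extended-opcode bit field, mirroring the
// "EXT2 | instr->BitField(9, 1)" style used in the diff above.
#include <cstdint>
#include <cstdio>

namespace sketch {

// Patterns copied from the opcode lists above.
constexpr uint32_t EXT2  = 0x7C000000;  // primary opcode 31
constexpr uint32_t ADDX  = 0x7C000214;  // add  rt, ra, rb
constexpr uint32_t SUBFX = 0x7C000050;  // subf rt, ra, rb

// Extract bits hi..lo of the instruction, keeping them in place
// (same convention as Instruction::BitField in the header).
constexpr uint32_t BitField(uint32_t instr, int hi, int lo) {
  return instr & (((2u << (hi - lo)) - 1u) << lo);
}

const char* DecodeExt2(uint32_t instr) {
  // XO-form: the extended opcode sits in bits 9..1, so OR'ing the primary
  // opcode back in lets the case labels be the full 32-bit values.
  switch (EXT2 | BitField(instr, 9, 1)) {
    case ADDX:  return "add";
    case SUBFX: return "subf";
    default:    return "unknown";
  }
}

}  // namespace sketch

int main() {
  // add r3, r4, r5  ->  0x7C642A14  (RT=3, RA=4, RB=5, XO=266)
  std::printf("%s\n", sketch::DecodeExt2(0x7C642A14));  // prints "add"
  return 0;
}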
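The PPC_*_OPCODE_LIST macros introduced in constants-ppc.h are higher-order ("X") macros: each call site supplies its own V(name, NAME, value) and expands the same table into whatever it needs, as the DECLARE_INSTRUCTION expansion into the Opcode enum and the DECODE_XX3_INSTRUCTIONS expansion into disassembler cases show above. Here is a toy sketch of the technique, assuming a made-up three-entry TOY_OPCODE_LIST rather than the real tables.

// Sketch of the X-macro pattern used by the opcode lists above.
#include <cstdint>
#include <cstdio>

#define TOY_OPCODE_LIST(V)   \
  V(add, ADDX, 0x7C000214)   \
  V(subf, SUBFX, 0x7C000050) \
  V(ori, ORI, 0x60000000)

// Expansion 1: an enum whose enumerators are the full 32-bit patterns,
// analogous to "enum Opcode : uint32_t { ... }" in the diff.
enum ToyOpcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,
  TOY_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
};

// Expansion 2: map an opcode back to its mnemonic, the way the
// disassembler's DECODE_XX?_INSTRUCTIONS macros emit switch cases.
const char* ToyMnemonic(uint32_t opcode) {
  switch (opcode) {
#define DECLARE_CASE(name, opcode_name, opcode_value) \
  case opcode_name:                                   \
    return #name;
    TOY_OPCODE_LIST(DECLARE_CASE)
#undef DECLARE_CASE
    default:
      return "unknown";
  }
}

int main() {
  std::printf("%s\n", ToyMnemonic(SUBFX));  // prints "subf"
  return 0;
}

Keeping every instruction in a single table this way means the enum, the disassembler, and the simulator expansions cannot drift out of sync when an opcode is added or renamed.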