/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
#include "opintl.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M; in such cases the fields should be passed in the
   order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

/* Insert a raw field value VALUE into all fields in SELF->fields after START.
   The least significant bit goes in the final field.  */

static void
insert_all_fields_after (const aarch64_operand *self, unsigned int start,
                         aarch64_insn *code, aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > start; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  return insert_all_fields_after (self, 0, code, value);
}

/* Operand inserters.  */

/* Insert nothing.  */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
                  aarch64_insn *code ATTRIBUTE_UNUSED,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}

/* Insert register number.  */
bool
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int val = info->reg.regno - get_operand_specific_data (self);
  insert_field (self->fields[0], code, val, 0);
  return true;
}
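/* Illustrative sketch (not part of the original source; the guard macro
   AARCH64_ASM_EXAMPLES is hypothetical): how insert_fields above scatters
   one value across non-contiguous fields.  A lane index of 5 (binary 101)
   encoded in H:L:M is passed least significant field first, so M receives
   bit 0, L bit 1 and H bit 2.  */
#ifdef AARCH64_ASM_EXAMPLES
static void
example_insert_fields_hlm (void)
{
  aarch64_insn code = 0;
  /* index = 5 = 0b101 -> M = 1, L = 0, H = 1.  */
  insert_fields (&code, 5, 0, 3, FLD_M, FLD_L, FLD_H);
}
#endif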
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno,
                inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);      /* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4_11, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>  <V>
             0000       RESERVED
             xxx1       B
             xx10       H
             x100       S
             1000       D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_4B:
        case AARCH64_OPND_QLF_S_2H:
          /* L:H */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        default:
          return false;
        }
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
        /* Complex operand takes two elements.  */
        reglane_index *= 2;

      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          assert (reglane_index < 8);
          insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          assert (reglane_index < 2);
          insert_field (FLD_H, code, reglane_index, 0);
          break;
        default:
          return false;
        }
    }
  return true;
}

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bool
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return true;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: return false;
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);
  return true;
}
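/* Worked example for aarch64_ins_ldst_reglist above (illustrative, not part
   of the original source): LD1 {V0.16B, V1.16B}, [X0] loads one-element
   structures (num = 1) into two registers, so the opcode field is 0xa,
   while LD3 {V0.16B-V2.16B}, [X0] takes the num = 3 row, value 0x4.  */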
/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD
   load single structure to all lanes instructions.  */
bool
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a
       "two consecutive" form instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return true;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;      /* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;    /* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh   Q       <T>
         0000   x       SEE AdvSIMD modified immediate
         0001   0       8B
         0001   1       16B
         001x   0       4H
         001x   1       8H
         01xx   0       2S
         01xx   1       4S
         1xxx   0       RESERVED
         1xxx   1       2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (16-UInt(immh:immb))
       001x     (32-UInt(immh:immb))
       01xx     (64-UInt(immh:immb))
       1xxx     (128-UInt(immh:immb))  */
    imm = (16 << (unsigned) val) - info->imm.value;
  else
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (UInt(immh:immb)-8)
       001x     (UInt(immh:immb)-16)
       01xx     (UInt(immh:immb)-32)
       1xxx     (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned) val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
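/* Worked example for the right-shift case above (illustrative, not part of
   the original source): SSHR <Vd>.4H, <Vn>.4H, #3.  The 4H qualifier's
   standard value is 0b010, so Q = 0 and val becomes 1 after the shift;
   imm = (16 << 1) - 3 = 29 = 0b0011101, i.e. immh = 0011 (the 001x "4H/8H"
   row) and immb = 101, and a decoder recovers the shift as 32 - 29 = 3.  */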
/* Insert fields for e.g. the immediate operands in
     BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bool
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (operand_need_shift_by_three (self))
    imm >>= 3;
  if (operand_need_shift_by_four (self))
    imm >>= 4;
  insert_all_fields (self, code, imm);
  return true;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bool
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst,
                      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return true;
}

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                  aarch64_operand_error *errors
                                        ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return true;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);        /* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);        /* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);          /* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}

/* Insert fields for an 8-bit floating-point immediate.  */
bool
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return true;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
bool
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
bool
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}
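/* Worked examples for the rotation inserters above (illustrative, not part
   of the original source): the 1-bit form, used by e.g. FCADD, encodes #90
   as rot = 0 and #270 as rot = 1; the 2-bit form, used by e.g. FCMLA,
   encodes #0, #90, #180 and #270 as rot = 0..3, so #180 / 90 = 2.  */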
/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bool
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return true;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bool
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return true;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bool invert_p,
                    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                   self->fields[0]);
  return res;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bool
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC, errors);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst,
                aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: return false;
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }
  return true;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}
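/* Worked example for the logical-immediate path in aarch64_ins_limm_1 above
   (illustrative, not from the original source): #0x00ff00ff00ff00ff is the
   16-bit element 0x00ff repeated, i.e. eight ones with rotation 0, so
   aarch64_logical_immediate_p yields the packed N = 0, immr = 0b000000,
   imms = 0b100111, which the three-field insert_fields call scatters into
   self->fields[2..0], least significant field first.  */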
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;    /* Trick to enable the table-driven lookup.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind),
                0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}],
       S        <amount>
       0        [absent]
       1        #0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}

/* Encode the address operand for e.g.
     stlur <Xt>, [<Xn|SP>{, <simm>}].  */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}
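/* Worked example for aarch64_ins_addr_simm10 above (illustrative, not part
   of the original source): LDRAA X0, [X1, #-4096] scales the byte offset to
   -4096 >> 3 = -512, then inserts its top bit (imm >> 9 = -1, masked to a
   single bit) via self->fields[1] and its low nine bits (-512 & 0x1ff = 0)
   via self->fields[2].  */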
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bool
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return true;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return true;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst,
                    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
{
  /* If a system instruction, check if we have any restrictions on which
     registers it can use.  */
  if (inst->opcode->iclass == ic_system)
    {
      uint64_t opcode_flags
        = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
      uint32_t sysreg_flags
        = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);

      /* Check to see if it's read-only, else check if it's write-only.
         If it's both or unspecified, don't care.  */
      if (opcode_flags == F_SYS_READ
          && sysreg_flags
          && sysreg_flags != F_REG_READ)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be read from");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
      else if (opcode_flags == F_SYS_WRITE
               && sysreg_flags
               && sysreg_flags != F_REG_WRITE)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be written to");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
    }
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return true;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);

  /* Extra CRm mask.  */
  if (info->sysreg.flags & F_REG_IN_CRM)
    insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
  return true;
}
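/* Worked example for aarch64_ins_sysreg above (illustrative, not from the
   original source): TPIDR_EL0 is op0=3, op1=3, CRn=13, CRm=0, op2=2, so
   info->sysreg.value holds the packed op0:op1:CRn:CRm:op2 value 0xde82 and
   the five-field insert_fields call writes op2 first (least significant),
   then CRm, CRn, op1 and op0.  */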
/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}
/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
bool
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return true;
}

/* Miscellaneous encoding functions.  */

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */

static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);
  return;
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
                     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default:
      break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
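/* Worked example for encode_sizeq above (illustrative, not part of the
   original source): SHADD Vd.8H, Vn.8H, Vm.8H selects the 8H qualifier,
   whose standard value is 0b011; bit 0 goes to Q (= 1, the 128-bit form)
   and bits 2:1 go to size (= 0b01, halfword elements).  */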
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information carried in one of the related operands to do the encoding.
   The 'one' operand is not just any operand; it is one of the operands
   that has enough information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: return;
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;  /* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>        q       <t>
         0000             x       reserved
         xxx1             0       8b
         xxx1             1       16b
         xx10             0       4h
         xx10             1       8h
         x100             0       2s
         x100             1       4s
         1000             0       reserved
         1000             1       2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}

/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
    case sme_psel:
      /* The variant is encoded as part of the immediate.  */
      break;

    case sme_size_12_bhs:
      insert_field (FLD_SME_size_12, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22:
      insert_field (FLD_SME_size_22, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22_hsd:
      insert_field (FLD_SME_size_22, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_size_12_hs:
      insert_field (FLD_SME_size_12, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_sz_23:
      insert_field (FLD_SME_sz_23, &inst->value,
                    aarch64_get_variant (inst), 0);
      break;

    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sme_shift:
    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
         part of the immediate.  */
      break;

    case sve_limm:
    case sme2_mov:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
         and depend on the immediate.  They don't have a separate
         encoding.  */
      break;

    case sme_misc:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* MOD 3 For `OP_SVE_Vv_HSD`.  */
      insert_field (FLD_size, &inst->value,
                    aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      insert_fields (&inst->value,
                     (1 << aarch64_get_variant (inst)),
                     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
        variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Xd>, <Xs>, #<shift>
     is equivalent to:
   EXTR <Xd>, <Xs>, <Xs>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
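/* Worked example for convert_ror_to_extr above (illustrative, not from the
   original source): for ROR X0, X1, #4 the two copy_operand_info calls turn
   the operand list (X0, X1, #4) into (X0, X1, X1, #4), which is exactly the
   EXTR X0, X1, X1, #4 form that the real opcode's inserters then encode.  */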
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
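/* Worked example for convert_cset_to_csinc above (illustrative, not part of
   the original source): CSET W0, EQ becomes CSINC W0, WZR, WZR, NE; both
   source register numbers are forced to 0x1f (WZR) and the condition is
   inverted.  */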
/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t) 0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real form.  */
static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */
bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail,
                       aarch64_instr_sequence *insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return false;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      if (info->skip)
        {
          DEBUG_TRACE ("skip the incomplete operand %d", i);
          continue;
        }
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
          && !aarch64_insert_operand (opnd, info, &inst->value, inst,
                                      mismatch_detail))
        return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
                                               mismatch_detail, insn_sequence);
      switch (result)
        {
        case ERR_UND:
        case ERR_UNP:
        case ERR_NYI:
          return false;
        default:
          break;
        }
    }

  /* Always run the constraint verifiers; this is needed because constraints
     need to maintain a global state, regardless of whether the instruction
     has the flag set or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
                                             mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;
  return true;
}
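/* Minimal usage sketch for aarch64_opcode_encode (illustrative, not part of
   the original source; the guard macro and helper name are hypothetical).
   In practice gas populates the aarch64_inst during parsing and supplies a
   live instruction-sequence context for the constraint verifiers.  */
#ifdef AARCH64_ASM_EXAMPLES
static bool
example_encode (const aarch64_opcode *opcode, aarch64_inst *inst,
                aarch64_insn *out, aarch64_instr_sequence *seq)
{
  aarch64_operand_error error;
  /* No qualifier sequence is requested here (qlf_seq == NULL).  */
  return aarch64_opcode_encode (opcode, inst, out, NULL, &error, seq);
}
#endif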