   FCVTN{2} <Vd>.<Tb>, <Vn>.<Ta>.  */
static int
decode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_field field = {0, 0};
  aarch64_insn value;
  enum aarch64_opnd_qualifier qualifier;

  gen_sub_field (FLD_size, 0, 1, &field);
  value = extract_field_2 (&field, inst->value, 0);
  /* size[0] selects between single (4S) and double (2D) precision for
     the full-width vector operand.  */
  qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
    : AARCH64_OPND_QLF_V_2D;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN{2} <Vd>.<Tb>, <Vn>.<Ta>: the decoded qualifier belongs to
	 the source operand.  */
      inst->operands[1].qualifier = qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL{2} <Vd>.<Ta>, <Vn>.<Tb>: the decoded qualifier belongs to
	 the destination operand.  */
      inst->operands[0].qualifier = qualifier;
      break;
    default:
      assert (0);
      return 0;
    }

  return 1;
}

/* Decode size[0], i.e. bit 22, for e.g. FCVTXN <Sd>, <Dn>.  */
static int
decode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_field field = {0, 0};
  gen_sub_field (FLD_size, 0, 1, &field);
  /* size[0] must be 1 (double-to-single); anything else is not this
     instruction.  */
  if (!extract_field_2 (&field, inst->value, 0))
    return 0;
  inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
  return 1;
}

/* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static int
decode_fcvt (aarch64_inst *inst)
{
  enum aarch64_opnd_qualifier qualifier;
  aarch64_insn value;
  const aarch64_field field = {15, 2};	/* opc: destination size.  */

  value = extract_field_2 (&field, inst->value, 0);
  switch (value)
    {
    case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
    case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
    case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
    default: return 0;			/* opc == 2 is reserved.  */
    }
  inst->operands[0].qualifier = qualifier;
  return 1;
}

/* Do miscellaneous decodings that are not common enough to be driven by
   flags.  Returns 1 on success, 0 if the encoding is not valid.  */
static int
do_misc_decoding (aarch64_inst *inst)
{
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      return decode_fcvt (inst);
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      return decode_asimd_fcvt (inst);
    case OP_FCVTXN_S:
      return decode_asisd_fcvtxn (inst);
    default:
      return 0;
    }
}

/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags, decode the related
   field(s) and store the information in one of the related operands.  The
   'one' operand is not any operand but one of the operands that can
   accommodate all the information that has been decoded.  Returns 1 on
   success, 0 if the encoding is invalid.  */
static int
do_special_decoding (aarch64_inst *inst)
{
  int idx;
  aarch64_insn value;

  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      value = extract_field (FLD_cond2, inst->value, 0);
      inst->cond = get_cond_from_value (value);
    }
  /* 'sf' field.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = extract_field (FLD_sf, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
      /* When F_N is also set, the N bit must replicate sf.  */
      if ((inst->opcode->flags & F_N)
	  && extract_field (FLD_N, inst->value, 0) != value)
	return 0;
    }
  /* 'sf' field, encoded in the LSE size position.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = extract_field (FLD_lse_sz, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
    }
  /* size:Q fields.  */
  if (inst->opcode->flags & F_SIZEQ)
    return decode_sizeq (inst);

  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      value = extract_field (FLD_type, inst->value, 0);
      switch (value)
	{
	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
	default: return 0;		/* type == 2 is reserved.  */
	}
    }

  if (inst->opcode->flags & F_SSIZE)
    {
      /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as
	 part of the base opcode.  */
      aarch64_insn mask;
      enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      value = extract_field (FLD_size, inst->value, inst->opcode->mask);
      mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
      /* For most related instructions, the 'size' field is fully available
	 for operand encoding.  */
      if (mask == 0x3)
	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
      else
	{
	  /* Only part of 'size' encodes the operand; match the partial
	     value against the opcode's allowed qualifier sequences.  */
	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
					   candidates);
	  inst->operands[idx].qualifier
	    = get_qualifier_from_partial_encoding (value, candidates, mask);
	}
    }

  if (inst->opcode->flags & F_T)
    {
      /* Num of consecutive '0's on the right side of imm5<3:0>.  */
      int num = 0;
      unsigned val, Q;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      val = extract_field (FLD_imm5, inst->value, 0);
      while ((val & 0x1) == 0 && ++num <= 3)
	val >>= 1;
      if (num > 3)
	return 0;			/* imm5<3:0> == 0000: reserved.  */
      Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
      inst->operands[0].qualifier =
	get_vreg_qualifier_from_value ((num << 1) | Q);
    }

  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	{
	  /* Otherwise use the result operand, which has to be an integer
	     register.  */
	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
		  == AARCH64_OPND_CLASS_INT_REG);
	  idx = 0;
	}
      assert (idx == 0 || idx == 1);
      value = extract_field (FLD_Q, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
    }

  if (inst->opcode->flags & F_LDS_SIZE)
    {
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      /* opc<0> selects the W (1) or X (0) form of the destination.  */
      gen_sub_field (FLD_opc, 0, 1, &field);
      value = extract_field_2 (&field, inst->value, 0);
      inst->operands[0].qualifier
	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
    }

  /* Miscellaneous decoding; done as the last step.  */
  if (inst->opcode->flags & F_MISC)
    return do_misc_decoding (inst);

  return 1;
}

/* Converters converting a real opcode instruction to its alias form.
*/

/* ROR <Wd>, <Ws>, #<shift> is equivalent to:
     EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static int
convert_extr_to_ror (aarch64_inst *inst)
{
  /* The alias only applies when both source registers are the same.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* UXTL <Vd>.<Ta>, <Vn>.<Tb> is equivalent to:
     USHLL <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  /* The alias only applies for a zero shift amount.  */
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  /* imms must be 31 (W form) or 63 (X form) for the shift alias.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T> is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* Register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* Maximum shift position: 31 for the W form, 63 for the X form.  */
  int64_t val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31
    ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }
  return 0;
}

/* CINC <Wd>, <Wn>, <cond> is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_from_csel (aarch64_inst *inst)
{
  /* AL and NV share value 0xe/0xf in cond<3:1>; the alias excludes them.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond> is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  /* Both sources must be the zero register (0x1f) and <cond> must not be
     AL or NV.  */
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd|Xd>, #<imm> is equivalent to:
     MOVZ
., .. */ inst->operands[1].qualifier = qualifier; break; case OP_FCVTL: case OP_FCVTL2: /* FCVTL ., .. */ inst->operands[0].qualifier = qualifier; break; default: assert (0); return 0; } return 1; } /* Decode size[0], i.e. bit 22, for e.g. FCVTXN , . */ static int decode_asisd_fcvtxn (aarch64_inst *inst) { aarch64_field field = {0, 0}; gen_sub_field (FLD_size, 0, 1, &field); if (!extract_field_2 (&field, inst->value, 0)) return 0; inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S; return 1; } /* Decode the 'opc' field for e.g. FCVT , . */ static int decode_fcvt (aarch64_inst *inst) { enum aarch64_opnd_qualifier qualifier; aarch64_insn value; const aarch64_field field = {15, 2}; /* opc dstsize */ value = extract_field_2 (&field, inst->value, 0); switch (value) { case 0: qualifier = AARCH64_OPND_QLF_S_S; break; case 1: qualifier = AARCH64_OPND_QLF_S_D; break; case 3: qualifier = AARCH64_OPND_QLF_S_H; break; default: return 0; } inst->operands[0].qualifier = qualifier; return 1; } /* Do miscellaneous decodings that are not common enough to be driven by flags. */ static int do_misc_decoding (aarch64_inst *inst) { switch (inst->opcode->op) { case OP_FCVT: return decode_fcvt (inst); case OP_FCVTN: case OP_FCVTN2: case OP_FCVTL: case OP_FCVTL2: return decode_asimd_fcvt (inst); case OP_FCVTXN_S: return decode_asisd_fcvtxn (inst); default: return 0; } } /* Opcodes that have fields shared by multiple operands are usually flagged with flags. In this function, we detect such flags, decode the related field(s) and store the information in one of the related operands. The 'one' operand is not any operand but one of the operands that can accommadate all the information that has been decoded. */ static int do_special_decoding (aarch64_inst *inst) { int idx; aarch64_insn value; /* Condition for truly conditional executed instructions, e.g. b.cond. 
*/ if (inst->opcode->flags & F_COND) { value = extract_field (FLD_cond2, inst->value, 0); inst->cond = get_cond_from_value (value); } /* 'sf' field. */ if (inst->opcode->flags & F_SF) { idx = select_operand_for_sf_field_coding (inst->opcode); value = extract_field (FLD_sf, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); if ((inst->opcode->flags & F_N) && extract_field (FLD_N, inst->value, 0) != value) return 0; } /* 'sf' field. */ if (inst->opcode->flags & F_LSE_SZ) { idx = select_operand_for_sf_field_coding (inst->opcode); value = extract_field (FLD_lse_sz, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); } /* size:Q fields. */ if (inst->opcode->flags & F_SIZEQ) return decode_sizeq (inst); if (inst->opcode->flags & F_FPTYPE) { idx = select_operand_for_fptype_field_coding (inst->opcode); value = extract_field (FLD_type, inst->value, 0); switch (value) { case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break; case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break; case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break; default: return 0; } } if (inst->opcode->flags & F_SSIZE) { /* N.B. some opcodes like FCMGT , , #0 have the size[1] as part of the base opcode. */ aarch64_insn mask; enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM]; idx = select_operand_for_scalar_size_field_coding (inst->opcode); value = extract_field (FLD_size, inst->value, inst->opcode->mask); mask = extract_field (FLD_size, ~inst->opcode->mask, 0); /* For most related instruciton, the 'size' field is fully available for operand encoding. 
*/ if (mask == 0x3) inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value); else { get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list, candidates); inst->operands[idx].qualifier = get_qualifier_from_partial_encoding (value, candidates, mask); } } if (inst->opcode->flags & F_T) { /* Num of consecutive '0's on the right side of imm5<3:0>. */ int num = 0; unsigned val, Q; assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_SIMD_REG); /* imm5<3:0> q 0000 x reserved xxx1 0 8b xxx1 1 16b xx10 0 4h xx10 1 8h x100 0 2s x100 1 4s 1000 0 reserved 1000 1 2d */ val = extract_field (FLD_imm5, inst->value, 0); while ((val & 0x1) == 0 && ++num <= 3) val >>= 1; if (num > 3) return 0; Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask); inst->operands[0].qualifier = get_vreg_qualifier_from_value ((num << 1) | Q); } if (inst->opcode->flags & F_GPRSIZE_IN_Q) { /* Use Rt to encode in the case of e.g. STXP , , , [{,#0}]. */ idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt); if (idx == -1) { /* Otherwise use the result operand, which has to be a integer register. */ assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_INT_REG); idx = 0; } assert (idx == 0 || idx == 1); value = extract_field (FLD_Q, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); } if (inst->opcode->flags & F_LDS_SIZE) { aarch64_field field = {0, 0}; assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_INT_REG); gen_sub_field (FLD_opc, 0, 1, &field); value = extract_field_2 (&field, inst->value, 0); inst->operands[0].qualifier = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X; } /* Miscellaneous decoding; done as the last step. */ if (inst->opcode->flags & F_MISC) return do_misc_decoding (inst); return 1; } /* Converters converting a real opcode instruction to its alias form. 
*/

/* ROR <Wd>, <Ws>, #<shift> is equivalent to:
     EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static int
convert_extr_to_ror (aarch64_inst *inst)
{
  /* The alias only applies when both source registers are the same.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* UXTL <Vd>.<Ta>, <Vn>.<Tb> is equivalent to:
     USHLL <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  /* The alias only applies for a zero shift amount.  */
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  /* imms must be 31 (W form) or 63 (X form) for the shift alias.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T> is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* Register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* Maximum shift position: 31 for the W form, 63 for the X form.  */
  int64_t val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31
    ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }
  return 0;
}

/* CINC <Wd>, <Wn>, <cond> is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_from_csel (aarch64_inst *inst)
{
  /* AL and NV share value 0xe/0xf in cond<3:1>; the alias excludes them.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond> is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  /* Both sources must be the zero register (0x1f) and <cond> must not be
     AL or NV.  */
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd|Xd>, #<imm> is equivalent to:
     MOVZ
., .. */ inst->operands[0].qualifier = qualifier; break; default: assert (0); return 0; } return 1; } /* Decode size[0], i.e. bit 22, for e.g. FCVTXN , . */ static int decode_asisd_fcvtxn (aarch64_inst *inst) { aarch64_field field = {0, 0}; gen_sub_field (FLD_size, 0, 1, &field); if (!extract_field_2 (&field, inst->value, 0)) return 0; inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S; return 1; } /* Decode the 'opc' field for e.g. FCVT , . */ static int decode_fcvt (aarch64_inst *inst) { enum aarch64_opnd_qualifier qualifier; aarch64_insn value; const aarch64_field field = {15, 2}; /* opc dstsize */ value = extract_field_2 (&field, inst->value, 0); switch (value) { case 0: qualifier = AARCH64_OPND_QLF_S_S; break; case 1: qualifier = AARCH64_OPND_QLF_S_D; break; case 3: qualifier = AARCH64_OPND_QLF_S_H; break; default: return 0; } inst->operands[0].qualifier = qualifier; return 1; } /* Do miscellaneous decodings that are not common enough to be driven by flags. */ static int do_misc_decoding (aarch64_inst *inst) { switch (inst->opcode->op) { case OP_FCVT: return decode_fcvt (inst); case OP_FCVTN: case OP_FCVTN2: case OP_FCVTL: case OP_FCVTL2: return decode_asimd_fcvt (inst); case OP_FCVTXN_S: return decode_asisd_fcvtxn (inst); default: return 0; } } /* Opcodes that have fields shared by multiple operands are usually flagged with flags. In this function, we detect such flags, decode the related field(s) and store the information in one of the related operands. The 'one' operand is not any operand but one of the operands that can accommadate all the information that has been decoded. */ static int do_special_decoding (aarch64_inst *inst) { int idx; aarch64_insn value; /* Condition for truly conditional executed instructions, e.g. b.cond. */ if (inst->opcode->flags & F_COND) { value = extract_field (FLD_cond2, inst->value, 0); inst->cond = get_cond_from_value (value); } /* 'sf' field. 
*/ if (inst->opcode->flags & F_SF) { idx = select_operand_for_sf_field_coding (inst->opcode); value = extract_field (FLD_sf, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); if ((inst->opcode->flags & F_N) && extract_field (FLD_N, inst->value, 0) != value) return 0; } /* 'sf' field. */ if (inst->opcode->flags & F_LSE_SZ) { idx = select_operand_for_sf_field_coding (inst->opcode); value = extract_field (FLD_lse_sz, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); } /* size:Q fields. */ if (inst->opcode->flags & F_SIZEQ) return decode_sizeq (inst); if (inst->opcode->flags & F_FPTYPE) { idx = select_operand_for_fptype_field_coding (inst->opcode); value = extract_field (FLD_type, inst->value, 0); switch (value) { case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break; case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break; case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break; default: return 0; } } if (inst->opcode->flags & F_SSIZE) { /* N.B. some opcodes like FCMGT , , #0 have the size[1] as part of the base opcode. */ aarch64_insn mask; enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM]; idx = select_operand_for_scalar_size_field_coding (inst->opcode); value = extract_field (FLD_size, inst->value, inst->opcode->mask); mask = extract_field (FLD_size, ~inst->opcode->mask, 0); /* For most related instruciton, the 'size' field is fully available for operand encoding. */ if (mask == 0x3) inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value); else { get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list, candidates); inst->operands[idx].qualifier = get_qualifier_from_partial_encoding (value, candidates, mask); } } if (inst->opcode->flags & F_T) { /* Num of consecutive '0's on the right side of imm5<3:0>. 
*/ int num = 0; unsigned val, Q; assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_SIMD_REG); /* imm5<3:0> q 0000 x reserved xxx1 0 8b xxx1 1 16b xx10 0 4h xx10 1 8h x100 0 2s x100 1 4s 1000 0 reserved 1000 1 2d */ val = extract_field (FLD_imm5, inst->value, 0); while ((val & 0x1) == 0 && ++num <= 3) val >>= 1; if (num > 3) return 0; Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask); inst->operands[0].qualifier = get_vreg_qualifier_from_value ((num << 1) | Q); } if (inst->opcode->flags & F_GPRSIZE_IN_Q) { /* Use Rt to encode in the case of e.g. STXP , , , [{,#0}]. */ idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt); if (idx == -1) { /* Otherwise use the result operand, which has to be a integer register. */ assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_INT_REG); idx = 0; } assert (idx == 0 || idx == 1); value = extract_field (FLD_Q, inst->value, 0); inst->operands[idx].qualifier = get_greg_qualifier_from_value (value); } if (inst->opcode->flags & F_LDS_SIZE) { aarch64_field field = {0, 0}; assert (aarch64_get_operand_class (inst->opcode->operands[0]) == AARCH64_OPND_CLASS_INT_REG); gen_sub_field (FLD_opc, 0, 1, &field); value = extract_field_2 (&field, inst->value, 0); inst->operands[0].qualifier = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X; } /* Miscellaneous decoding; done as the last step. */ if (inst->opcode->flags & F_MISC) return do_misc_decoding (inst); return 1; } /* Converters converting a real opcode instruction to its alias form. */ /* ROR , , # is equivalent to: EXTR , , , #. */ static int convert_extr_to_ror (aarch64_inst *inst) { if (inst->operands[1].reg.regno == inst->operands[2].reg.regno) { copy_operand_info (inst, 2, 3); inst->operands[3].type = AARCH64_OPND_NIL; return 1; } return 0; } /* UXTL ., . is equivalent to: USHLL ., ., #0. 
*/
/* UXTL <Vd>.<Ta>, <Vn>.<Tb> is equivalent to:
     USHLL <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  /* The alias only applies for a zero shift amount.  */
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  /* imms must be 31 (W form) or 63 (X form) for the shift alias.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T> is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* Register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* Maximum shift position: 31 for the W form, 63 for the X form.  */
  int64_t val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31
    ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }
  return 0;
}

/* CINC <Wd>, <Wn>, <cond> is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_from_csel (aarch64_inst *inst)
{
  /* AL and NV share value 0xe/0xf in cond<3:1>; the alias excludes them.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond> is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  /* Both sources must be the zero register (0x1f) and <cond> must not be
     AL or NV.  */
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd|Xd>, #<imm> is equivalent to:
     MOVZ
   USHLL <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  /* The alias only applies for a zero shift amount.  */
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  /* imms must be 31 (W form) or 63 (X form) for the shift alias.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T> is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* Register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* Maximum shift position: 31 for the W form, 63 for the X form.  */
  int64_t val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31
    ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }
  return 0;
}

/* CINC <Wd>, <Wn>, <cond> is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_from_csel (aarch64_inst *inst)
{
  /* AL and NV share value 0xe/0xf in cond<3:1>; the alias excludes them.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond> is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  /* Both sources must be the zero register (0x1f) and <cond> must not be
     AL or NV.  */
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd|Xd>, #<imm> is equivalent to:
     MOVZ
   USHLL <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  /* The alias only applies for a zero shift amount.  */
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  /* imms must be 31 (W form) or 63 (X form) for the shift alias.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T> is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  /* Register width: 32 for the W form, 64 for the X form.  */
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for the immediate
	 operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }
  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  /* Maximum shift position: 31 for the W form, 63 for the X form.  */
  int64_t val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31
    ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }
  return 0;
}

/* CINC <Wd>, <Wn>, <cond> is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_from_csel (aarch64_inst *inst)
{
  /* AL and NV share value 0xe/0xf in cond<3:1>; the alias excludes them.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond> is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  /* Both sources must be the zero register (0x1f) and <cond> must not be
     AL or NV.  */
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd|Xd>, #<imm> is equivalent to:
     MOVZ