1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/raw_ostream.h"
23 //#define DEBUG(X) do { X; } while (0)
25 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
26 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
27 /// describing the operand info for each ARMInsts[i].
29 /// Together with an instruction's encoding format, we can take advantage of the
30 /// NumOperands and the OpInfo fields of the target instruction description in
31 /// the quest to build out the MCOperand list for an MCInst.
33 /// The general guideline is that with a known format, the number of dst and src
34 /// operands are well-known. The dst is built first, followed by the src
35 /// operand(s). The operands not yet used at this point are for the Implicit
36 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
37 /// defined with two components:
39 /// def pred { // Operand PredicateOperand
40 /// ValueType Type = OtherVT;
41 /// string PrintMethod = "printPredicateOperand";
42 /// string AsmOperandLowerMethod = ?;
43 /// dag MIOperandInfo = (ops i32imm, CCR);
44 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
45 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
48 /// which is manifested by the TargetOperandInfo[] of:
50 /// { 0, 0|(1<<TOI::Predicate), 0 },
51 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
53 /// So the first predicate MCOperand corresponds to the immediate part of the
54 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
55 /// corresponds to a register kind of ARM::CPSR.
57 /// For the Defs part, in the simple case of only cc_out:$s, we have:
59 /// def cc_out { // Operand OptionalDefOperand
60 /// ValueType Type = OtherVT;
61 /// string PrintMethod = "printSBitModifierOperand";
62 /// string AsmOperandLowerMethod = ?;
63 /// dag MIOperandInfo = (ops CCR);
64 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
65 /// dag DefaultOps = (ops (i32 zero_reg));
68 /// which is manifested by the one TargetOperandInfo of:
70 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
72 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
73 #include "ARMGenInstrInfo.inc"
77 const char *ARMUtils::OpcodeName(unsigned Opcode) {
78 return ARMInsts[Opcode].Name;
81 // Return the register enum Based on RegClass and the raw register number.
// NOTE(review): this extraction is missing lines — the embedded original line
// numbers are discontinuous. In particular the "static unsigned" return-type
// line, the outer `switch (RawRegister)` header, its `case N:` labels, and
// several return statements/closing braces are not visible here. The visible
// logic maps a (RegClassID, RawRegister) pair onto an ARM::* register enum,
// with one nested switch per raw register number. Do not rewrite from this
// view; restore from upstream before editing.
84 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
85 if (RegClassID == ARM::rGPRRegClassID) {
86 // Check for The register numbers 13 and 15 that are not permitted for many
87 // Thumb register specifiers.
88 if (RawRegister == 13 || RawRegister == 15) {
92 // For this purpose, we can treat rGPR as if it were GPR.
93 RegClassID = ARM::GPRRegClassID;
96 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
97 // A7.3 register encoding
102 // If one of these bits is 1, the instruction is UNDEFINED.
103 if (RegClassID == ARM::QPRRegClassID && slice(RawRegister, 0, 0) == 1) {
// Q registers are encoded as even D-register pairs, hence the halving below.
108 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
// Raw register 0: R0/D0/Q0/S0 depending on class.
114 switch (RegClassID) {
115 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
116 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
117 case ARM::DPR_VFP2RegClassID:
119 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
120 case ARM::QPR_VFP2RegClassID:
122 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
126 switch (RegClassID) {
127 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
128 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
129 case ARM::DPR_VFP2RegClassID:
131 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
132 case ARM::QPR_VFP2RegClassID:
134 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
138 switch (RegClassID) {
139 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
140 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
141 case ARM::DPR_VFP2RegClassID:
143 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
144 case ARM::QPR_VFP2RegClassID:
146 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
150 switch (RegClassID) {
151 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
152 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
153 case ARM::DPR_VFP2RegClassID:
155 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
156 case ARM::QPR_VFP2RegClassID:
158 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
162 switch (RegClassID) {
163 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
164 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
165 case ARM::DPR_VFP2RegClassID:
167 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
168 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
172 switch (RegClassID) {
173 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
174 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
175 case ARM::DPR_VFP2RegClassID:
177 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
178 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
182 switch (RegClassID) {
183 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
184 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
185 case ARM::DPR_VFP2RegClassID:
187 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
188 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
192 switch (RegClassID) {
193 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
194 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
195 case ARM::DPR_VFP2RegClassID:
197 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
198 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// From raw register 8 upward tGPR / *_8 classes no longer apply.
202 switch (RegClassID) {
203 case ARM::GPRRegClassID: return ARM::R8;
204 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
205 case ARM::QPRRegClassID: return ARM::Q8;
206 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
210 switch (RegClassID) {
211 case ARM::GPRRegClassID: return ARM::R9;
212 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
213 case ARM::QPRRegClassID: return ARM::Q9;
214 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
218 switch (RegClassID) {
219 case ARM::GPRRegClassID: return ARM::R10;
220 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
221 case ARM::QPRRegClassID: return ARM::Q10;
222 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
226 switch (RegClassID) {
227 case ARM::GPRRegClassID: return ARM::R11;
228 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
229 case ARM::QPRRegClassID: return ARM::Q11;
230 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
234 switch (RegClassID) {
235 case ARM::GPRRegClassID: return ARM::R12;
236 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
237 case ARM::QPRRegClassID: return ARM::Q12;
238 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// Raw registers 13/14/15 map to SP/LR/PC in the GPR class.
242 switch (RegClassID) {
243 case ARM::GPRRegClassID: return ARM::SP;
244 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
245 case ARM::QPRRegClassID: return ARM::Q13;
246 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
250 switch (RegClassID) {
251 case ARM::GPRRegClassID: return ARM::LR;
252 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
253 case ARM::QPRRegClassID: return ARM::Q14;
254 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
258 switch (RegClassID) {
259 case ARM::GPRRegClassID: return ARM::PC;
260 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
261 case ARM::QPRRegClassID: return ARM::Q15;
262 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Raw registers 16-31 exist only for the D and S register files.
266 switch (RegClassID) {
267 case ARM::DPRRegClassID: return ARM::D16;
268 case ARM::SPRRegClassID: return ARM::S16;
272 switch (RegClassID) {
273 case ARM::DPRRegClassID: return ARM::D17;
274 case ARM::SPRRegClassID: return ARM::S17;
278 switch (RegClassID) {
279 case ARM::DPRRegClassID: return ARM::D18;
280 case ARM::SPRRegClassID: return ARM::S18;
284 switch (RegClassID) {
285 case ARM::DPRRegClassID: return ARM::D19;
286 case ARM::SPRRegClassID: return ARM::S19;
290 switch (RegClassID) {
291 case ARM::DPRRegClassID: return ARM::D20;
292 case ARM::SPRRegClassID: return ARM::S20;
296 switch (RegClassID) {
297 case ARM::DPRRegClassID: return ARM::D21;
298 case ARM::SPRRegClassID: return ARM::S21;
302 switch (RegClassID) {
303 case ARM::DPRRegClassID: return ARM::D22;
304 case ARM::SPRRegClassID: return ARM::S22;
308 switch (RegClassID) {
309 case ARM::DPRRegClassID: return ARM::D23;
310 case ARM::SPRRegClassID: return ARM::S23;
314 switch (RegClassID) {
315 case ARM::DPRRegClassID: return ARM::D24;
316 case ARM::SPRRegClassID: return ARM::S24;
320 switch (RegClassID) {
321 case ARM::DPRRegClassID: return ARM::D25;
322 case ARM::SPRRegClassID: return ARM::S25;
326 switch (RegClassID) {
327 case ARM::DPRRegClassID: return ARM::D26;
328 case ARM::SPRRegClassID: return ARM::S26;
332 switch (RegClassID) {
333 case ARM::DPRRegClassID: return ARM::D27;
334 case ARM::SPRRegClassID: return ARM::S27;
338 switch (RegClassID) {
339 case ARM::DPRRegClassID: return ARM::D28;
340 case ARM::SPRRegClassID: return ARM::S28;
344 switch (RegClassID) {
345 case ARM::DPRRegClassID: return ARM::D29;
346 case ARM::SPRRegClassID: return ARM::S29;
350 switch (RegClassID) {
351 case ARM::DPRRegClassID: return ARM::D30;
352 case ARM::SPRRegClassID: return ARM::S30;
356 switch (RegClassID) {
357 case ARM::DPRRegClassID: return ARM::D31;
358 case ARM::SPRRegClassID: return ARM::S31;
// Fallthrough: no mapping exists for this (class, raw register) pair.
362 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
363 // Encoding error. Mark the builder with error code != 0.
368 ///////////////////////////////
370 // Utility Functions //
372 ///////////////////////////////
374 // Extract/Decode Rd: Inst{15-12}.
375 static inline unsigned decodeRd(uint32_t insn) {
376 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
379 // Extract/Decode Rn: Inst{19-16}.
380 static inline unsigned decodeRn(uint32_t insn) {
381 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
384 // Extract/Decode Rm: Inst{3-0}.
385 static inline unsigned decodeRm(uint32_t insn) {
386 return (insn & ARMII::GPRRegMask);
389 // Extract/Decode Rs: Inst{11-8}.
390 static inline unsigned decodeRs(uint32_t insn) {
391 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
394 static inline unsigned getCondField(uint32_t insn) {
395 return (insn >> ARMII::CondShift);
398 static inline unsigned getIBit(uint32_t insn) {
399 return (insn >> ARMII::I_BitShift) & 1;
402 static inline unsigned getAM3IBit(uint32_t insn) {
403 return (insn >> ARMII::AM3_I_BitShift) & 1;
406 static inline unsigned getPBit(uint32_t insn) {
407 return (insn >> ARMII::P_BitShift) & 1;
410 static inline unsigned getUBit(uint32_t insn) {
411 return (insn >> ARMII::U_BitShift) & 1;
414 static inline unsigned getPUBits(uint32_t insn) {
415 return (insn >> ARMII::U_BitShift) & 3;
418 static inline unsigned getSBit(uint32_t insn) {
419 return (insn >> ARMII::S_BitShift) & 1;
422 static inline unsigned getWBit(uint32_t insn) {
423 return (insn >> ARMII::W_BitShift) & 1;
426 static inline unsigned getDBit(uint32_t insn) {
427 return (insn >> ARMII::D_BitShift) & 1;
430 static inline unsigned getNBit(uint32_t insn) {
431 return (insn >> ARMII::N_BitShift) & 1;
434 static inline unsigned getMBit(uint32_t insn) {
435 return (insn >> ARMII::M_BitShift) & 1;
438 // See A8.4 Shifts applied to a register.
439 // A8.4.2 Register controlled shifts.
441 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
442 // into llvm enums for shift opcode. The API clients should pass in the value
443 // encoded with two bits, so the assert stays to signal a wrong API usage.
445 // A8-12: DecodeRegShift()
446 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
448 default: assert(0 && "No such value"); return ARM_AM::no_shift;
449 case 0: return ARM_AM::lsl;
450 case 1: return ARM_AM::lsr;
451 case 2: return ARM_AM::asr;
452 case 3: return ARM_AM::ror;
456 // See A8.4 Shifts applied to a register.
457 // A8.4.1 Constant shifts.
459 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
460 // encodings into the intended ShiftOpc and shift amount.
462 // A8-11: DecodeImmShift()
463 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
// NOTE(review): the interior of this function (the switch that remaps
// ShOp/ShImm for the special Imm5 == 0 encodings, per ARM ARM DecodeImmShift)
// is missing from this extraction — only the fragment below is visible.
// Restore from upstream before editing; do not reconstruct from this view.
467 case ARM_AM::no_shift:
471 ShOp = ARM_AM::no_shift;
483 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
484 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
485 // clients should pass in the value encoded with two bits, so the assert stays
486 // to signal a wrong API usage.
487 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
489 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
490 case 1: return ARM_AM::ia; // P=0 U=1
491 case 3: return ARM_AM::ib; // P=1 U=1
492 case 0: return ARM_AM::da; // P=0 U=0
493 case 2: return ARM_AM::db; // P=1 U=0
497 ////////////////////////////////////////////
499 // Disassemble function definitions //
501 ////////////////////////////////////////////
503 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
504 /// instr into a list of MCOperands in the appropriate order, with possible dst,
505 /// followed by possible src(s).
507 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
508 /// the CPSR, is factored into ARMBasicMCBuilder's method named
509 /// TryPredicateAndSBitModifier.
511 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
512 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
514 assert(0 && "Unexpected pseudo instruction!");
519 // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
522 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
525 // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
526 // if dHi == dLo then UNPREDICTABLE;
// Sanity-check the register fields of a multiply-form instruction and return
// true for the UNPREDICTABLE encodings listed above.
// NOTE(review): lines are missing from this extraction — the `switch (Opcode)`
// header, the `return true;`/`return false;` statements after each check, and
// some case labels (presumably ARM::USAD8 and ARM::UMULL, judging by the
// comment block that follows this function) are not visible. Restore from
// upstream before editing.
527 static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
528 unsigned R19_16 = slice(insn, 19, 16);
529 unsigned R15_12 = slice(insn, 15, 12);
530 unsigned R11_8 = slice(insn, 11, 8);
531 unsigned R3_0 = slice(insn, 3, 0);
534 // Did we miss an opcode?
535 assert(0 && "Unexpected opcode!");
537 case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
538 case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
539 case ARM::SMMLA: case ARM::SMMLS: case ARM::USADA8:
540 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
543 case ARM::MUL: case ARM::SMMUL: case ARM::SMULBB: case ARM::SMULBT:
544 case ARM::SMULTB: case ARM::SMULTT: case ARM::SMULWB: case ARM::SMULWT:
545 case ARM::SMUAD: case ARM::SMUADX:
546 // A8.6.167 SMLAD & A8.6.172 SMLSD
547 case ARM::SMLAD: case ARM::SMLADX: case ARM::SMLSD: case ARM::SMLSDX:
549 if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
552 case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
554 case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB: case ARM::SMLALTT:
555 case ARM::SMLALD: case ARM::SMLALDX: case ARM::SMLSLD: case ARM::SMLSLDX:
556 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
558 if (R19_16 == R15_12)
564 // Multiply Instructions.
565 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS,
566 // SMLAD, SMLADX, SMLSD, SMLSDX, USADA8 (for convenience):
567 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
568 // But note that register checking for {SMLAD, SMLADX, SMLSD, SMLSDX} is
569 // only for {d, n, m}.
571 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT, SMUAD, SMUADX,
572 // USAD8 (for convenience):
573 // Rd{19-16} Rn{3-0} Rm{11-8}
575 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
576 // SMLALD, SMLADLX, SMLSLD, SMLSLDX:
577 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
579 // The mapping of the multiply registers to the "regular" ARM registers, where
580 // there are convenience decoder functions, is:
// Build the MCOperand list for a multiply-form instruction: dst reg(s) first
// (RdLo then RdHi for long multiplies), then the Rn/Rm sources, then the
// optional accumulator Ra.
// NOTE(review): lines are missing from this extraction — the `if` guards,
// `return false;`/`return true;` statements, decode* argument tails of the
// addOperand calls, OpIdx updates, and closing braces are not all visible.
// Restore from upstream before editing.
586 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
587 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
589 const TargetInstrDesc &TID = ARMInsts[Opcode];
590 unsigned short NumDefs = TID.getNumDefs();
591 const TargetOperandInfo *OpInfo = TID.OpInfo;
592 unsigned &OpIdx = NumOpsAdded;
596 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
598 && OpInfo[0].RegClass == ARM::GPRRegClassID
599 && OpInfo[1].RegClass == ARM::GPRRegClassID
600 && OpInfo[2].RegClass == ARM::GPRRegClassID
601 && "Expect three register operands");
603 // Sanity check for the register encodings.
604 if (BadRegsMulFrm(Opcode, insn))
607 // Instructions with two destination registers have RdLo{15-12} first.
609 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
610 "Expect 4th register operand");
611 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
616 // The destination register: RdHi{19-16} or Rd{19-16}.
617 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
620 // The two src registers: Rn{3-0}, then Rm{11-8}.
621 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
623 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
627 // Many multiply instructions (e.g., MLA) have three src registers.
628 // The third register operand is Ra{15-12}.
629 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
630 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
638 // Helper routines for disassembly of coprocessor instructions.
640 static bool LdStCopOpcode(unsigned Opcode) {
641 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
642 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
646 static bool CoprocessorOpcode(unsigned Opcode) {
647 if (LdStCopOpcode(Opcode))
653 case ARM::CDP: case ARM::CDP2:
654 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
655 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
659 static inline unsigned GetCoprocessor(uint32_t insn) {
660 return slice(insn, 11, 8);
662 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
663 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
665 static inline unsigned GetCopOpc2(uint32_t insn) {
666 return slice(insn, 7, 5);
668 static inline unsigned GetCopOpc(uint32_t insn) {
669 return slice(insn, 7, 4);
671 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
674 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
676 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
678 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
680 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
682 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
686 // LDC_OPTION: cop CRd Rn imm8
688 // STC_OPTION: cop CRd Rn imm8
// Build the MCOperand list for a coprocessor instruction in one of the operand
// orders documented above.
// NOTE(review): lines are missing from this extraction — the early-return
// bodies, the if/else structure selecting between the ld/st, MCRR/MRRC, and
// CDP/MCR/MRC paths, several addOperand argument tails, and closing braces are
// not all visible. Restore from upstream before editing.
691 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
692 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
694 assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");
696 unsigned &OpIdx = NumOpsAdded;
698 // if coproc == '101x' then SEE "Advanced SIMD and VFP"
699 // But since the special instructions have more explicit encoding bits
700 // specified, if coproc == 10 or 11, we should reject it as invalid.
701 unsigned coproc = GetCoprocessor(insn);
702 if ((Opcode == ARM::MCR || Opcode == ARM::MCRR ||
703 Opcode == ARM::MRC || Opcode == ARM::MRRC) &&
704 (coproc == 10 || coproc == 11)) {
705 DEBUG(errs() << "Encoding error: coproc == 10 or 11 for MCR[R]/MR[R]C\n");
709 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
710 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
712 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
713 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
714 bool LdStCop = LdStCopOpcode(Opcode);
715 bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);
720 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
724 MI.addOperand(MCOperand::CreateImm(coproc));
728 // Unindex if P:W = 0b00 --> _OPTION variant
729 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
731 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
733 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
738 MI.addOperand(MCOperand::CreateReg(0));
739 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
740 const TargetInstrDesc &TID = ARMInsts[Opcode];
742 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
743 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
744 ARM_AM::no_shift, IndexMode);
745 MI.addOperand(MCOperand::CreateImm(Offset));
748 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
752 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
753 : GetCopOpc1(insn, NoGPR)));
757 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
758 : MCOperand::CreateReg(
759 getRegisterEnum(B, ARM::GPRRegClassID,
764 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
765 getRegisterEnum(B, ARM::GPRRegClassID,
767 : MCOperand::CreateImm(decodeRn(insn)));
769 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
774 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
782 // Branch Instructions.
783 // BL: SignExtend(Imm24:'00', 32)
784 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
785 // SMC: ZeroExtend(imm4, 32)
786 // SVC: ZeroExtend(Imm24, 32)
788 // Various coprocessor instructions are assigned BrFrm arbitrarily.
789 // Delegates to DisassembleCoprocessor() helper function.
792 // MSR/MSRsys: Rm mask=Inst{19-16}
794 // MSRi/MSRsysi: so_imm
795 // SRSW/SRS: ldstm_mode:$amode mode_imm
796 // RFEW/RFE: ldstm_mode:$amode Rn
// Build the MCOperand list for branch-form instructions, plus the assorted
// system instructions (MRS/MSR/SRS/RFE) that share this format.
// NOTE(review): lines are missing from this extraction — the `return true;`
// statements closing each special-case `if`, the addOperand argument tails,
// the `Imm32` declaration, and closing braces are not all visible. Restore
// from upstream before editing.
797 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
798 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
800 if (CoprocessorOpcode(Opcode))
801 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
803 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
804 if (!OpInfo) return false;
806 // MRS and MRSsys take one GPR reg Rd.
807 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
808 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
809 "Reg operand expected");
810 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
815 // BXJ takes one GPR reg Rm.
816 if (Opcode == ARM::BXJ) {
817 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
818 "Reg operand expected");
819 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
824 // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
825 // bit 4, and the special register fields in bits 3-0.
826 if (Opcode == ARM::MSR) {
827 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
828 "Reg operand expected");
829 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
830 slice(insn, 19, 16) /* Special Reg */ ));
831 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
836 // MSRi take a mask, followed by one so_imm operand. The mask contains the
837 // R Bit in bit 4, and the special register fields in bits 3-0.
838 if (Opcode == ARM::MSRi) {
839 // A5.2.11 MSR (immediate), and hints & B6.1.6 MSR (immediate)
840 // The hints instructions have more specific encodings, so if mask == 0,
841 // we should reject this as an invalid instruction.
842 if (slice(insn, 19, 16) == 0)
844 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
845 slice(insn, 19, 16) /* Special Reg */ ));
846 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
847 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
848 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
849 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
850 unsigned Imm = insn & 0xFF;
851 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
855 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
856 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
857 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
858 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
860 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
861 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
863 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
869 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
870 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
871 "Unexpected Opcode");
873 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
876 if (Opcode == ARM::SMC) {
877 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
878 Imm32 = slice(insn, 3, 0);
879 } else if (Opcode == ARM::SVC) {
880 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
881 Imm32 = slice(insn, 23, 0);
883 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
884 unsigned Imm26 = slice(insn, 23, 0) << 2;
885 //Imm32 = signextend<signed int, 26>(Imm26);
886 Imm32 = SignExtend32<26>(Imm26);
889 MI.addOperand(MCOperand::CreateImm(Imm32));
895 // Misc. Branch Instructions.
// Build the MCOperand list for the miscellaneous branch instructions
// (BX_RET/MOVPCLR, BLX/BX register forms, BLXi immediate form).
// NOTE(review): lines are missing from this extraction — the early `return
// true;` bodies, part of the BLX/BX opcode list, the addOperand argument
// tails, and the tail of the function (after the BLXi case) are not visible.
// Restore from upstream before editing.
898 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
899 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
901 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
902 if (!OpInfo) return false;
904 unsigned &OpIdx = NumOpsAdded;
908 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
909 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
912 // BLX and BX take one GPR reg.
913 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
915 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
916 "Reg operand expected");
917 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
923 // BLXi takes imm32 (the PC offset).
924 if (Opcode == ARM::BLXi) {
925 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
926 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
927 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
928 int Imm32 = SignExtend32<26>(Imm26);
929 MI.addOperand(MCOperand::CreateImm(Imm32));
937 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
938 uint32_t lsb = slice(insn, 11, 7);
939 uint32_t msb = slice(insn, 20, 16);
942 DEBUG(errs() << "Encoding error: msb < lsb\n");
946 for (uint32_t i = lsb; i <= msb; ++i)
952 // Standard data-processing instructions allow PC as a register specifier,
953 // but we should reject other DPFrm instructions with PC as registers.
954 static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
957 // Did we miss an opcode?
958 if (decodeRd(insn) == 15 | decodeRn(insn) == 15 || decodeRm(insn) == 15) {
959 DEBUG(errs() << "DPFrm with bad reg specifier(s)\n");
962 case ARM::ADCrr: case ARM::ADDSrr: case ARM::ADDrr: case ARM::ANDrr:
963 case ARM::BICrr: case ARM::CMNzrr: case ARM::CMPrr: case ARM::EORrr:
964 case ARM::ORRrr: case ARM::RSBrr: case ARM::RSCrr: case ARM::SBCrr:
965 case ARM::SUBSrr: case ARM::SUBrr: case ARM::TEQrr: case ARM::TSTrr:
970 // A major complication is the fact that some of the saturating add/subtract
971 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
972 // They are QADD, QDADD, QDSUB, and QSUB.
// Build the MCOperand list for a data-processing instruction: optional dst
// reg, special cases for BFC/BFI/SBFX/UBFX, then Rn (or Rm for the saturating
// ops above) and operand 2 (register, MOV imm16, or rotated so_imm).
// NOTE(review): lines are missing from this extraction — `return false;`
// statements after the sanity checks, OpIdx updates, the `uint32_t mask`
// declaration, some addOperand argument tails, `return true;` exits, and
// closing braces are not all visible. Restore from upstream before editing.
973 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
974 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
976 const TargetInstrDesc &TID = ARMInsts[Opcode];
977 unsigned short NumDefs = TID.getNumDefs();
978 bool isUnary = isUnaryDP(TID.TSFlags);
979 const TargetOperandInfo *OpInfo = TID.OpInfo;
980 unsigned &OpIdx = NumOpsAdded;
984 // Disassemble register def if there is one.
985 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
986 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
991 // Now disassemble the src operands.
995 // Special-case handling of BFC/BFI/SBFX/UBFX.
996 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
997 // A8.6.17 BFC & A8.6.18 BFI
999 if (decodeRd(insn) == 15)
1001 MI.addOperand(MCOperand::CreateReg(0));
1002 if (Opcode == ARM::BFI) {
1003 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1008 if (!getBFCInvMask(insn, mask))
1011 MI.addOperand(MCOperand::CreateImm(mask));
1015 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
1016 // Sanity check Rd and Rm.
1017 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1019 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1021 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1022 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
1027 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1028 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1030 // BinaryDP has an Rn operand.
1032 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1033 "Reg operand expected");
1034 MI.addOperand(MCOperand::CreateReg(
1035 getRegisterEnum(B, ARM::GPRRegClassID,
1036 RmRn ? decodeRm(insn) : decodeRn(insn))));
1040 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1041 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1042 MI.addOperand(MCOperand::CreateReg(0));
1046 // Now disassemble operand 2.
1047 if (OpIdx >= NumOps)
1050 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1051 // We have a reg/reg form.
1052 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1053 // routed here as well.
1054 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1055 if (BadRegsDPFrm(Opcode, insn))
1057 MI.addOperand(MCOperand::CreateReg(
1058 getRegisterEnum(B, ARM::GPRRegClassID,
1059 RmRn? decodeRn(insn) : decodeRm(insn))));
1061 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1062 // These two instructions don't allow d as 15.
1063 if (decodeRd(insn) == 15)
1065 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1066 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1067 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
1068 MI.addOperand(MCOperand::CreateImm(Imm16));
1071 // We have a reg/imm form.
1072 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1073 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1074 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1075 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1076 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1077 unsigned Imm = insn & 0xFF;
1078 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
/// DisassembleDPSoRegFrm - Build the MCOperand list for a Data Processing
/// (shifted register) instruction: optional Rd def, optional Rn (binary DP),
/// then the three-component so_reg operand (Rm, Rs-or-reg0, shift opc/amount).
/// Inst{4} selects register-controlled (Rs) vs. constant shift; returns false
/// on encodings rejected as invalid/unpredictable.
1085 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1086     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1088   const TargetInstrDesc &TID = ARMInsts[Opcode];
1089   unsigned short NumDefs = TID.getNumDefs();
1090   bool isUnary = isUnaryDP(TID.TSFlags);
1091   const TargetOperandInfo *OpInfo = TID.OpInfo;
     // NumOpsAdded is aliased so every addOperand below is reflected back to
     // the caller through the reference.
1092   unsigned &OpIdx = NumOpsAdded;
1096   // Disassemble register def if there is one.
1097   if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1098     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1103   // Disassemble the src operands.
1104   if (OpIdx >= NumOps)
1107   // BinaryDP has an Rn operand.
1109     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1110            "Reg operand expected");
1111     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1116   // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1117   if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1118     MI.addOperand(MCOperand::CreateReg(0));
1122   // Disassemble operand 2, which consists of three components.
1123   if (OpIdx + 2 >= NumOps)
1126   assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1127          (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1128          (OpInfo[OpIdx+2].RegClass < 0) &&
1129          "Expect 3 reg operands");
1131   // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1132   unsigned Rs = slice(insn, 4, 4);
1134   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1137     // If Inst{7} != 0, we should reject this insn as an invalid encoding.
1138     if (slice(insn, 7, 7))
1141     // A8.6.3 ADC (register-shifted register)
1142     // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
1144     // This also accounts for shift instructions (register) where, fortunately,
1145     // Inst{19-16} = 0b0000.
1146     // A8.6.89 LSL (register)
1147     // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
1148     if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
1149         decodeRm(insn) == 15 || decodeRs(insn) == 15)
1152     // Register-controlled shifts: [Rm, Rs, shift].
1153     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1155     // Inst{6-5} encodes the shift opcode.
1156     ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
     // Shift amount 0 here: for register-controlled shifts the amount lives in
     // Rs, so only the shift opcode is packed into the so_reg immediate.
1157     MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1159     // Constant shifts: [Rm, reg0, shift_imm].
1160     MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1161     // Inst{6-5} encodes the shift opcode.
1162     ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1163     // Inst{11-7} encodes the imm5 shift amount.
1164     unsigned ShImm = slice(insn, 11, 7);
1166     // A8.4.1.  Possible rrx or shift amount of 32...
     // getImmShiftSE canonicalizes the (opc, amount) pair in place for the
     // special encodings (e.g., ror #0 meaning rrx).
1167     getImmShiftSE(ShOp, ShImm);
1168     MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
/// DisassembleLdStFrm - Shared worker for word/byte load and store (Addressing
/// Mode #2).  Emits: optional base-writeback def (pre/post-indexed store),
/// the dst/src register, optional writeback after a load's dst, the base
/// register, then either an imm12 offset (I bit == 0) or a +/- Rm with an
/// optional constant shift.  isStore distinguishes the two operand layouts.
1175 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1176     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1178   const TargetInstrDesc &TID = ARMInsts[Opcode];
1179   bool isPrePost = isPrePostLdSt(TID.TSFlags);
1180   const TargetOperandInfo *OpInfo = TID.OpInfo;
1181   if (!OpInfo) return false;
1183   unsigned &OpIdx = NumOpsAdded;
1187   assert(((!isStore && TID.getNumDefs() > 0) ||
1188           (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1189          && "Invalid arguments");
1191   // Operand 0 of a pre- and post-indexed store is the address base writeback.
1192   if (isPrePost && isStore) {
1193     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1194            "Reg operand expected");
1195     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1200   // Disassemble the dst/src operand.
1201   if (OpIdx >= NumOps)
1204   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1205          "Reg operand expected");
1206   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1210   // After dst of a pre- and post-indexed load is the address base writeback.
1211   if (isPrePost && !isStore) {
1212     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1213            "Reg operand expected");
1214     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1219   // Disassemble the base operand.
1220   if (OpIdx >= NumOps)
1223   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1224          "Reg operand expected");
1225   assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1226          && "Index mode or tied_to operand expected");
1227   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1231   // For reg/reg form, base reg is followed by +/- reg shop imm.
1232   // For immediate form, it is followed by +/- imm12.
1233   // See also ARMAddressingModes.h (Addressing Mode #2).
1234   if (OpIdx + 1 >= NumOps)
1237   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1238   unsigned IndexMode =
1239     (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
     // I bit == 0 selects the immediate-offset form.
1240   if (getIBit(insn) == 0) {
1241     // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1242     // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1245       MI.addOperand(MCOperand::CreateReg(0));
1249     unsigned Imm12 = slice(insn, 11, 0);
1250     if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1251         Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1252       // Disassemble the 12-bit immediate offset, which is the second operand in
1253       // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
       // addrmode_imm12 wants a plain signed offset, not an AM2 packed opcode.
1254       int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1255       MI.addOperand(MCOperand::CreateImm(Offset));
1257       // Disassemble the 12-bit immediate offset, which is the second operand in
1258       // $am2offset => (ops GPR, i32imm).
1259       unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1261       MI.addOperand(MCOperand::CreateImm(Offset));
1265     // If Inst{25} = 1 and Inst{4} != 0, we should reject this as invalid.
1266     if (slice(insn,4,4) == 1)
1269     // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1270     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1272     // Inst{6-5} encodes the shift opcode.
1273     ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1274     // Inst{11-7} encodes the imm5 shift amount.
1275     unsigned ShImm = slice(insn, 11, 7);
1277     // A8.4.1.  Possible rrx or shift amount of 32...
1278     getImmShiftSE(ShOp, ShImm);
1279     MI.addOperand(MCOperand::CreateImm(
1280                     ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
/// DisassembleLdFrm - Entry point for load instructions; forwards to the
/// shared LdSt worker with isStore = false.
1287 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1288     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1289   return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
/// DisassembleStFrm - Entry point for store instructions; forwards to the
/// shared LdSt worker with isStore = true.
1292 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1293     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1294   return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
/// HasDualReg - Returns true for the dual-register load/store opcodes
/// (LDRD/STRD and their pre/post-indexed variants), which transfer an even/odd
/// register pair and therefore need a second Rt operand (Rt+1) filled in.
1297 static bool HasDualReg(unsigned Opcode) {
1301   case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1302   case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
/// DisassembleLdStMiscFrm - Shared worker for the "miscellaneous" loads and
/// stores (halfword, signed byte, dual-register; Addressing Mode #3).  Operand
/// order mirrors DisassembleLdStFrm, but the offset is either a split imm8
/// (imm4H:imm4L) or a bare +/- Rm, selected by the AM3 I bit.
1307 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1308     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1310   const TargetInstrDesc &TID = ARMInsts[Opcode];
1311   bool isPrePost = isPrePostLdSt(TID.TSFlags);
1312   const TargetOperandInfo *OpInfo = TID.OpInfo;
1313   if (!OpInfo) return false;
1315   unsigned &OpIdx = NumOpsAdded;
1319   assert(((!isStore && TID.getNumDefs() > 0) ||
1320           (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1321          && "Invalid arguments");
1323   // Operand 0 of a pre- and post-indexed store is the address base writeback.
1324   if (isPrePost && isStore) {
1325     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1326            "Reg operand expected");
1327     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1332   // Disassemble the dst/src operand.
1333   if (OpIdx >= NumOps)
1336   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1337          "Reg operand expected");
1338   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1342   // Fill in LDRD and STRD's second operand Rt operand.
     // Dual-register transfers use the consecutive register Rt+1.
1343   if (HasDualReg(Opcode)) {
1344     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1345                                                        decodeRd(insn) + 1)));
1349   // After dst of a pre- and post-indexed load is the address base writeback.
1350   if (isPrePost && !isStore) {
1351     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1352            "Reg operand expected");
1353     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1358   // Disassemble the base operand.
1359   if (OpIdx >= NumOps)
1362   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1363          "Reg operand expected");
1364   assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1365          && "Offset mode or tied_to operand expected");
1366   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1370   // For reg/reg form, base reg is followed by +/- reg.
1371   // For immediate form, it is followed by +/- imm8.
1372   // See also ARMAddressingModes.h (Addressing Mode #3).
1373   if (OpIdx + 1 >= NumOps)
1376   assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1377          (OpInfo[OpIdx+1].RegClass < 0) &&
1378          "Expect 1 reg operand followed by 1 imm operand");
1380   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1381   unsigned IndexMode =
1382     (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
     // AM3 I bit == 1: immediate form (reg0 placeholder + packed imm8 offset).
1383   if (getAM3IBit(insn) == 1) {
1384     MI.addOperand(MCOperand::CreateReg(0));
1386     // Disassemble the 8-bit immediate offset.
1387     unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1388     unsigned Imm4L = insn & 0xF;
1389     unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1391     MI.addOperand(MCOperand::CreateImm(Offset));
1393     // Disassemble the offset reg (Rm).
1394     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1396     unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
1397     MI.addOperand(MCOperand::CreateImm(Offset));
/// DisassembleLdMiscFrm - Entry point for misc loads (halfword/signed
/// byte/dual); forwards to the shared worker with isStore = false.
1404 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1405     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1406   return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
/// DisassembleStMiscFrm - Entry point for misc stores (halfword/dual);
/// forwards to the shared worker with isStore = true.
1410 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1411     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1412   return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1415 // The algorithm for disassembly of LdStMulFrm is different from others because
1416 // it explicitly populates the two predicate operands after the base register.
1417 // After that, we need to populate the reglist with each affected register
1418 // encoded as an MCOperand.
/// DisassembleLdStMulFrm - Disassemble LDM/STM.  Emits the optional writeback
/// base, the base register, the two predicate operands (cond imm + CPSR), and
/// finally one register operand per set bit in the 16-bit reglist Inst{15-0}.
1419 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1420     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1422   assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1425   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1427   // Writeback to base, if necessary.
     // The _UPD variants define the updated base as an extra leading operand.
1428   if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1429       Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1430       Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1431       Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1432     MI.addOperand(MCOperand::CreateReg(Base));
1436   // Add the base register operand.
1437   MI.addOperand(MCOperand::CreateReg(Base));
1439   // Handling the two predicate operands before the reglist.
1440   int64_t CondVal = getCondField(insn);
1443   MI.addOperand(MCOperand::CreateImm(CondVal));
1444   MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1448   // Fill the variadic part of reglist.
     // Inst{15-0}: bit i set means Ri participates in the transfer.
1449   unsigned RegListBits = insn & ((1 << 16) - 1);
1450   for (unsigned i = 0; i < 16; ++i) {
1451     if ((RegListBits >> i) & 1) {
1452       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1461 // LDREX, LDREXB, LDREXH: Rd Rn
1462 // LDREXD: Rd Rd+1 Rn
1463 // STREX, STREXB, STREXH: Rd Rm Rn
1464 // STREXD: Rd Rm Rm+1 Rn
1466 // SWP, SWPB: Rd Rm Rn
/// DisassembleLdStExFrm - Disassemble the load/store exclusive family (LDREX*,
/// STREX*) plus SWP/SWPB.  Emits Rd, then for stores the source Rm, for the
/// doubleword variants the consecutive pair register, and finally the Rn
/// pointer operand.  Inst{20} distinguishes load (1) from store (0).
1467 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1468     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1470   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1471   if (!OpInfo) return false;
1473   unsigned &OpIdx = NumOpsAdded;
1478          && OpInfo[0].RegClass == ARM::GPRRegClassID
1479          && OpInfo[1].RegClass == ARM::GPRRegClassID
1480          && "Expect 2 reg operands");
1482   bool isStore = slice(insn, 20, 20) == 0;
1483   bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1485   // Add the destination operand.
1486   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1490   // Store register Exclusive needs a source operand.
1492     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
       // STREXD: the second half of the even/odd source pair is Rm+1.
1497       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1498                                                          decodeRm(insn)+1)));
       // LDREXD: the second half of the destination pair is Rd+1.
1502       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1503                                                          decodeRd(insn)+1)));
1507   // Finally add the pointer operand.
1508   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1515 // Misc. Arithmetic Instructions.
1517 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1518 // RBIT, REV, REV16, REVSH: Rd Rm
/// DisassembleArithMiscFrm - Disassemble misc. arithmetic instructions:
/// PKHBT/PKHTB (Rd Rn Rm + LSL/ASR #imm5) and RBIT/REV/REV16/REVSH (Rd Rm).
/// Rejects any encoding using r15; emits the trailing shift immediate only for
/// the three-register (PKH*) form.
1519 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1520     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1522   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1523   unsigned &OpIdx = NumOpsAdded;
1528          && OpInfo[0].RegClass == ARM::GPRRegClassID
1529          && OpInfo[1].RegClass == ARM::GPRRegClassID
1530          && "Expect 2 reg operands");
1532   bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1534   // Sanity check the registers, which should not be 15.
1535   if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1537   if (ThreeReg && decodeRn(insn) == 15)
1540   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1545     assert(NumOps >= 4 && "Expect >= 4 operands");
1546     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1551   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1555   // If there is still an operand info left which is an immediate operand, add
1556   // an additional imm5 LSL/ASR operand.
1557   if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1558       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1559     // Extract the 5-bit immediate field Inst{11-7}.
1560     unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
     // PKHBT implies LSL, PKHTB implies ASR; otherwise no shift is encoded.
1561     ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
1562     if (Opcode == ARM::PKHBT)
1564     else if (Opcode == ARM::PKHTB)
1566     getImmShiftSE(Opc, ShiftAmt);
1567     MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
1574 /// DisassembleSatFrm - Disassemble saturate instructions:
1575 /// SSAT, SSAT16, USAT, and USAT16.
/// DisassembleSatFrm - Disassemble SSAT/SSAT16/USAT/USAT16: Rd, saturate
/// position Inst{20-16} (biased +1 for the signed variants), Rm, and — for the
/// 4-operand forms — a packed LSL/ASR shift operand from Inst{6} / Inst{11-7}.
/// Rejects r15 in Rd or Rm (UNPREDICTABLE per the ARM ARM).
1576 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1577     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1580   // if d == 15 || n == 15 then UNPREDICTABLE;
1581   if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1584   const TargetInstrDesc &TID = ARMInsts[Opcode];
1585   NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1587   // Disassemble register def.
1588   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1591   unsigned Pos = slice(insn, 20, 16);
     // SSAT/SSAT16 encode saturate_to minus one; the elided statement here
     // presumably re-biases Pos for the signed forms — confirm against the
     // surrounding file.
1592   if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1594   MI.addOperand(MCOperand::CreateImm(Pos));
1596   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1599   if (NumOpsAdded == 4) {
1600     ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1601     // Inst{11-7} encodes the imm5 shift amount.
1602     unsigned ShAmt = slice(insn, 11, 7);
1604     // A8.6.183.  Possible ASR shift amount of 32...
1605     if (Opc == ARM_AM::asr)
1608         Opc = ARM_AM::no_shift;
1610     MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1615 // Extend instructions.
1616 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1617 // The 2nd operand register is Rn and the 3rd operand regsiter is Rm for the
1618 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
/// DisassembleExtFrm - Disassemble the SXT*/UXT* extend instructions:
/// Rd [Rn] Rm [rot_imm].  Rn appears only for the three-register (accumulate)
/// form; the trailing rotate operand is Inst{11-10} scaled to 0/8/16/24 bits.
/// Rejects r15 in Rd or Rm.
1619 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1620     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1623   // if d == 15 || m == 15 then UNPREDICTABLE;
1624   if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1627   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1628   unsigned &OpIdx = NumOpsAdded;
1633          && OpInfo[0].RegClass == ARM::GPRRegClassID
1634          && OpInfo[1].RegClass == ARM::GPRRegClassID
1635          && "Expect 2 reg operands");
1637   bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1639   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1644     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1649   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1653   // If there is still an operand info left which is an immediate operand, add
1654   // an additional rotate immediate operand.
1655   if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1656       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1657     // Extract the 2-bit rotate field Inst{11-10}.
1658     unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1659     // Rotation by 8, 16, or 24 bits.
1660     MI.addOperand(MCOperand::CreateImm(rot << 3));
1667 /////////////////////////////////////
1669 // Utility Functions For VFP //
1671 /////////////////////////////////////
1673 // Extract/Decode Dd/Sd:
1675 // SP => d = UInt(Vd:D)
1676 // DP => d = UInt(D:Vd)
// Decode the destination VFP register number:
//   single precision: Sd = Vd:D (Inst{15-12}:Inst{22})
//   double precision: Dd = D:Vd (Inst{22}:Inst{15-12})
1677 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1678   return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1679                  : (decodeRd(insn) | getDBit(insn) << 4);
1682 // Extract/Decode Dn/Sn:
1684 // SP => n = UInt(Vn:N)
1685 // DP => n = UInt(N:Vn)
// Decode the first-source VFP register number:
//   single precision: Sn = Vn:N (Inst{19-16}:Inst{7})
//   double precision: Dn = N:Vn (Inst{7}:Inst{19-16})
1686 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1687   return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1688                  : (decodeRn(insn) | getNBit(insn) << 4);
1691 // Extract/Decode Dm/Sm:
1693 // SP => m = UInt(Vm:M)
1694 // DP => m = UInt(M:Vm)
// Decode the second-source VFP register number:
//   single precision: Sm = Vm:M (Inst{3-0}:Inst{5})
//   double precision: Dm = M:Vm (Inst{5}:Inst{3-0})
1695 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1696   return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1697                  : (decodeRm(insn) | getMBit(insn) << 4);
// VFPExpandImm - Expand the 8-bit VFP modified-immediate 'byte' into the full
// N-bit (32 or 64) IEEE bit pattern: sign = byte{7}, an exponent derived from
// the inverted byte{6} plus replication, and the fraction from byte{5-0}.
// See the ARM ARM's VFPExpandImm() pseudocode.
1701 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1702   assert(N == 32 || N == 64);
1705   unsigned bit6 = slice(byte, 6, 6);
     // 32-bit layout: sign<<31, fraction into bits 24-19, exponent filler below.
1707     Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1709       Result |= 0x1f << 25;
1711       Result |= 0x1 << 30;
     // 64-bit layout: same scheme with an 11-bit exponent field.
1713     Result = (uint64_t)slice(byte, 7, 7) << 63 |
1714              (uint64_t)slice(byte, 5, 0) << 48;
1716       Result |= 0xffULL << 54;
1718       Result |= 0x1ULL << 62;
1720   return APInt(N, Result);
1723 // VFP Unary Format Instructions:
1725 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1726 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1727 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
/// DisassembleVFPUnaryFrm - Disassemble single-source VFP instructions:
/// add the destination [D|S]Rd, return early for the compare-with-zero forms
/// (which have no register source), otherwise add the [D|S]Rm source.  The
/// precision of each operand is taken from its OpInfo register class, so
/// mixed-precision converts (VCVTDS/VCVTSD) are handled naturally.
1728 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1729     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1731   assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1733   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1734   unsigned &OpIdx = NumOpsAdded;
1738   unsigned RegClass = OpInfo[OpIdx].RegClass;
1739   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1740          "Reg operand expected");
1741   bool isSP = (RegClass == ARM::SPRRegClassID);
1743   MI.addOperand(MCOperand::CreateReg(
1744                   getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1747   // Early return for compare with zero instructions.
1748   if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1749       || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
     // Re-query the class: Rm's precision may differ from Rd's (VCVTDS/VCVTSD).
1752   RegClass = OpInfo[OpIdx].RegClass;
1753   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1754          "Reg operand expected");
1755   isSP = (RegClass == ARM::SPRRegClassID);
1757   MI.addOperand(MCOperand::CreateReg(
1758                   getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1764 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1765 // Some of them have operand constraints which tie the first operand in the
1766 // InOperandList to that of the dst. As far as asm printing is concerned, this
1767 // tied_to operand is simply skipped.
/// DisassembleVFPBinaryFrm - Disassemble two-source VFP arithmetic.  All
/// operands share one register class (SPR or DPR).  Order: Rd, an optional
/// skipped tied_to placeholder (reg0), Rn, Rm.
1768 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1769     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1771   assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1773   const TargetInstrDesc &TID = ARMInsts[Opcode];
1774   const TargetOperandInfo *OpInfo = TID.OpInfo;
1775   unsigned &OpIdx = NumOpsAdded;
1779   unsigned RegClass = OpInfo[OpIdx].RegClass;
1780   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1781          "Reg operand expected");
1782   bool isSP = (RegClass == ARM::SPRRegClassID);
1784   MI.addOperand(MCOperand::CreateReg(
1785                   getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1788   // Skip tied_to operand constraint.
     // A reg0 stand-in keeps operand indices aligned with the operand info;
     // the printer skips it.
1789   if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1790     assert(NumOps >= 4 && "Expect >=4 operands");
1791     MI.addOperand(MCOperand::CreateReg(0));
1795   MI.addOperand(MCOperand::CreateReg(
1796                   getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1799   MI.addOperand(MCOperand::CreateReg(
1800                   getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1806 // A8.6.295 vcvt (floating-point <-> integer)
1807 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1808 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1810 // A8.6.297 vcvt (floating-point and fixed-point)
1811 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
/// DisassembleVFPConv1Frm - Disassemble VCVT between floating point and
/// integer or fixed point.  Fixed-point forms (Inst{17} == 1) emit
/// Rd, a tied copy of Rd, and the #fbits immediate; FP<->integer forms emit
/// a dst/src pair where the integer side is always single precision.
1812 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1813     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1815   assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1817   const TargetInstrDesc &TID = ARMInsts[Opcode];
1818   const TargetOperandInfo *OpInfo = TID.OpInfo;
1819   if (!OpInfo) return false;
1821   bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1822   bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1823   unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1827     assert(NumOps >= 3 && "Expect >= 3 operands");
1828     int size = slice(insn, 7, 7) == 0 ? 16 : 32;
     // fbits = size - UInt(imm4:i), with imm4 = Inst{3-0} and i = Inst{5}.
1829     int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1830     MI.addOperand(MCOperand::CreateReg(
1831                     getRegisterEnum(B, RegClassID,
1832                                     decodeVFPRd(insn, SP))));
1834     assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1835            "Tied to operand expected");
     // Operand 1 is tied to operand 0; duplicate the dst reg operand.
1836     MI.addOperand(MI.getOperand(0));
1838     assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1839            !OpInfo[2].isOptionalDef() && "Imm operand expected");
1840     MI.addOperand(MCOperand::CreateImm(fbits));
1845     // The Rd (destination) and Rm (source) bits have different interpretations
1846     // depending on their single-precisonness.
1848     if (slice(insn, 18, 18) == 1) { // to_integer operation
1849       d = decodeVFPRd(insn, true /* Is Single Precision */);
1850       MI.addOperand(MCOperand::CreateReg(
1851                       getRegisterEnum(B, ARM::SPRRegClassID, d)));
1852       m = decodeVFPRm(insn, SP);
1853       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
1855       d = decodeVFPRd(insn, SP);
1856       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1857       m = decodeVFPRm(insn, true /* Is Single Precision */);
1858       MI.addOperand(MCOperand::CreateReg(
1859                       getRegisterEnum(B, ARM::SPRRegClassID, m)));
1867 // VMOVRS - A8.6.330
1868 // Rt => Rd; Sn => UInt(Vn:N)
/// DisassembleVFPConv2Frm - Disassemble VMOVRS (A8.6.330): GPR destination Rt
/// followed by the SPR source Sn = Vn:N.
1869 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1870     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1872   assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1874   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1876   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1877                                                      decodeVFPRn(insn, true))));
1882 // VMOVRRD - A8.6.332
1883 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1885 // VMOVRRS - A8.6.331
1886 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
/// DisassembleVFPConv3Frm - Disassemble VMOVRRD/VMOVRRS: two GPR destinations
/// (Rt, Rt2) followed by either a consecutive SPR pair (Sm, Sm+1) or a single
/// DPR source, chosen by the third operand's register class.
1887 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1888     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1890   assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1892   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1893   unsigned &OpIdx = NumOpsAdded;
1895   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1897   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
     // VMOVRRS: source is the consecutive single-precision pair Sm, Sm+1.
1901   if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1902     unsigned Sm = decodeVFPRm(insn, true);
1903     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1905     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
     // VMOVRRD: source is a single double-precision register Dm.
1909     MI.addOperand(MCOperand::CreateReg(
1910                     getRegisterEnum(B, ARM::DPRRegClassID,
1911                                     decodeVFPRm(insn, false))));
1917 // VMOVSR - A8.6.330
1918 // Rt => Rd; Sn => UInt(Vn:N)
/// DisassembleVFPConv4Frm - Disassemble VMOVSR (A8.6.330): SPR destination
/// Sn = Vn:N followed by the GPR source Rt.
1919 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1920     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1922   assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1924   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1925                                                      decodeVFPRn(insn, true))));
1926   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1932 // VMOVDRR - A8.6.332
1933 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1935 // VMOVRRS - A8.6.331
1936 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
/// DisassembleVFPConv5Frm - Disassemble VMOVDRR/VMOVSRR: the VFP destination
/// first (either a consecutive SPR pair Sm, Sm+1 or a single DPR), followed
/// by the two GPR sources Rt, Rt2.  Mirrors DisassembleVFPConv3Frm with the
/// operand order reversed.
1937 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1938     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1940   assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1942   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1943   unsigned &OpIdx = NumOpsAdded;
     // SPR-pair destination (Sm, Sm+1) vs. a single DPR destination.
1947   if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1948     unsigned Sm = decodeVFPRm(insn, true);
1949     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1951     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1955     MI.addOperand(MCOperand::CreateReg(
1956                     getRegisterEnum(B, ARM::DPRRegClassID,
1957                                     decodeVFPRm(insn, false))));
1961   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1963   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1969 // VFP Load/Store Instructions.
1970 // VLDRD, VLDRS, VSTRD, VSTRS
/// DisassembleVFPLdStFrm - Disassemble VLDRD/VLDRS/VSTRD/VSTRS: the Dd/Sd
/// register, the GPR base Rn, and an Addressing Mode #5 packed operand built
/// from the U bit and the 8-bit word-offset immediate Inst{7-0}.
1971 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1972     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1974   assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
1976   bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
1977   unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1979   // Extract Dd/Sd for operand 0.
1980   unsigned RegD = decodeVFPRd(insn, isSPVFP);
1982   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
1984   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1985   MI.addOperand(MCOperand::CreateReg(Base));
1987   // Next comes the AM5 Opcode.
1988   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1989   unsigned char Imm8 = insn & 0xFF;
1990   MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1997 // VFP Load/Store Multiple Instructions.
1998 // We have an optional write back reg, the base, and two predicate operands.
1999 // It is then followed by a reglist of either DPR(s) or SPR(s).
2001 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
/// DisassembleVFPLdStMulFrm - Disassemble VLDM/VSTM (D or S variants, with or
/// without base update).  Emits optional writeback base, the base register,
/// the two predicate operands, then a run of Regs consecutive D/S registers
/// starting at Dd/Sd, where Regs comes from the imm8 field (imm8 for SPR,
/// imm8/2 for DPR).  Rejects degenerate or out-of-range register counts.
2002 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2003     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2005   assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
2007   unsigned &OpIdx = NumOpsAdded;
2011   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2013   // Writeback to base, if necessary.
2014   if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
2015       Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
2016       Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
2017       Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
2018     MI.addOperand(MCOperand::CreateReg(Base));
2022   MI.addOperand(MCOperand::CreateReg(Base));
2024   // Handling the two predicate operands before the reglist.
2025   int64_t CondVal = getCondField(insn);
2028   MI.addOperand(MCOperand::CreateImm(CondVal));
2029   MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
2033   bool isSPVFP = (Opcode == ARM::VLDMSIA ||
2034                   Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
2035                   Opcode == ARM::VSTMSIA ||
2036                   Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
2037   unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2040   unsigned RegD = decodeVFPRd(insn, isSPVFP);
2042   // Fill the variadic part of reglist.
     // Double-precision transfers encode two words per register, hence Imm8/2.
2043   unsigned char Imm8 = insn & 0xFF;
2044   unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
2046   // Apply some sanity checks before proceeding.
2047   if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
2050   for (unsigned i = 0; i < Regs; ++i) {
2051     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
2059 // Misc. VFP Instructions.
2060 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
2061 // FCONSTD (DPR and a VFPf64Imm operand)
2062 // FCONSTS (SPR and a VFPf32Imm operand)
2063 // VMRS/VMSR (GPR operand)
/// DisassembleVFPMiscFrm - Disassemble the misc. VFP instructions: FMSTAT
/// (no operands, early return), VMRS/VMSR (GPR operand), and FCONSTD/FCONSTS,
/// whose 8-bit modified immediate is expanded to the actual f64/f32 value.
2064 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2065     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2067   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2068   unsigned &OpIdx = NumOpsAdded;
2072   if (Opcode == ARM::FMSTAT)
2075   assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
2077   unsigned RegEnum = 0;
     // Operand 0's register class decides how the destination bits decode.
2078   switch (OpInfo[0].RegClass) {
2079   case ARM::DPRRegClassID:
2080     RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
2082   case ARM::SPRRegClassID:
2083     RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
2085   case ARM::GPRRegClassID:
2086     RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
2089     assert(0 && "Invalid reg class id");
2093   MI.addOperand(MCOperand::CreateReg(RegEnum));
2096   // Extract/decode the f64/f32 immediate.
2097   if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2098       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2099     // The asm syntax specifies the floating point value, not the 8-bit literal.
     // The encoded byte is imm4H:imm4L = Inst{19-16}:Inst{3-0}.
2100     APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
2101                                 Opcode == ARM::FCONSTD ? 64 : 32);
2102     APFloat immFP = APFloat(immRaw, true);
2103     double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
2104       immFP.convertToFloat();
2105     MI.addOperand(MCOperand::CreateFPImm(imm));
2113 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
2114 #include "ThumbDisassemblerCore.h"
2116 /////////////////////////////////////////////////////
2118 // Utility Functions For ARM Advanced SIMD //
2120 /////////////////////////////////////////////////////
2122 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
2123 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
2125 // A7.3 Register encoding
2127 // Extract/Decode NEON D/Vd:
2129 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2130 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2131 // handling it in the getRegisterEnum() utility function.
2132 // D = Inst{22}, Vd = Inst{15-12}
// Decode the 5-bit NEON destination register number D:Vd from
// Inst{22}:Inst{15-12} (quadword adjustment is done in getRegisterEnum()).
2133 static unsigned decodeNEONRd(uint32_t insn) {
2134   return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2135     | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2138 // Extract/Decode NEON N/Vn:
2140 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2141 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2142 // handling it in the getRegisterEnum() utility function.
2143 // N = Inst{7}, Vn = Inst{19-16}
// Decode the 5-bit NEON first-source register number N:Vn from
// Inst{7}:Inst{19-16} (quadword adjustment is done in getRegisterEnum()).
2144 static unsigned decodeNEONRn(uint32_t insn) {
2145   return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2146     | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2149 // Extract/Decode NEON M/Vm:
2151 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2152 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2153 // handling it in the getRegisterEnum() utility function.
2154 // M = Inst{5}, Vm = Inst{3-0}
2155 static unsigned decodeNEONRm(uint32_t insn) {
2156 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2157 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2168 } // End of unnamed namespace
2170 // size field -> Inst{11-10}
2171 // index_align field -> Inst{7-4}
2173 // The Lane Index interpretation depends on the Data Size:
2174 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2175 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2176 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2178 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
static unsigned decodeLaneIndex(uint32_t insn) {
  // size = Inst{11-10}; index_align = Inst{7-4}.
  const unsigned size = (insn >> 10) & 3;
  assert((size == 0 || size == 1 || size == 2) &&
         "Encoding error: size should be either 0, 1, or 2");

  // The lane index lives in the top (3 - size) bits of index_align:
  // discard one low bit, then 'size' more.
  const unsigned index_align = (insn >> 4) & 0xF;
  return index_align >> (size + 1);
}
2188 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2189 // op = Inst{5}, cmode = Inst{11-8}
2190 // i = Inst{24} (ARM architecture)
2191 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2192 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2193 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2194 unsigned char op = (insn >> 5) & 1;
2195 unsigned char cmode = (insn >> 8) & 0xF;
2196 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2197 ((insn >> 16) & 7) << 4 |
2199 return (op << 12) | (cmode << 8) | Imm8;
2202 // A8.6.339 VMUL, VMULL (by scalar)
2203 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2204 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// Decode the restricted Dm register number for by-scalar multiplies; only
// ESize16 (Vm<2:0>) and ESize32 (Vm<3:0>) are legal element sizes here.
static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
  // Any other element size is an encoding error.
  assert(0 && "Unreachable code!");
2217 // A8.6.339 VMUL, VMULL (by scalar)
2218 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2219 // ESize32 => index = Inst{5} (M) D0-D15
// Decode the scalar lane index for by-scalar multiplies (A8.6.339).
static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
  // ESize16: index = M:Vm<3> = Inst{5}:Inst{3}.
  return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
  // ESize32: index = M = Inst{5}.
  return (insn >> 5) & 1;
  // Any other element size is an encoding error.
  assert(0 && "Unreachable code!");
2232 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2233 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
static unsigned decodeVCVTFractionBits(uint32_t insn) {
  // imm6 = Inst{21-16} encodes (64 - <fbits>); recover <fbits>.
  const unsigned imm6 = (insn >> 16) & 0x3F;
  return 64 - imm6;
}
2238 // A8.6.302 VDUP (scalar)
2239 // ESize8 => index = Inst{19-17}
2240 // ESize16 => index = Inst{19-18}
2241 // ESize32 => index = Inst{19}
// Decode the scalar lane index for VDUP (scalar); the field width depends on
// the element size (see the table above).
static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
  // ESize8: index = Inst{19-17}.
  return (insn >> 17) & 7;
  // ESize16: index = Inst{19-18}.
  return (insn >> 18) & 3;
  // ESize32: index = Inst{19}.
  return (insn >> 19) & 1;
  // No other element size is legal for VDUP (scalar).
  assert(0 && "Unspecified element size!");
2256 // A8.6.328 VMOV (ARM core register to scalar)
2257 // A8.6.329 VMOV (scalar to ARM core register)
2258 // ESize8 => index = Inst{21:6-5}
2259 // ESize16 => index = Inst{21:6}
2260 // ESize32 => index = Inst{21}
// Decode the scalar lane index for VMOV to/from scalar; the field layout
// depends on the element size (see the table above).
static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
  // ESize8: index = Inst{21}:Inst{6-5}.
  return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
  // ESize16: index = Inst{21}:Inst{6}.
  return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
  // ESize32: index = Inst{21}.
  return ((insn >> 21) & 1);
  // No other element size is legal here.
  assert(0 && "Unspecified element size!");
2275 // Imm6 = Inst{21-16}, L = Inst{7}
2277 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2279 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2280 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2281 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2282 // '1xxxxxx' => esize = 64; shift_amount = imm6
2284 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2286 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2287 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2288 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2289 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
// Decode the shift amount for the NEON vector-shift instructions from
// Inst{7:21-16} (L:imm6); the element size is inferred from the position of
// the leading one-bit of L:imm6 (see the table above), and the shift amount
// is then derived from imm6 relative to that element size.
static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
  ElemSize esize = ESizeNA;
  unsigned L = (insn >> 7) & 1;         // L = Inst{7}
  unsigned imm6 = (insn >> 16) & 0x3F;  // imm6 = Inst{21-16}
  // imm6 = '001xxxx' => esize = 16.
  else if (imm6 >> 4 == 1)
  // imm6 = '01xxxxx' => esize = 32.
  else if (imm6 >> 5 == 1)
  // None of the leading-one patterns matched: bad encoding.
  assert(0 && "Wrong encoding of Inst{7:21-16}!");
  // Left shifts: amount = imm6 - esize (imm6 itself for esize 64).
  return esize == ESize64 ? imm6 : (imm6 - esize);
  // Right shifts: amount = 2*esize - imm6 (64 - imm6 for esize 64).
  return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2314 // Imm4 = Inst{11-8}
static unsigned decodeN3VImm(uint32_t insn) {
  // Imm4 = Inst{11-8}.
  return (insn & 0xF00) >> 8;
}
2320 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2322 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2324 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2326 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2328 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
// Build the MCOperand list for the NEON load/store (VLD*/VST*, including the
// single-lane forms).  'Store' selects the store operand order, 'DblSpaced'
// makes consecutive D registers step by 2, and 'alignment' (in bytes) is
// emitted as the addrmode6 alignment immediate.  NumOpsAdded is updated with
// the number of operands appended.
static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
    unsigned alignment, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  // At least one DPR register plus addressing mode #6.
  assert(NumOps >= 3 && "Expect >= 3 operands");

  // Alias, so every operand appended below is reflected back to the caller.
  unsigned &OpIdx = NumOpsAdded;

  // We have homogeneous NEON registers for Load/Store.
  unsigned RegClass = 0;

  // Double-spaced registers have increments of 2.
  unsigned Inc = DblSpaced ? 2 : 1;

  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned Rd = decodeNEONRd(insn);

  // A7.7.1 Advanced SIMD addressing mode.

  // LLVM Addressing Mode #6.
  unsigned RmEnum = 0;
  RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);

  // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
  // then possible lane index.
  assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
  // addrmode6 := (ops GPR:$addr, i32imm)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment

  // Optional increment register (Rm).
  MI.addOperand(MCOperand::CreateReg(RmEnum));

  assert(OpIdx < NumOps &&
         (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");

  // Walk the homogeneous run of DPR/QPR list operands.
  RegClass = OpInfo[OpIdx].RegClass;
  while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, RegClass, Rd)));

  // Handle possible lane index.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));

  // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
  // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
  RegClass = OpInfo[0].RegClass;

  while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(B, RegClass, Rd)));

  // Possible writeback register.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
  // addrmode6 := (ops GPR:$addr, i32imm)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment

  // Optional increment register (Rm).
  MI.addOperand(MCOperand::CreateReg(RmEnum));

  // TIED_TO registers are not encoded; emit reg 0 placeholders for them.
  while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
    assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
           "Tied to operand expected");
    MI.addOperand(MCOperand::CreateReg(0));

  // Handle possible lane index.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));

  // Accessing registers past the end of the NEON register file is not
2460 // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
// Check the index_align field for the single-lane VLD/VST forms and derive
// the implied alignment through the 'alignment' out-parameter.  The grouped
// branches below appear to handle elem = 1..4 in turn (matching the A8.6
// references above) -- NOTE(review): confirm the grouping against the manual.
static bool Align4OneLaneInst(unsigned elem, unsigned size,
    unsigned index_align, unsigned & alignment) {
    // size == 0 (8-bit): index_align<0> must be zero.
    return slice(index_align, 0, 0) == 0;
  else if (size == 1) {
    bits = slice(index_align, 1, 0);
    if (bits != 0 && bits != 1)
  } else if (size == 2) {
    bits = slice(index_align, 2, 0);
    if (bits != 0 && bits != 3)
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) == 1)
  } else if (size == 2) {
    if (slice(index_align, 1, 1) != 0)
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) != 0)
    if (slice(index_align, 0, 0) != 0)
  } else if (size == 2) {
    if (slice(index_align, 1, 0) != 0)
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) == 1)
  } else if (size == 2) {
    bits = slice(index_align, 1, 0);
2547 // If L (Inst{21}) == 0, store instructions.
2548 // Find out about double-spaced-ness of the Opcode and pass it on to
2549 // DisassembleNLdSt0().
// Dispatch a VLDn/VSTn opcode: derive the element count (n) from the opcode
// name, validate/derive the alignment and double-spacing from the encoding,
// then delegate to DisassembleNLdSt0().  Inst{21} (the L bit) == 0 selects
// the store operand order.
static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const StringRef Name = ARMInsts[Opcode].Name;
  bool DblSpaced = false;
  // 0 represents standard alignment, i.e., unaligned data access.
  unsigned alignment = 0;

  unsigned elem = 0; // legal values: {1, 2, 3, 4}
  if (Name.startswith("VST1") || Name.startswith("VLD1"))
  if (Name.startswith("VST2") || Name.startswith("VLD2"))
  if (Name.startswith("VST3") || Name.startswith("VLD3"))
  if (Name.startswith("VST4") || Name.startswith("VLD4"))

  if (Name.find("LN") != std::string::npos) {
    // To one lane instructions.
    // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).

    // Utility function takes number of elements, size, and index_align.
    if (!Align4OneLaneInst(elem,
                           slice(insn, 11, 10),

    // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
    if (Name.endswith("16") || Name.endswith("16_UPD"))
      DblSpaced = slice(insn, 5, 5) == 1;

    // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
    if (Name.endswith("32") || Name.endswith("32_UPD"))
      DblSpaced = slice(insn, 6, 6) == 1;

    // Multiple n-element structures with type encoded as Inst{11-8}.
    // See, for example, A8.6.316 VLD4 (multiple 4-element structures).

    // Inst{5-4} encodes alignment.
    unsigned align = slice(insn, 5, 4);
        alignment = 64; break;
        alignment = 128; break;
        alignment = 256; break;

    unsigned type = slice(insn, 11, 8);
    // Reject UNDEFINED instructions based on type and align.
    // Plus set DblSpaced flag where appropriate.
      // A8.6.307 & A8.6.391
      if ((type == 7 && slice(align, 1, 1) == 1) ||
          (type == 10 && align == 3) ||
          (type == 6 && slice(align, 1, 1) == 1))

      // n == 2 && type == 0b1001 -> DblSpaced = true
      // A8.6.310 & A8.6.393
      if ((type == 8 || type == 9) && align == 3)
      DblSpaced = (type == 9);

      // n == 3 && type == 0b0101 -> DblSpaced = true
      // A8.6.313 & A8.6.395
      if (slice(insn, 7, 6) == 3 || slice(align, 1, 1) == 1)
      DblSpaced = (type == 5);

      // n == 4 && type == 0b0001 -> DblSpaced = true
      // A8.6.316 & A8.6.397
      if (slice(insn, 7, 6) == 3)
      DblSpaced = (type == 1);

  // alignment is in bits here; NLdSt0 takes it in bytes.
  return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
                           slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
2651 // Qd/Dd imm src(=Qd/Dd)
// One register and a modified immediate value (A7.4.6):
//   Qd/Dd imm [src(=Qd/Dd) for the VBICi/VORRi variants]
// The element size used to decode the modified immediate is derived from
// the opcode.
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                     decodeNEONRd(insn))));

  ElemSize esize = ESizeNA;
  // 8-bit elements.
  case ARM::VMOVv16i8:
  // 16-bit elements.
  case ARM::VMOVv4i16:
  case ARM::VMOVv8i16:
  case ARM::VMVNv4i16:
  case ARM::VMVNv8i16:
  case ARM::VBICiv4i16:
  case ARM::VBICiv8i16:
  case ARM::VORRiv4i16:
  case ARM::VORRiv8i16:
  // 32-bit elements.
  case ARM::VMOVv2i32:
  case ARM::VMOVv4i32:
  case ARM::VMVNv2i32:
  case ARM::VMVNv4i32:
  case ARM::VBICiv2i32:
  case ARM::VBICiv4i32:
  case ARM::VORRiv2i32:
  case ARM::VORRiv4i32:
  // 64-bit elements.
  case ARM::VMOVv1i64:
  case ARM::VMOVv2i64:
    assert(0 && "Unexpected opcode!");

  // One register and a modified immediate value.
  // Add the imm operand.
  MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));

  // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
      (OpInfo[2].RegClass == ARM::DPRRegClassID ||
       OpInfo[2].RegClass == ARM::QPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                       decodeNEONRd(insn))));
2725 N2V_VectorConvert_Between_Float_Fixed
2727 } // End of unnamed namespace
2729 // Vector Convert [between floating-point and fixed-point]
2730 // Qd/Dd Qm/Dm [fbits]
2732 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2733 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2736 // Vector Move Long:
2739 // Vector Move Narrow:
// Common handler for the NEON two-register forms (Vd Vm [imm]).
// 'Flag' selects the optional trailing immediate: none, the VDUP lane index,
// or the VCVT fraction bits.
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opc];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 2 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  ElemSize esize = ESizeNA;
  if (Flag == N2V_VectorDupLane) {
    // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
    assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
           "Unexpected Opcode");
    esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
       : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand: not encoded, use reg 0 as a placeholder.
    MI.addOperand(MCOperand::CreateReg(0));

  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));

  // VZIP and others have two TIED_TO reg operands.
  while (OpIdx < NumOps &&
         (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    // Add TIED_TO operand.
    MI.addOperand(MI.getOperand(Idx));

  // Add the imm operand, if required.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {

    // Sentinel: must be overwritten by one of the flag cases below.
    unsigned imm = 0xFFFFFFFF;

    if (Flag == N2V_VectorDupLane)
      imm = decodeNVLaneDupIndex(insn, esize);
    if (Flag == N2V_VectorConvert_Between_Float_Fixed)
      imm = decodeVCVTFractionBits(insn);

    assert(imm != 0xFFFFFFFF && "Internal error");
    MI.addOperand(MCOperand::CreateImm(imm));
// Plain two-register form: delegates to the common VdVm handler with no
// optional-immediate flag.
static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2821 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2822 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2824 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2825 N2V_VectorConvert_Between_Float_Fixed, B);
2827 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2828 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2830 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2831 N2V_VectorDupLane, B);
2834 // Vector Shift [Accumulate] Instructions.
2835 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2837 // Vector Shift Left Long (with maximum shift count) Instructions.
2838 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
// Common handler for the NEON vector-shift forms:
//   Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
// 'LeftShift' selects how imm6 is converted into a shift amount (see
// decodeNVSAmt()); VSHLL instead uses its maximum shift count.
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand: not encoded, use reg 0 as a placeholder.
    MI.addOperand(MCOperand::CreateReg(0));

  assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");

  // Qm/Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));

  assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");

  // Add the imm operand.

  // VSHLL has maximum shift count as the imm, inferred from its size.
    Imm = decodeNVSAmt(insn, LeftShift);

  MI.addOperand(MCOperand::CreateImm(Imm));
2903 // Left shift instructions.
2904 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2905 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2907 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
2910 // Right shift instructions have different shift amount interpretation.
2911 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2912 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2914 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
2923 N3V_Multiply_By_Scalar
2925 } // End of unnamed namespace
2927 // NEON Three Register Instructions with Optional Immediate Operand
2929 // Vector Extract Instructions.
2930 // Qd/Dd Qn/Dn Qm/Dm imm4
2932 // Vector Shift (Register) Instructions.
2933 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2935 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2936 // Qd/Dd Qn/Dn RestrictedDm index
// Common handler for the NEON three-register forms (Vd Vn Vm [imm]).
// 'Flag' selects the variant: plain, vector extract (imm4 operand), vector
// shift (Vm/Vn operand order swapped), or multiply-by-scalar (restricted Dm
// plus a lane-index immediate).
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;

  // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  // Vector-shift forms list the source operands as Vm Vn instead of Vn Vm.
  bool VdVnVm = Flag == N3V_VectorShift ? false : true;
  bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
  bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
  ElemSize esize = ESizeNA;
  if (Flag == N3V_Multiply_By_Scalar) {
    // size = Inst{21-20}: only 16- and 32-bit elements are legal here.
    unsigned size = (insn >> 20) & 3;
    if (size == 1) esize = ESize16;
    if (size == 2) esize = ESize32;
    assert (esize == ESize16 || esize == ESize32);

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  // VABA, VABAL, VBSLd, VBSLq, ...
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    // TIED_TO operand: not encoded, use reg 0 as a placeholder.
    MI.addOperand(MCOperand::CreateReg(0));

  // Dn = Inst{7:19-16} => NEON Rn
  // or
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                  VdVnVm ? decodeNEONRn(insn)
                                         : decodeNEONRm(insn))));

  // Special case handling for VMOVDneon and VMOVQ because they are marked as
  if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)

  // Dm = Inst{5:3-0} => NEON Rm
  // or
  // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
  // or
  // Dn = Inst{7:19-16} => NEON Rn
  unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
                                        : decodeNEONRm(insn))
                      : decodeNEONRn(insn);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));

  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Add the imm operand.
      Imm = decodeN3VImm(insn);
    else if (IsDmRestricted)
      Imm = decodeRestrictedDmIndex(insn, esize);
      // Neither imm flavor applies: internal inconsistency.
      assert(0 && "Internal error: unreachable code!");

    MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register form: delegates to the common VdVnVm handler with no
// optional-immediate flag.
static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3033 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
3034 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3036 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3037 N3V_VectorShift, B);
3039 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
3040 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3042 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3043 N3V_VectorExtract, B);
3045 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
3046 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3048 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3049 N3V_Multiply_By_Scalar, B);
3052 // Vector Table Lookup
3054 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
3055 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
3056 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
3057 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
// Build the operand list for the vector table lookup instructions
// (VTBL/VTBX): Dd [Dd(TIED_TO)] Dn..Dn+len-1 Dm, where the list length is
// encoded in Inst{9-8}.
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  assert(NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == ARM::DPRRegClassID &&
         "Expect >= 3 operands and first 3 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  unsigned Rn = decodeNEONRn(insn);

  // {Dn} encoded as len = 0b00
  // {Dn Dn+1} encoded as len = 0b01
  // {Dn Dn+1 Dn+2 } encoded as len = 0b10
  // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
  unsigned Len = slice(insn, 9, 8) + 1;

  // Dd (the destination vector)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRd(insn))));

  // Process tied_to operand constraint.
  if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    MI.addOperand(MI.getOperand(Idx));

  // Do the <list> now.
  for (unsigned i = 0; i < Len; ++i) {
    assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,

  // Dm (the index vector)
  assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
         "Reg operand (index vector) expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRm(insn))));
3114 // Vector Get Lane (move scalar to ARM core register) Instructions.
3115 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
// Build the operand list for the scalar-to-core-register moves: Rt Dn index.
static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::GPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  // Element size is implied by the opcode (i32 / s16,u16 / s8,u8).
      Opcode == ARM::VGETLNi32 ? ESize32
      : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  // Dn = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  // Lane index within Dn.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3148 // Vector Set Lane (move ARM core register to scalar) Instructions.
3149 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
// Build the operand list for the core-register-to-scalar moves:
//   Dd Dd(TIED_TO) Rt index.
static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  // NOTE(review): the guard only requires NumOps >= 3 yet OpInfo[3] is
  // dereferenced -- these opcodes presumably always carry >= 4 operands;
  // confirm against the ARMInsts table.
  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
         OpInfo[2].RegClass == ARM::GPRRegClassID &&
         OpInfo[3].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  // Element size is implied by the opcode (i8 / i16 / i32).
      Opcode == ARM::VSETLNi8 ? ESize8
      : (Opcode == ARM::VSETLNi16 ? ESize16

  // Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  // TIED_TO $src operand: not encoded, use reg 0 as a placeholder.
  MI.addOperand(MCOperand::CreateReg(0));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  // Lane index within Dd.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3187 // Vector Duplicate Instructions (from ARM core register to all elements).
3188 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
// Build the operand list for VDUP from an ARM core register: Qd/Dd Rt.
static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         OpInfo[1].RegClass == ARM::GPRRegClassID &&
         "Expect >= 2 operands and first 2 as reg operand");

  unsigned RegClass = OpInfo[0].RegClass;

  // Qd/Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
                                                     decodeNEONRn(insn))));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3217 static inline bool MemBarrierInstr(uint32_t insn) {
3218 unsigned op7_4 = slice(insn, 7, 4);
3219 if (slice(insn, 31, 8) == 0xf57ff0 && (op7_4 >= 4 && op7_4 <= 6))
3225 static inline bool PreLoadOpcode(unsigned Opcode) {
3227 case ARM::PLDi12: case ARM::PLDrs:
3228 case ARM::PLDWi12: case ARM::PLDWrs:
3229 case ARM::PLIi12: case ARM::PLIrs:
// Build the operand list for the preload (PLD/PLDW/PLI) instructions.
static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  // Preload Data/Instruction requires either 2 or 3 operands.
  // PLDi12, PLDWi12, PLIi12: addrmode_imm12
  // PLDrs, PLDWrs, PLIrs:    ldst_so_reg

  // Base register (Rn).
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
      || Opcode == ARM::PLIi12) {
    unsigned Imm12 = slice(insn, 11, 0);
    // U bit clear means a subtracted (negative) offset.
    bool Negative = getUBit(insn) == 0;

    // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
    if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
      DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
      MI.setOpcode(ARM::PLDi12);

    // -0 is represented specially. All other values are as normal.
    int Offset = Negative ? -1 * Imm12 : Imm12;
    if (Imm12 == 0 && Negative)

    MI.addOperand(MCOperand::CreateImm(Offset));

    // Register-shifted form: append the index register (Rm).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

    ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;

    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Handle the miscellaneous instructions: memory barriers, SWP/SWPB, SETEND,
// CPS variants, DBG, BKPT, and the preload instructions.
static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  if (MemBarrierInstr(insn)) {
    // DMBsy, DSBsy, and ISBsy instructions have zero operand and are taken care
    // of within the generic ARMBasicMCBuilder::BuildIt() method.

    // Inst{3-0} encodes the memory barrier option for the variants.
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));

    // SWP, SWPB: Rd Rm Rn
    // Delegate to DisassembleLdStExFrm()....
    return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);

  if (Opcode == ARM::SETEND) {
    // E bit = Inst{9}.
    MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));

  // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
  // opcodes which match the same real instruction. This is needed since there's
  // no current handling of optional arguments. Fix here when a better handling
  // of optional arguments is implemented.
  if (Opcode == ARM::CPS3p) { // M = 1
    // Let's reject these impossible imod values by returning false:

    // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
    // invalid combination, so we just check for imod=0b00 here.
    if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)

    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
    MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6)));   // iflags
    MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));   // mode

  if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
    // Let's reject these impossible imod values by returning false:
    // 1. (imod=0b00,M=0)

    if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)

    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
    MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6)));   // iflags

  if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
    MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode

  // DBG has its option specified in Inst{3-0}.
  if (Opcode == ARM::DBG) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));

  // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
  if (Opcode == ARM::BKPT) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
                                       slice(insn, 3, 0)));

  if (PreLoadOpcode(Opcode))
    return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);

  // Anything else reaching here is a format-table inconsistency.
  assert(0 && "Unexpected misc instruction!");
3379 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3380 /// We divide the disassembly task into different categories, with each one
3381 /// corresponding to a specific instruction encoding format. There could be
3382 /// exceptions when handling a specific format, and that is why the Opcode is
3383 /// also present in the function prototype.
3384 static const DisassembleFP FuncPtrs[] = {
// This table is indexed by (unsigned)ARMFormat -- the ARMBasicMCBuilder
// constructor does Disasm = FuncPtrs[Idx] -- so the entry order must stay in
// sync with the ARMFormat enum definition.
// NOTE(review): the ctor asserts Idx < array_lengthof(FuncPtrs) - 1, which
// suggests the last slot (not visible in this view) is a sentinel for the
// unknown format -- confirm against the full table.
3388 &DisassembleBrMiscFrm,
3390 &DisassembleDPSoRegFrm,
3393 &DisassembleLdMiscFrm,
3394 &DisassembleStMiscFrm,
3395 &DisassembleLdStMulFrm,
3396 &DisassembleLdStExFrm,
3397 &DisassembleArithMiscFrm,
3400 &DisassembleVFPUnaryFrm,
3401 &DisassembleVFPBinaryFrm,
3402 &DisassembleVFPConv1Frm,
3403 &DisassembleVFPConv2Frm,
3404 &DisassembleVFPConv3Frm,
3405 &DisassembleVFPConv4Frm,
3406 &DisassembleVFPConv5Frm,
3407 &DisassembleVFPLdStFrm,
3408 &DisassembleVFPLdStMulFrm,
3409 &DisassembleVFPMiscFrm,
3410 &DisassembleThumbFrm,
3411 &DisassembleMiscFrm,
3412 &DisassembleNGetLnFrm,
3413 &DisassembleNSetLnFrm,
3414 &DisassembleNDupFrm,
3416 // VLD and VST (including one lane) Instructions.
3419 // A7.4.6 One register and a modified immediate value
3420 // 1-Register Instructions with imm.
3421 // LLVM only defines VMOVv instructions.
3422 &DisassembleN1RegModImmFrm,
3424 // 2-Register Instructions with no imm.
3425 &DisassembleN2RegFrm,
3427 // 2-Register Instructions with imm (vector convert float/fixed point).
3428 &DisassembleNVCVTFrm,
3430 // 2-Register Instructions with imm (vector dup lane).
3431 &DisassembleNVecDupLnFrm,
3433 // Vector Shift Left Instructions.
3434 &DisassembleN2RegVecShLFrm,
3436 // Vector Shift Right Instructions, which have a different interpretation of
3437 // the shift amount from the imm6 field.
3438 &DisassembleN2RegVecShRFrm,
3440 // 3-Register Data-Processing Instructions.
3441 &DisassembleN3RegFrm,
3443 // Vector Shift (Register) Instructions.
3444 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3445 &DisassembleN3RegVecShFrm,
3447 // Vector Extract Instructions.
3448 &DisassembleNVecExtractFrm,
3450 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3451 // By Scalar Instructions.
3452 &DisassembleNVecMulScalarFrm,
3454 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3455 // values in a table and generate a new vector.
3456 &DisassembleNVTBLFrm,
3461 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3462 /// The general idea is to set the Opcode for the MCInst, followed by adding
3463 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3464 /// to the Format-specific disassemble function for disassembly, followed by
3465 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3466 /// which follow the Dst/Src Operands.
// BuildIt - Populate MI for one decoded instruction word.  Stage 1 sets the
// opcode, stage 2 runs the format-specific operand builder, stage 3 fills in
// any trailing predicate/S-bit operands stage 2 did not account for.
// Returns false on any failure (handler rejected the encoding, or Err set).
3467 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3468 // Stage 1 sets the Opcode.
3469 MI.setOpcode(Opcode);
3470 // If the number of operands is zero, we're done!
// NOTE(review): the zero-operand early-return body (original lines 3471-3473)
// is elided in this view.
3474 // Stage 2 calls the format-specific disassemble function to build the operand
// list.  Disasm was selected from the FuncPtrs[] table by the constructor
// according to this instruction's ARMFormat.
3478 unsigned NumOpsAdded = 0;
3479 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
// Fail if the handler rejected the encoding or recorded an error in Err.
3481 if (!OK || this->Err != 0) return false;
3482 if (NumOpsAdded >= NumOps)
// (the success return for the fully-populated case is elided in this view)
3485 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3486 // FIXME: Should this be done selectively?
3487 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3490 // A8.3 Conditional execution
3491 // A8.3.1 Pseudocode details of conditional execution
3492 // Condition bits '111x' indicate the instruction is always executed.
// CondCode - Normalize the 4-bit condition field for use as a predicate
// operand.  Per the A8.3 note above, cond '111x' means "always executed", so
// 0xF is presumably mapped to ARMCC::AL here -- the rest of the body is not
// visible in this view; confirm against the full source.
3493 static uint32_t CondCode(uint32_t CondField) {
3494 if (CondField == 0xF)
3499 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3500 /// of some Thumb instructions which come before the reglist operands. It
3501 /// returns true if the two predicate operands have been processed.
3502 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3503 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3505 assert(NumOpsRemaining > 0 && "Invalid argument");
3507 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx is the position where the next MCOperand will be appended; the
// predicate pair, if present, is described by OpInfo[Idx] and OpInfo[Idx+1].
3508 unsigned Idx = MI.getNumOperands();
3510 // First, we check whether this instr specifies the PredicateOperand through
3511 // a pair of TargetOperandInfos with isPredicate() property.
// The pair is (imm cond-code, CCR register): the first entry has no register
// class (RegClass < 0), the second is the condition-code register class.
3512 if (NumOpsRemaining >= 2 &&
3513 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3514 OpInfo[Idx].RegClass < 0 &&
3515 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3517 // If we are inside an IT block, get the IT condition bits maintained via
3518 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3521 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Outside an IT block the predicate defaults to AL (always execute).
3523 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
// Second half of the predicate pair: the condition-code register.
3524 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3531 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3532 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
3534 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3535 uint32_t insn, unsigned short NumOpsRemaining) {
3537 assert(NumOpsRemaining > 0 && "Invalid argument");
3539 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3540 const std::string &Name = ARMInsts[Opcode].Name;
// Idx is where the next MCOperand will be appended; the remaining
// TargetOperandInfo entries start there.
3541 unsigned Idx = MI.getNumOperands();
// TSFlags is consulted below to recognize NEON-domain instructions, which
// legitimately encode 0b1111 in Inst{31-28}.
3542 uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
3544 // First, we check whether this instr specifies the PredicateOperand through
3545 // a pair of TargetOperandInfos with isPredicate() property.
3546 if (NumOpsRemaining >= 2 &&
3547 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3548 OpInfo[Idx].RegClass < 0 &&
3549 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3551 // If we are inside an IT block, get the IT condition bits maintained via
3552 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3555 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Thumb opcodes are named with a leading 't' (e.g. "tBcc", "t2Bcc").
3557 if (Name.length() > 1 && Name[0] == 't') {
3558 // Thumb conditional branch instructions have their cond field embedded,
// so the predicate is recovered from the instruction word itself:
// Thumb2 Bcc keeps it in Inst{25-22}, Thumb1 Bcc in Inst{11-8}.
3562 if (Name == "t2Bcc")
3563 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
3564 else if (Name == "tBcc")
3565 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
// Other Thumb instructions outside an IT block are unconditional (AL).
3567 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3569 // ARM instructions get their condition field from Inst{31-28}.
3570 // We should reject Inst{31-28} = 0b1111 as invalid encoding.
3571 if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
3573 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
// Second half of the predicate pair: the condition-code register.
3576 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3578 NumOpsRemaining -= 2;
3581 if (NumOpsRemaining == 0)
3584 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3585 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
// S=1 means the instruction updates the flags: record CPSR as the optional
// def; otherwise a zero (no-register) operand is added in its place.
3586 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3590 if (NumOpsRemaining == 0)
3596 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3597 /// after BuildIt is finished.
3598 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// Post-build hook: lets the attached SP object track Thumb IT-block state.
// With no SP attached there is nothing to update; pass Status through.
3601 if (!SP) return Status;
// A t2IT instruction (re)initializes the IT state from its cond/mask byte in
// Inst{7-0}; a failed InitIT invalidates this build.
3603 if (Opcode == ARM::t2IT)
3604 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3605 else if (InITBlock())
3611 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
// Constructor: record the opcode/format/operand count and select the
// format-specific disassemble function from the FuncPtrs[] table.
3612 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3614 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3615 unsigned Idx = (unsigned)format;
// NOTE(review): the upper bound excludes the last FuncPtrs slot, which
// presumably holds a sentinel for the unknown format -- confirm against the
// full FuncPtrs[] definition.
3616 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3617 Disasm = FuncPtrs[Idx];
3620 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3621 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3622 /// Return NULL if it fails to create/return a proper builder. API clients
3623 /// are responsible for freeing the allocated memory. Caching can be
3624 /// performed by the API clients to improve performance.
3625 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3626 // For "Unknown format", fail by returning a NULL pointer.
3627 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3628 DEBUG(errs() << "Unknown format\n");
3632 return new ARMBasicMCBuilder(Opcode, Format,
3633 ARMInsts[Opcode].getNumOperands());