1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "MCTargetDesc/ARMAddressingModes.h"
20 #include "MCTargetDesc/ARMMCExpr.h"
21 #include "llvm/ADT/APInt.h"
22 #include "llvm/ADT/APFloat.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
26 //#define DEBUG(X) do { X; } while (0)
28 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
29 /// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the
30 /// operand info for each ARMInsts[i].
32 /// Together with an instruction's encoding format, we can take advantage of the
33 /// NumOperands and the OpInfo fields of the target instruction description in
34 /// the quest to build out the MCOperand list for an MCInst.
36 /// The general guideline is that with a known format, the number of dst and src
37 /// operands are well-known. The dst is built first, followed by the src
38 /// operand(s). The operands not yet used at this point are for the Implicit
39 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
40 /// defined with two components:
42 /// def pred { // Operand PredicateOperand
43 /// ValueType Type = OtherVT;
44 /// string PrintMethod = "printPredicateOperand";
45 /// string AsmOperandLowerMethod = ?;
46 /// dag MIOperandInfo = (ops i32imm, CCR);
47 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
48 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
51 /// which is manifested by the MCOperandInfo[] of:
53 /// { 0, 0|(1<<MCOI::Predicate), 0 },
54 /// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 }
56 /// So the first predicate MCOperand corresponds to the immediate part of the
57 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
58 /// corresponds to a register kind of ARM::CPSR.
60 /// For the Defs part, in the simple case of only cc_out:$s, we have:
62 /// def cc_out { // Operand OptionalDefOperand
63 /// ValueType Type = OtherVT;
64 /// string PrintMethod = "printSBitModifierOperand";
65 /// string AsmOperandLowerMethod = ?;
66 /// dag MIOperandInfo = (ops CCR);
67 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
68 /// dag DefaultOps = (ops (i32 zero_reg));
71 /// which is manifested by the one MCOperandInfo of:
73 /// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 }
77 extern MCInstrDesc ARMInsts[];
// Return the mnemonic string for Opcode by indexing the TableGen'erated
// ARMInsts[] instruction-descriptor table (declared extern above).
// NOTE(review): this excerpt drops original line 84 -- the function's closing
// brace is missing here.
82 const char *ARMUtils::OpcodeName(unsigned Opcode) {
83 return ARMInsts[Opcode].Name;
86 // Return the register enum Based on RegClass and the raw register number.
// Maps a (register class, raw encoding number) pair to the ARM::* register
// enum value, e.g. (GPRRegClassID, 0) -> ARM::R0, (DPRRegClassID, 8) -> D8.
// On an invalid combination it falls through to the DEBUG/error path at the
// bottom; the error is recorded on the builder B (return statement not
// visible in this excerpt).
// NOTE(review): the embedded line numbering shows large gaps throughout this
// function -- the return-type line, the per-number "if (RawRegister == N)"
// guards, several "return ARM::Dx/Qx" lines, and closing braces are missing
// from this excerpt. Do not edit logic here without the full file.
89 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
90 if (RegClassID == ARM::rGPRRegClassID) {
// Check for the register numbers 13 and 15 that are not permitted for many
// Thumb register specifiers.
93 if (RawRegister == 13 || RawRegister == 15) {
97 // For this purpose, we can treat rGPR as if it were GPR.
98 RegClassID = ARM::GPRRegClassID;
101 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
102 // A7.3 register encoding
103 // Qd -> bit[12] == 0
104 // Qn -> bit[16] == 0
107 // If one of these bits is 1, the instruction is UNDEFINED.
108 if (RegClassID == ARM::QPRRegClassID && slice(RawRegister, 0, 0) == 1) {
// Q registers are encoded at twice the D-register number, so halve the raw
// value for the QPR class before the per-number dispatch below.
113 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
// Per-number dispatch: one switch per raw register value 0..31 (the guarding
// "if (RawRegister == N)" lines are not visible in this excerpt).
119 switch (RegClassID) {
120 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
121 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
122 case ARM::DPR_VFP2RegClassID:
124 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
125 case ARM::QPR_VFP2RegClassID:
127 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
131 switch (RegClassID) {
132 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
133 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
134 case ARM::DPR_VFP2RegClassID:
136 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
137 case ARM::QPR_VFP2RegClassID:
139 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
143 switch (RegClassID) {
144 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
145 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
146 case ARM::DPR_VFP2RegClassID:
148 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
149 case ARM::QPR_VFP2RegClassID:
151 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
155 switch (RegClassID) {
156 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
157 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
158 case ARM::DPR_VFP2RegClassID:
160 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
161 case ARM::QPR_VFP2RegClassID:
163 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
167 switch (RegClassID) {
168 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
169 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
170 case ARM::DPR_VFP2RegClassID:
// From register number 4 upward the *_8 Q-register classes no longer apply
// (those classes only cover Q0-Q3 / the low registers).
172 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
173 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
177 switch (RegClassID) {
178 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
179 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
180 case ARM::DPR_VFP2RegClassID:
182 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
183 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
187 switch (RegClassID) {
188 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
189 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
190 case ARM::DPR_VFP2RegClassID:
192 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
193 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
197 switch (RegClassID) {
198 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
199 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
200 case ARM::DPR_VFP2RegClassID:
202 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
203 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// From register number 8 upward tGPR (Thumb low registers R0-R7) and the
// DPR_8 class no longer apply.
207 switch (RegClassID) {
208 case ARM::GPRRegClassID: return ARM::R8;
209 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
210 case ARM::QPRRegClassID: return ARM::Q8;
211 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
215 switch (RegClassID) {
216 case ARM::GPRRegClassID: return ARM::R9;
217 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
218 case ARM::QPRRegClassID: return ARM::Q9;
219 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
223 switch (RegClassID) {
224 case ARM::GPRRegClassID: return ARM::R10;
225 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
226 case ARM::QPRRegClassID: return ARM::Q10;
227 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
231 switch (RegClassID) {
232 case ARM::GPRRegClassID: return ARM::R11;
233 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
234 case ARM::QPRRegClassID: return ARM::Q11;
235 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
239 switch (RegClassID) {
240 case ARM::GPRRegClassID: return ARM::R12;
241 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
242 case ARM::QPRRegClassID: return ARM::Q12;
243 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// GPR numbers 13-15 are the special-role registers SP, LR, PC.
247 switch (RegClassID) {
248 case ARM::GPRRegClassID: return ARM::SP;
249 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
250 case ARM::QPRRegClassID: return ARM::Q13;
251 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
255 switch (RegClassID) {
256 case ARM::GPRRegClassID: return ARM::LR;
257 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
258 case ARM::QPRRegClassID: return ARM::Q14;
259 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
263 switch (RegClassID) {
264 case ARM::GPRRegClassID: return ARM::PC;
265 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
266 case ARM::QPRRegClassID: return ARM::Q15;
267 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Numbers 16-31 only exist for the full DPR and SPR classes (VFP3/NEON
// register file); GPR/QPR have no registers in this range.
271 switch (RegClassID) {
272 case ARM::DPRRegClassID: return ARM::D16;
273 case ARM::SPRRegClassID: return ARM::S16;
277 switch (RegClassID) {
278 case ARM::DPRRegClassID: return ARM::D17;
279 case ARM::SPRRegClassID: return ARM::S17;
283 switch (RegClassID) {
284 case ARM::DPRRegClassID: return ARM::D18;
285 case ARM::SPRRegClassID: return ARM::S18;
289 switch (RegClassID) {
290 case ARM::DPRRegClassID: return ARM::D19;
291 case ARM::SPRRegClassID: return ARM::S19;
295 switch (RegClassID) {
296 case ARM::DPRRegClassID: return ARM::D20;
297 case ARM::SPRRegClassID: return ARM::S20;
301 switch (RegClassID) {
302 case ARM::DPRRegClassID: return ARM::D21;
303 case ARM::SPRRegClassID: return ARM::S21;
307 switch (RegClassID) {
308 case ARM::DPRRegClassID: return ARM::D22;
309 case ARM::SPRRegClassID: return ARM::S22;
313 switch (RegClassID) {
314 case ARM::DPRRegClassID: return ARM::D23;
315 case ARM::SPRRegClassID: return ARM::S23;
319 switch (RegClassID) {
320 case ARM::DPRRegClassID: return ARM::D24;
321 case ARM::SPRRegClassID: return ARM::S24;
325 switch (RegClassID) {
326 case ARM::DPRRegClassID: return ARM::D25;
327 case ARM::SPRRegClassID: return ARM::S25;
331 switch (RegClassID) {
332 case ARM::DPRRegClassID: return ARM::D26;
333 case ARM::SPRRegClassID: return ARM::S26;
337 switch (RegClassID) {
338 case ARM::DPRRegClassID: return ARM::D27;
339 case ARM::SPRRegClassID: return ARM::S27;
343 switch (RegClassID) {
344 case ARM::DPRRegClassID: return ARM::D28;
345 case ARM::SPRRegClassID: return ARM::S28;
349 switch (RegClassID) {
350 case ARM::DPRRegClassID: return ARM::D29;
351 case ARM::SPRRegClassID: return ARM::S29;
355 switch (RegClassID) {
356 case ARM::DPRRegClassID: return ARM::D30;
357 case ARM::SPRRegClassID: return ARM::S30;
361 switch (RegClassID) {
362 case ARM::DPRRegClassID: return ARM::D31;
363 case ARM::SPRRegClassID: return ARM::S31;
// Fell through every dispatch above: report and flag the encoding error.
367 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
368 // Encoding error. Mark the builder with error code != 0.
373 ///////////////////////////////
375 // Utility Functions //
377 ///////////////////////////////
379 // Extract/Decode Rd: Inst{15-12}.
// Returns the raw 4-bit Rd field; GPRRegMask keeps only the register bits
// after shifting. (Closing brace not visible in this excerpt.)
380 static inline unsigned decodeRd(uint32_t insn) {
381 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
384 // Extract/Decode Rn: Inst{19-16}.
// Returns the raw 4-bit Rn (base/first-operand) register field.
385 static inline unsigned decodeRn(uint32_t insn) {
386 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
389 // Extract/Decode Rm: Inst{3-0}.
// Rm sits in the low bits, so no shift is needed -- just mask.
390 static inline unsigned decodeRm(uint32_t insn) {
391 return (insn & ARMII::GPRRegMask);
394 // Extract/Decode Rs: Inst{11-8}.
// Returns the raw 4-bit Rs (shift-amount register) field.
395 static inline unsigned decodeRs(uint32_t insn) {
396 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
// Condition code field. No mask is applied after the shift -- the cond field
// occupies the top bits of the word (Inst{31-28}, per the header comment at
// the top of this file).
399 static inline unsigned getCondField(uint32_t insn) {
400 return (insn >> ARMII::CondShift);
// I bit: 1 => operand 2 is an immediate form, 0 => register form
// (see the getIBit() asserts in DisassembleDPFrm below).
403 static inline unsigned getIBit(uint32_t insn) {
404 return (insn >> ARMII::I_BitShift) & 1;
// Addressing-mode-3 variant of the I bit (different bit position than
// getIBit above).
407 static inline unsigned getAM3IBit(uint32_t insn) {
408 return (insn >> ARMII::AM3_I_BitShift) & 1;
// P (pre-index) bit for load/store addressing modes.
411 static inline unsigned getPBit(uint32_t insn) {
412 return (insn >> ARMII::P_BitShift) & 1;
// U (add/subtract offset) bit: 1 => add, 0 => subtract (see the AddrOpc
// selection in DisassembleCoprocessor below).
415 static inline unsigned getUBit(uint32_t insn) {
416 return (insn >> ARMII::U_BitShift) & 1;
// P:U as a 2-bit value. Shifting by U_BitShift and masking with 3 captures
// both bits because P is the bit immediately above U (Inst{24-23}, see
// getAMSubModeForBits below).
419 static inline unsigned getPUBits(uint32_t insn) {
420 return (insn >> ARMII::U_BitShift) & 3;
// S bit: instruction updates the condition flags (CPSR).
423 static inline unsigned getSBit(uint32_t insn) {
424 return (insn >> ARMII::S_BitShift) & 1;
// W (writeback) bit for load/store addressing modes.
427 static inline unsigned getWBit(uint32_t insn) {
428 return (insn >> ARMII::W_BitShift) & 1;
// D bit (extended-register bit used by VFP/NEON encodings).
431 static inline unsigned getDBit(uint32_t insn) {
432 return (insn >> ARMII::D_BitShift) & 1;
// N bit (extended-register bit used by VFP/NEON encodings).
435 static inline unsigned getNBit(uint32_t insn) {
436 return (insn >> ARMII::N_BitShift) & 1;
// M bit (extended-register bit used by VFP/NEON encodings).
439 static inline unsigned getMBit(uint32_t insn) {
440 return (insn >> ARMII::M_BitShift) & 1;
443 // See A8.4 Shifts applied to a register.
444 // A8.4.2 Register controlled shifts.
446 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
447 // into llvm enums for shift opcode. The API clients should pass in the value
448 // encoded with two bits, so the assert stays to signal a wrong API usage.
450 // A8-12: DecodeRegShift()
// NOTE(review): the "switch (bits)" header (original line 452) and the
// closing braces are missing from this excerpt.
451 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
453 default: assert(0 && "No such value"); return ARM_AM::no_shift;
454 case 0: return ARM_AM::lsl;
455 case 1: return ARM_AM::lsr;
456 case 2: return ARM_AM::asr;
457 case 3: return ARM_AM::ror;
461 // See A8.4 Shifts applied to a register.
462 // A8.4.1 Constant shifts.
464 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
465 // encodings into the intended ShiftOpc and shift amount.
467 // A8-11: DecodeImmShift()
// In-out parameters: ShOp and ShImm are both read and rewritten in place.
// NOTE(review): most of this function's body (original lines 469-486) is
// missing from this excerpt -- only a fragment of one case is visible.
468 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
472 case ARM_AM::no_shift:
476 ShOp = ARM_AM::no_shift;
488 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
489 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
490 // clients should pass in the value encoded with two bits, so the assert stays
491 // to signal a wrong API usage.
// The input is the P:U pair as produced by getPUBits() above.
// NOTE(review): the "switch (bits)" header (original line 493) and closing
// braces are missing from this excerpt.
492 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
494 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
495 case 1: return ARM_AM::ia; // P=0 U=1
496 case 3: return ARM_AM::ib; // P=1 U=1
497 case 0: return ARM_AM::da; // P=0 U=0
498 case 2: return ARM_AM::db; // P=1 U=0
502 ////////////////////////////////////////////
504 // Disassemble function definitions //
506 ////////////////////////////////////////////
508 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
509 /// instr into a list of MCOperands in the appropriate order, with possible dst,
510 /// followed by possible src(s).
512 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
513 /// the CPSR, is factored into ARMBasicMCBuilder's method named
514 /// TryPredicateAndSBitModifier.
// Pseudo instructions should never reach the disassembler; this handler only
// asserts. (The return statement and closing brace are not visible in this
// excerpt.)
516 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
517 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
519 assert(0 && "Unexpected pseudo instruction!");
// Encoding sanity checks for the multiply family, per the ARM ARM
// UNPREDICTABLE rules quoted below. Returns true when a register specifier
// is invalid for the given opcode.
524 // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
527 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
530 // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
531 // if dHi == dLo then UNPREDICTABLE;
// NOTE(review): several lines (the "switch (Opcode)" header and the
// "return true/false" statements after each check) are missing from this
// excerpt, per the gaps in the embedded numbering.
532 static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
533 unsigned R19_16 = slice(insn, 19, 16);
534 unsigned R15_12 = slice(insn, 15, 12);
535 unsigned R11_8 = slice(insn, 11, 8);
536 unsigned R3_0 = slice(insn, 3, 0);
539 // Did we miss an opcode?
540 DEBUG(errs() << "BadRegsMulFrm: unexpected opcode!");
// Four-register forms: all of Rd, Ra, Rm, Rn must be != 15.
542 case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
543 case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
544 case ARM::SMMLA: case ARM::SMMLAR: case ARM::SMMLS: case ARM::SMMLSR:
546 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
// Three-register forms: Ra (bits 15-12) is not checked.
549 case ARM::MUL: case ARM::SMMUL: case ARM::SMMULR:
550 case ARM::SMULBB: case ARM::SMULBT: case ARM::SMULTB: case ARM::SMULTT:
551 case ARM::SMULWB: case ARM::SMULWT: case ARM::SMUAD: case ARM::SMUADX:
552 // A8.6.167 SMLAD & A8.6.172 SMLSD
553 case ARM::SMLAD: case ARM::SMLADX: case ARM::SMLSD: case ARM::SMLSDX:
555 if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
// Long (64-bit result) forms: additionally RdHi must differ from RdLo.
558 case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
560 case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB: case ARM::SMLALTT:
561 case ARM::SMLALD: case ARM::SMLALDX: case ARM::SMLSLD: case ARM::SMLSLDX:
562 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
564 if (R19_16 == R15_12)
570 // Multiply Instructions.
571 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLAR,
572 // SMMLS, SMMLAR, SMLAD, SMLADX, SMLSD, SMLSDX, and USADA8 (for convenience):
573 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
574 // But note that register checking for {SMLAD, SMLADX, SMLSD, SMLSDX} is
575 // only for {d, n, m}.
577 // MUL, SMMUL, SMMULR, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT, SMUAD,
578 // SMUADX, and USAD8 (for convenience):
579 // Rd{19-16} Rn{3-0} Rm{11-8}
581 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
// SMLALD, SMLALDX, SMLSLD, SMLSLDX:
583 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
585 // The mapping of the multiply registers to the "regular" ARM registers, where
586 // there are convenience decoder functions, is:
// Builds the MCOperand list for the multiply family: optional RdLo, then
// Rd/RdHi, then Rn (bits 3-0), Rm (bits 11-8), and optional Ra.
// NumOpsAdded (aliased as OpIdx) is the out-count of operands appended.
// NOTE(review): the embedded numbering shows gaps here (e.g. the
// NumOpsAdded initialization and the two-destination test around original
// line 614, and some OpIdx updates) -- not visible in this excerpt.
592 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
593 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
595 const MCInstrDesc &MCID = ARMInsts[Opcode];
596 unsigned short NumDefs = MCID.getNumDefs();
597 const MCOperandInfo *OpInfo = MCID.OpInfo;
598 unsigned &OpIdx = NumOpsAdded;
602 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
604 && OpInfo[0].RegClass == ARM::GPRRegClassID
605 && OpInfo[1].RegClass == ARM::GPRRegClassID
606 && OpInfo[2].RegClass == ARM::GPRRegClassID
607 && "Expect three register operands");
609 // Sanity check for the register encodings.
610 if (BadRegsMulFrm(Opcode, insn))
613 // Instructions with two destination registers have RdLo{15-12} first.
615 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
616 "Expect 4th register operand");
617 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
622 // The destination register: RdHi{19-16} or Rd{19-16}.
623 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// The two src registers: Rn{3-0}, then Rm{11-8}.
627 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
629 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
633 // Many multiply instructions (e.g., MLA) have three src registers.
634 // The third register operand is Ra{15-12}.
635 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
636 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
644 // Helper routines for disassembly of coprocessor instructions.
// Returns true for coprocessor load/store opcodes (LDC*/STC* families),
// relying on the opcode enum values being contiguous between the *_OFFSET
// and *_PRE endpoints. (The return statements and closing brace are not
// visible in this excerpt.)
646 static bool LdStCopOpcode(unsigned Opcode) {
647 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
648 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
// Returns true if Opcode is any coprocessor instruction: a load/store
// (delegated to LdStCopOpcode) or one of the CDP/MCR/MRC/MCRR/MRRC
// families. (The switch header and return statements are missing from this
// excerpt.)
652 static bool CoprocessorOpcode(unsigned Opcode) {
653 if (LdStCopOpcode(Opcode))
659 case ARM::CDP: case ARM::CDP2:
660 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
661 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
// Coprocessor number: Inst{11-8}.
665 static inline unsigned GetCoprocessor(uint32_t insn) {
666 return slice(insn, 11, 8);
// opc1 field: CDP uses the wider 4-bit Inst{23-20}; MCR/MRC use the 3-bit
// Inst{23-21} (see the CDP/CDP2 note in DisassembleCoprocessor below).
668 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
669 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
// opc2 field: Inst{7-5}.
671 static inline unsigned GetCopOpc2(uint32_t insn) {
672 return slice(insn, 7, 5);
// opc field for the two-register MCRR/MRRC forms: Inst{7-4}.
674 static inline unsigned GetCopOpc(uint32_t insn) {
675 return slice(insn, 7, 4);
677 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
// registers. Per-opcode operand orders:
680 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
682 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
// MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
686 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
688 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
692 // LDC_OPTION: cop CRd Rn imm8
694 // STC_OPTION: cop CRd Rn imm8
// Builds the MCOperand list for all coprocessor instructions. Rejects
// coproc 10/11 for MCR[R]/MR[R]C since those encodings belong to VFP/NEON.
// NOTE(review): several lines are missing from this excerpt per the gaps in
// the embedded numbering (e.g. the LdStCop branch structure around original
// lines 722-743 and various OpIdx updates / closing braces).
697 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
698 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
700 assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");
702 unsigned &OpIdx = NumOpsAdded;
704 // if coproc == '101x' then SEE "Advanced SIMD and VFP"
705 // But since the special instructions have more explicit encoding bits
706 // specified, if coproc == 10 or 11, we should reject it as invalid.
707 unsigned coproc = GetCoprocessor(insn);
708 if ((Opcode == ARM::MCR || Opcode == ARM::MCRR ||
709 Opcode == ARM::MRC || Opcode == ARM::MRRC) &&
710 (coproc == 10 || coproc == 11)) {
711 DEBUG(errs() << "Encoding error: coproc == 10 or 11 for MCR[R]/MR[R]C\n");
// Classify the opcode once up front; the flags drive operand layout below.
715 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
716 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
718 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
719 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
720 bool LdStCop = LdStCopOpcode(Opcode);
721 bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);
// MRC/MRC2 write their result Rt first (destination operand).
726 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
730 MI.addOperand(MCOperand::CreateImm(coproc));
734 // Unindex if P:W = 0b00 --> _OPTION variant
735 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
737 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
739 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Placeholder register operand, followed by the AM2-style offset: the U bit
// selects add/sub, imm8 is scaled by 4, and the index mode comes from
// TSFlags.
744 MI.addOperand(MCOperand::CreateReg(0));
745 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
746 const MCInstrDesc &MCID = ARMInsts[Opcode];
748 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
749 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
750 ARM_AM::no_shift, IndexMode);
751 MI.addOperand(MCOperand::CreateImm(Offset));
// _OPTION variant: raw unscaled imm8.
754 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
// Non-load/store path: opc (MCRR/MRRC) or opc1 (CDP wider form).
758 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
759 : GetCopOpc1(insn, NoGPR)));
// CRd as an immediate for CDP; otherwise Rd as a GPR.
763 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
764 : MCOperand::CreateReg(
765 getRegisterEnum(B, ARM::GPRRegClassID,
// Rn as a GPR for the two-register forms; CRn as an immediate otherwise.
770 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
771 getRegisterEnum(B, ARM::GPRRegClassID,
773 : MCOperand::CreateImm(decodeRn(insn)));
775 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
780 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
788 // Branch Instructions.
789 // BL: SignExtend(Imm24:'00', 32)
790 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
791 // SMC: ZeroExtend(imm4, 32)
792 // SVC: ZeroExtend(Imm24, 32)
794 // Various coprocessor instructions are assigned BrFrm arbitrarily.
795 // Delegates to DisassembleCoprocessor() helper function.
798 // MSR/MSRsys: Rm mask=Inst{19-16}
800 // MSRi/MSRsysi: so_imm
// Dispatches on the many opcode families lumped into BrFrm: coprocessor
// ops, MRS/MSR, BXJ, SRS/RFE, and finally the true branches / SMC / SVC.
// NOTE(review): this excerpt is missing lines throughout (per the embedded
// numbering gaps): the NumOpsAdded updates and "return true" statements
// after each family, and several closing braces.
803 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
804 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
806 if (CoprocessorOpcode(Opcode))
807 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
809 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
810 if (!OpInfo) return false;
812 // MRS and MRSsys take one GPR reg Rd.
813 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
814 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
815 "Reg operand expected");
816 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
821 // BXJ takes one GPR reg Rm.
822 if (Opcode == ARM::BXJ) {
823 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
824 "Reg operand expected");
825 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
830 // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
831 // bit 4, and the special register fields in bits 3-0.
832 if (Opcode == ARM::MSR) {
833 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
834 "Reg operand expected");
835 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
836 slice(insn, 19, 16) /* Special Reg */ ));
837 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
842 // MSRi take a mask, followed by one so_imm operand. The mask contains the
843 // R Bit in bit 4, and the special register fields in bits 3-0.
844 if (Opcode == ARM::MSRi) {
845 // A5.2.11 MSR (immediate), and hints & B6.1.6 MSR (immediate)
846 // The hints instructions have more specific encodings, so if mask == 0,
847 // we should reject this as an invalid instruction.
848 if (slice(insn, 19, 16) == 0)
850 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
851 slice(insn, 19, 16) /* Special Reg */ ));
852 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
853 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
854 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
855 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
856 unsigned Imm = insn & 0xFF;
857 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// SRS* store the target mode as a 5-bit immediate Inst{4-0}.
861 if (Opcode == ARM::SRSDA || Opcode == ARM::SRSDB ||
862 Opcode == ARM::SRSIA || Opcode == ARM::SRSIB ||
863 Opcode == ARM::SRSDA_UPD || Opcode == ARM::SRSDB_UPD ||
864 Opcode == ARM::SRSIA_UPD || Opcode == ARM::SRSIB_UPD) {
865 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
// RFE* take the base register Rn.
869 if (Opcode == ARM::RFEDA || Opcode == ARM::RFEDB ||
870 Opcode == ARM::RFEIA || Opcode == ARM::RFEIB ||
871 Opcode == ARM::RFEDA_UPD || Opcode == ARM::RFEDB_UPD ||
872 Opcode == ARM::RFEIA_UPD || Opcode == ARM::RFEIB_UPD) {
873 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Only the real branches and SMC/SVC remain; each takes one immediate.
879 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
880 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
881 "Unexpected Opcode");
883 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
886 if (Opcode == ARM::SMC) {
// ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
888 Imm32 = slice(insn, 3, 0);
889 } else if (Opcode == ARM::SVC) {
890 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
891 Imm32 = slice(insn, 23, 0);
893 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
894 unsigned Imm26 = slice(insn, 23, 0) << 2;
895 //Imm32 = signextend<signed int, 26>(Imm26);
896 Imm32 = SignExtend32<26>(Imm26);
899 MI.addOperand(MCOperand::CreateImm(Imm32));
905 // Misc. Branch Instructions.
907 // BLX, BLX_pred, BX, BX_pred
// Handles the register/immediate branch-and-link family. BX_RET/MOVPCLR
// have no explicit operands here; BLX/BX take the Rm register; BLXi takes a
// sign-extended 26-bit immediate (imm24:H:'0').
// NOTE(review): the embedded numbering shows missing lines (OpIdx
// initialization, the early "return true" statements, closing braces).
909 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
910 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
912 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
913 if (!OpInfo) return false;
915 unsigned &OpIdx = NumOpsAdded;
919 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
920 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
923 // BLX and BX take one GPR reg.
924 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
925 Opcode == ARM::BX || Opcode == ARM::BX_pred) {
926 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
927 "Reg operand expected");
928 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
934 // BLXi takes imm32 (the PC offset).
935 if (Opcode == ARM::BLXi) {
936 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
937 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
938 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
939 int Imm32 = SignExtend32<26>(Imm26);
940 MI.addOperand(MCOperand::CreateImm(Imm32));
// Computes the inverted bit mask for BFC/BFI from the lsb (Inst{11-7}) and
// msb (Inst{20-16}) fields; fails (with a DEBUG message) when msb < lsb,
// which is an encoding error. The loop sets bits lsb..msb of the mask
// before it is inverted.
// NOTE(review): several body lines (the msb < lsb comparison, the return
// statements, the mask initialization/inversion) are missing from this
// excerpt per the gaps in the embedded numbering.
948 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
949 uint32_t lsb = slice(insn, 11, 7);
950 uint32_t msb = slice(insn, 20, 16);
953 DEBUG(errs() << "Encoding error: msb < lsb\n");
957 for (uint32_t i = lsb; i <= msb; ++i)
963 // Standard data-processing instructions allow PC as a register specifier,
964 // but we should reject other DPFrm instructions with PC as registers.
// Returns true when a non-standard DP opcode uses r15 for Rd/Rn/Rm. The
// listed *rr opcodes are the standard ones for which PC is permitted.
// (The switch header and return statements are missing from this excerpt.)
965 static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
968 // Did we miss an opcode?
969 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 || decodeRm(insn) == 15) {
970 DEBUG(errs() << "DPFrm with bad reg specifier(s)\n");
973 case ARM::ADCrr: case ARM::ADDSrr: case ARM::ADDrr: case ARM::ANDrr:
974 case ARM::BICrr: case ARM::CMNzrr: case ARM::CMPrr: case ARM::EORrr:
975 case ARM::ORRrr: case ARM::RSBrr: case ARM::RSCrr: case ARM::SBCrr:
976 case ARM::SUBSrr: case ARM::SUBrr: case ARM::TEQrr: case ARM::TSTrr:
981 // A major complication is the fact that some of the saturating add/subtract
982 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
983 // They are QADD, QDADD, QDSUB, and QSUB.
// Builds the MCOperand list for data-processing instructions: optional Rd
// def, then BFC/BFI/SBFX/UBFX special cases, then Rn (or Rm for the
// saturating ops -- see the RmRn flag), then operand 2 as either a register
// or a rotated 8-bit immediate (or imm16 for MOVi16/MOVTi16).
// NOTE(review): the embedded numbering shows missing lines throughout
// (OpIdx initialization/increments, "return false/true" statements after
// the encoding checks, the isUnary/else branch structure, closing braces).
984 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
985 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
987 const MCInstrDesc &MCID = ARMInsts[Opcode];
988 unsigned short NumDefs = MCID.getNumDefs();
989 bool isUnary = isUnaryDP(MCID.TSFlags);
990 const MCOperandInfo *OpInfo = MCID.OpInfo;
991 unsigned &OpIdx = NumOpsAdded;
995 // Disassemble register def if there is one.
996 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
997 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1002 // Now disassemble the src operands.
1003 if (OpIdx >= NumOps)
1006 // Special-case handling of BFC/BFI/SBFX/UBFX.
1007 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
1008 // A8.6.17 BFC & A8.6.18 BFI
// d == 15 is UNPREDICTABLE for BFC/BFI; reject it.
1010 if (decodeRd(insn) == 15)
1012 MI.addOperand(MCOperand::CreateReg(0));
1013 if (Opcode == ARM::BFI) {
1014 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1019 if (!getBFCInvMask(insn, mask))
1022 MI.addOperand(MCOperand::CreateImm(mask));
1026 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
1027 // Sanity check Rd and Rm.
1028 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
// Operands: Rm, then lsb (Inst{11-7}) and width-minus-one (Inst{20-16}).
1030 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1032 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1033 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16)));
// Saturating ops swap the source order: they take Rd Rm Rn.
1038 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1039 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1041 // BinaryDP has an Rn operand.
1043 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1044 "Reg operand expected");
1045 MI.addOperand(MCOperand::CreateReg(
1046 getRegisterEnum(B, ARM::GPRRegClassID,
1047 RmRn ? decodeRm(insn) : decodeRn(insn))));
1051 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1052 if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
1053 MI.addOperand(MCOperand::CreateReg(0));
1057 // Now disassemble operand 2.
1058 if (OpIdx >= NumOps)
1061 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1062 // We have a reg/reg form.
1063 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1064 // routed here as well.
1065 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1066 if (BadRegsDPFrm(Opcode, insn))
1068 MI.addOperand(MCOperand::CreateReg(
1069 getRegisterEnum(B, ARM::GPRRegClassID,
1070 RmRn? decodeRn(insn) : decodeRm(insn))));
1072 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1073 // These two instructions don't allow d as 15.
1074 if (decodeRd(insn) == 15)
1076 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1077 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1078 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
// Prefer a symbolic operand (relocation/label) when the builder can supply
// one; fall back to the raw immediate.
1079 if (!B->tryAddingSymbolicOperand(Imm16, 4, MI))
1080 MI.addOperand(MCOperand::CreateImm(Imm16));
1083 // We have a reg/imm form.
1084 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1085 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1086 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1087 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1088 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1089 unsigned Imm = insn & 0xFF;
1090 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Disassemble a data-processing instruction whose second operand is a
// register-shifted register (so_reg_reg): <Rd>, [Rn,] Rm, <shift> Rs.
// Builds the optional GPR def, the optional Rn source (binary DP only), a
// tied dummy operand for unary two-address forms, then the three-component
// shifted-register operand [Rm, Rs, shift-opc] or [Rm, reg0, shift_imm].
// Returns false on invalid/UNPREDICTABLE encodings.
// NOTE(review): several statements (brace closings, OpIdx updates, returns)
// are elided in this excerpt — consult the full file before modifying.
1097 static bool DisassembleDPSoRegRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1098 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1100 const MCInstrDesc &MCID = ARMInsts[Opcode];
1101 unsigned short NumDefs = MCID.getNumDefs();
1102 bool isUnary = isUnaryDP(MCID.TSFlags);
1103 const MCOperandInfo *OpInfo = MCID.OpInfo;
1104 unsigned &OpIdx = NumOpsAdded;
1108 // Disassemble register def if there is one.
1109 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1110 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1115 // Disassemble the src operands.
1116 if (OpIdx >= NumOps)
1119 // BinaryDP has an Rn operand.
1121 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1122 "Reg operand expected");
1123 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1128 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1129 if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
1130 MI.addOperand(MCOperand::CreateReg(0));
1134 // Disassemble operand 2, which consists of three components.
1135 if (OpIdx + 2 >= NumOps)
1138 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1139 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1140 (OpInfo[OpIdx+2].RegClass < 0) &&
1141 "Expect 3 reg operands");
1143 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1144 unsigned Rs = slice(insn, 4, 4);
1146 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1149 // If Inst{7} != 0, we should reject this insn as an invalid encoding.
1150 if (slice(insn, 7, 7))
1153 // A8.6.3 ADC (register-shifted register)
1154 // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
1156 // This also accounts for shift instructions (register) where, fortunately,
1157 // Inst{19-16} = 0b0000.
1158 // A8.6.89 LSL (register)
1159 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
1160 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
1161 decodeRm(insn) == 15 || decodeRs(insn) == 15)
1164 // Register-controlled shifts: [Rm, Rs, shift].
1165 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1167 // Inst{6-5} encodes the shift opcode.
1168 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1169 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1171 // Constant shifts: [Rm, reg0, shift_imm].
1172 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1173 // Inst{6-5} encodes the shift opcode.
1174 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1175 // Inst{11-7} encodes the imm5 shift amount.
1176 unsigned ShImm = slice(insn, 11, 7);
1178 // A8.4.1. Possible rrx or shift amount of 32...
1179 getImmShiftSE(ShOp, ShImm);
1180 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Disassemble a data-processing instruction whose second operand is a
// register shifted by an immediate (so_reg_imm): <Rd>, [Rn,] Rm, <shift> #imm.
// Mirrors DisassembleDPSoRegRegFrm but the shift amount comes from the imm5
// field Inst{11-7} instead of a shift register Rs.
// NOTE(review): some statements are elided in this excerpt — consult the
// full file before modifying.
1187 static bool DisassembleDPSoRegImmFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1188 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1190 const MCInstrDesc &MCID = ARMInsts[Opcode];
1191 unsigned short NumDefs = MCID.getNumDefs();
1192 bool isUnary = isUnaryDP(MCID.TSFlags);
1193 const MCOperandInfo *OpInfo = MCID.OpInfo;
1194 unsigned &OpIdx = NumOpsAdded;
1198 // Disassemble register def if there is one.
1199 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1200 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1205 // Disassemble the src operands.
1206 if (OpIdx >= NumOps)
1209 // BinaryDP has an Rn operand.
1211 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1212 "Reg operand expected");
1213 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1218 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1219 if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
1220 MI.addOperand(MCOperand::CreateReg(0));
1224 // Disassemble operand 2, which consists of two components.
1225 if (OpIdx + 1 >= NumOps)
1228 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1229 (OpInfo[OpIdx+1].RegClass < 0) &&
1230 "Expect 2 reg operands");
1232 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1235 // Inst{6-5} encodes the shift opcode.
1236 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1237 // Inst{11-7} encodes the imm5 shift amount.
1238 unsigned ShImm = slice(insn, 11, 7);
1240 // A8.4.1. Possible rrx or shift amount of 32...
1241 getImmShiftSE(ShOp, ShImm);
1242 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Sanity-check the register fields of a load/store (word/byte) instruction
// against the ARM ARM UNPREDICTABLE rules: rejects PC as the transfer
// register for STRB*/LDRB*, writeback clashing with Rn/Rt, Rm == PC in
// register-offset forms, and malformed literal-form addressing.
// Returns true when the encoding should be rejected.
// NOTE(review): branch structure is partially elided in this excerpt;
// consult the full file before modifying.
1250 static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBack,
1252 const StringRef Name = ARMInsts[Opcode].Name;
1253 unsigned Rt = decodeRd(insn);
1254 unsigned Rn = decodeRn(insn);
1255 unsigned Rm = decodeRm(insn);
1256 unsigned P = getPBit(insn);
1257 unsigned W = getWBit(insn);
1260 // Only STR (immediate, register) allows PC as the source.
1261 if (Name.startswith("STRB") && Rt == 15) {
1262 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1265 if (WBack && (Rn == 15 || Rn == Rt)) {
1266 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
1269 if (!Imm && Rm == 15) {
1270 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1274 // Only LDR (immediate, register) allows PC as the destination.
1275 if (Name.startswith("LDRB") && Rt == 15) {
1276 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1282 // The literal form must be in offset mode; it's an encoding error
1284 if (!(P == 1 && W == 0)) {
1285 DEBUG(errs() << "Ld literal form with !(P == 1 && W == 0)\n");
1288 // LDRB (literal) does not allow PC as the destination.
1289 if (Opcode != ARM::LDRi12 && Rt == 15) {
1290 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1294 // Write back while Rn == Rt does not make sense.
1295 if (WBack && (Rn == Rt)) {
1296 DEBUG(errs() << "if wback && n == t then UNPREDICTABLE\n");
1303 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1306 if (WBack && (Rn == 15 || Rn == Rt)) {
1307 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
// Common worker for load/store (word/byte) disassembly, shared by
// DisassembleLdFrm and DisassembleStFrm via the isStore flag.
// Operand order built here: [base-writeback for pre/post store,] dst/src Rt,
// [base-writeback for pre/post load,] base Rn, then either a +/- imm12
// offset (I bit == 0... see getIBit below) or an Rm + shift-op immediate
// (Addressing Mode #2).
// NOTE(review): several statements (returns, OpIdx updates, brace closings)
// are elided in this excerpt — consult the full file before modifying.
1315 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1316 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1318 const MCInstrDesc &MCID = ARMInsts[Opcode];
1319 bool isPrePost = isPrePostLdSt(MCID.TSFlags);
1320 const MCOperandInfo *OpInfo = MCID.OpInfo;
1321 if (!OpInfo) return false;
1323 unsigned &OpIdx = NumOpsAdded;
1327 assert(((!isStore && MCID.getNumDefs() > 0) ||
1328 (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
1329 && "Invalid arguments");
1331 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1332 if (isPrePost && isStore) {
1333 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1334 "Reg operand expected");
1335 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1340 // Disassemble the dst/src operand.
1341 if (OpIdx >= NumOps)
1344 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1345 "Reg operand expected");
1346 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1350 // After dst of a pre- and post-indexed load is the address base writeback.
1351 if (isPrePost && !isStore) {
1352 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1353 "Reg operand expected");
1354 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1359 // Disassemble the base operand.
1360 if (OpIdx >= NumOps)
1363 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1364 "Reg operand expected");
1365 assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
1366 && "Index mode or tied_to operand expected");
1367 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1371 // For reg/reg form, base reg is followed by +/- reg (shift-op) imm.
1372 // For immediate form, it is followed by +/- imm12.
1373 // See also ARMAddressingModes.h (Addressing Mode #2).
1374 if (OpIdx + 1 >= NumOps)
1377 if (BadRegsLdStFrm(Opcode, insn, isStore, isPrePost, getIBit(insn)==0))
1380 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1381 unsigned IndexMode =
1382 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1383 if (getIBit(insn) == 0) {
1384 // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1385 // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1388 MI.addOperand(MCOperand::CreateReg(0));
1392 unsigned Imm12 = slice(insn, 11, 0);
1393 if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1394 Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1395 // Disassemble the 12-bit immediate offset, which is the second operand in
1396 // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
1397 int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1398 MI.addOperand(MCOperand::CreateImm(Offset));
1400 // Disassemble the 12-bit immediate offset, which is the second operand in
1401 // $am2offset => (ops GPR, i32imm).
1402 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1404 MI.addOperand(MCOperand::CreateImm(Offset));
1408 // If Inst{25} = 1 and Inst{4} != 0, we should reject this as invalid.
1409 if (slice(insn,4,4) == 1)
1412 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1413 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1415 // Inst{6-5} encodes the shift opcode.
1416 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1417 // Inst{11-7} encodes the imm5 shift amount.
1418 unsigned ShImm = slice(insn, 11, 7);
1420 // A8.4.1. Possible rrx or shift amount of 32...
1421 getImmShiftSE(ShOp, ShImm);
1422 MI.addOperand(MCOperand::CreateImm(
1423 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
// Load-instruction entry point: delegates to the shared load/store worker
// with isStore == false.
1430 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1431 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1432 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
// Store-instruction entry point: delegates to the shared load/store worker
// with isStore == true.
1435 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1436 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1437 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
// Returns true for the dual-register load/store opcodes (LDRD/STRD and their
// pre-/post-indexed variants), which transfer a register pair Rt, Rt+1.
// NOTE(review): the switch scaffolding around the case labels is elided in
// this excerpt — consult the full file before modifying.
1440 static bool HasDualReg(unsigned Opcode) {
1444 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1445 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// Common worker for the "miscellaneous" load/store forms (halfword, signed
// byte/halfword, and dual-register LDRD/STRD) using Addressing Mode #3:
// base Rn followed by either a +/- imm8 (split across Imm4H:Imm4L fields)
// or a +/- register Rm. Dual-register opcodes get Rt and Rt+1 operands.
// Shared by DisassembleLdMiscFrm and DisassembleStMiscFrm via isStore.
// NOTE(review): several statements are elided in this excerpt — consult the
// full file before modifying.
1450 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1451 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1453 const MCInstrDesc &MCID = ARMInsts[Opcode];
1454 bool isPrePost = isPrePostLdSt(MCID.TSFlags);
1455 const MCOperandInfo *OpInfo = MCID.OpInfo;
1456 if (!OpInfo) return false;
1458 unsigned &OpIdx = NumOpsAdded;
1462 assert(((!isStore && MCID.getNumDefs() > 0) ||
1463 (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
1464 && "Invalid arguments");
1466 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1467 if (isPrePost && isStore) {
1468 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1469 "Reg operand expected");
1470 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1475 // Disassemble the dst/src operand.
1476 if (OpIdx >= NumOps)
1479 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1480 "Reg operand expected");
1481 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1485 // Fill in LDRD and STRD's second operand Rt operand.
1486 if (HasDualReg(Opcode)) {
1487 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1488 decodeRd(insn) + 1)));
1492 // After dst of a pre- and post-indexed load is the address base writeback.
1493 if (isPrePost && !isStore) {
1494 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1495 "Reg operand expected");
1496 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1501 // Disassemble the base operand.
1502 if (OpIdx >= NumOps)
1505 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1506 "Reg operand expected");
1507 assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
1508 && "Offset mode or tied_to operand expected");
1509 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1513 // For reg/reg form, base reg is followed by +/- reg.
1514 // For immediate form, it is followed by +/- imm8.
1515 if (OpIdx + 1 >= NumOps)
1518 unsigned IndexMode =
1519 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1520 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1521 if (getAM3IBit(insn) == 1) {
1522 // FIXME: Conditional while in the midst of refactoring addrmode3. Will
1523 // go away entirely when the rest are converted.
1524 if (Opcode != ARM::STRHTi && Opcode != ARM::LDRSBTi &&
1525 Opcode != ARM::LDRHTi && Opcode != ARM::LDRSHTi) {
1526 MI.addOperand(MCOperand::CreateReg(0));
1530 // Disassemble the 8-bit immediate offset (postidx_imm8).
1531 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1532 unsigned Imm4L = insn & 0xF;
1534 // FIXME: Remove the 'else' once done w/ addrmode3 refactor.
1535 if (Opcode == ARM::STRHTi || Opcode == ARM::LDRSBTi ||
1536 Opcode == ARM::LDRHTi || Opcode == ARM::LDRSHTi)
1537 Offset = (Imm4H << 4) | Imm4L | (getUBit(insn) << 8);
1539 Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1542 MI.addOperand(MCOperand::CreateImm(Offset));
1545 // Disassemble the offset reg (Rm).
1546 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1548 // FIXME: Remove the 'else' once done w/ addrmode3 refactor.
1549 if (Opcode == ARM::STRHTr || Opcode == ARM::LDRSBTr ||
1550 Opcode == ARM::LDRHTr || Opcode == ARM::LDRSHTr)
1551 MI.addOperand(MCOperand::CreateImm(getUBit(insn)));
1553 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
1554 MI.addOperand(MCOperand::CreateImm(Offset));
// Miscellaneous-load entry point: delegates to the shared misc ld/st worker
// with isStore == false.
1562 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1563 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1564 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
// Miscellaneous-store entry point: delegates to the shared misc ld/st worker
// with isStore == true.
1568 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1569 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1570 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1573 // The algorithm for disassembly of LdStMulFrm is different from others because
1574 // it explicitly populates the two predicate operands after the base register.
1575 // After that, we need to populate the reglist with each affected register
1576 // encoded as an MCOperand.
// Disassemble a load/store-multiple (LDM/STM) instruction: optional base
// writeback for the _UPD variants, the base register, the two predicate
// operands (cond imm + CPSR reg), then one register operand per set bit in
// the 16-bit register list Inst{15-0}.
1577 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1578 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1580 assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1583 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1585 // Writeback to base, if necessary.
1586 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1587 Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1588 Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1589 Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1590 MI.addOperand(MCOperand::CreateReg(Base));
1594 // Add the base register operand.
1595 MI.addOperand(MCOperand::CreateReg(Base))
1597 // Handling the two predicate operands before the reglist.
1598 int64_t CondVal = getCondField(insn);
1601 MI.addOperand(MCOperand::CreateImm(CondVal));
1602 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1606 // Fill the variadic part of reglist.
1607 unsigned RegListBits = insn & ((1 << 16) - 1);
1608 for (unsigned i = 0; i < 16; ++i) {
1609 if ((RegListBits >> i) & 1) {
1610 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1619 // LDREX, LDREXB, LDREXH: Rd Rn
1620 // LDREXD: Rd Rd+1 Rn
1621 // STREX, STREXB, STREXH: Rd Rm Rn
1622 // STREXD: Rd Rm Rm+1 Rn
1624 // SWP, SWPB: Rd Rm Rn
// Disassemble load/store exclusive (LDREX*/STREX*) and SWP/SWPB.
// Inst{20} == 0 means a store-exclusive (has an extra source Rm); the D/W
// (doubleword) variants LDREXD/STREXD additionally add the +1 paired
// register. The pointer register Rn is added last.
// NOTE(review): several statements are elided in this excerpt — consult the
// full file before modifying.
1625 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1626 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1628 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1629 if (!OpInfo) return false;
1631 unsigned &OpIdx = NumOpsAdded;
1636 && OpInfo[0].RegClass == ARM::GPRRegClassID
1637 && OpInfo[1].RegClass == ARM::GPRRegClassID
1638 && "Expect 2 reg operands");
1640 bool isStore = slice(insn, 20, 20) == 0;
1641 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1643 // Add the destination operand.
1644 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1648 // Store register Exclusive needs a source operand.
1650 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1655 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1656 decodeRm(insn)+1)));
1660 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1661 decodeRd(insn)+1)));
1665 // Finally add the pointer operand.
1666 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1673 // Misc. Arithmetic Instructions.
1675 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1676 // RBIT, REV, REV16, REVSH: Rd Rm
// Disassemble the miscellaneous-arithmetic format: PKHBT/PKHTB (three regs
// plus an imm5 LSL/ASR shift) and RBIT/REV/REV16/REVSH (two regs). Rejects
// encodings where any participating register field is 15 (PC).
// NOTE(review): several statements are elided in this excerpt — consult the
// full file before modifying.
1677 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1678 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1680 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1681 unsigned &OpIdx = NumOpsAdded;
1686 && OpInfo[0].RegClass == ARM::GPRRegClassID
1687 && OpInfo[1].RegClass == ARM::GPRRegClassID
1688 && "Expect 2 reg operands");
1690 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1692 // Sanity check the registers, which should not be 15.
1693 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1695 if (ThreeReg && decodeRn(insn) == 15)
1698 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1703 assert(NumOps >= 4 && "Expect >= 4 operands");
1704 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1709 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1713 // If there is still an operand info left which is an immediate operand, add
1714 // an additional imm5 LSL/ASR operand.
1715 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1716 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1717 // Extract the 5-bit immediate field Inst{11-7}.
1718 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1719 if (Opcode == ARM::PKHBT || Opcode == ARM::PKHTB)
1720 MI.addOperand(MCOperand::CreateImm(ShiftAmt));
1722 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ARM_AM::no_shift,
1730 /// DisassembleSatFrm - Disassemble saturate instructions:
1731 /// SSAT, SSAT16, USAT, and USAT16.
// Disassemble SSAT/SSAT16/USAT/USAT16: Rd, saturate position Inst{20-16},
// Rm, and (for the 4-operand SSAT/USAT forms) a shift immediate where
// Inst{6} selects ASR vs LSL and Inst{11-7} is the imm5 amount. Rejects
// encodings with Rd or Rm == 15.
1732 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1733 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1736 // if d == 15 || n == 15 then UNPREDICTABLE;
1737 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1740 const MCInstrDesc &MCID = ARMInsts[Opcode];
1741 NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands
1743 // Disassemble register def.
1744 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1747 unsigned Pos = slice(insn, 20, 16);
1748 MI.addOperand(MCOperand::CreateImm(Pos));
1750 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1753 if (NumOpsAdded == 4) {
1754 // Inst{6} encodes the shift type.
1755 bool isASR = slice(insn, 6, 6);
1756 // Inst{11-7} encodes the imm5 shift amount.
1757 unsigned ShAmt = slice(insn, 11, 7);
1758 MI.addOperand(MCOperand::CreateImm(isASR << 5 | ShAmt));
1763 // Extend instructions.
1764 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1765 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1766 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// Disassemble the SXT*/UXT* extend instructions: Rd, optional Rn (three-reg
// form), Rm, and a 2-bit rotate immediate from Inst{11-10} when the operand
// info still expects an immediate. Rejects Rd or Rm == 15.
// NOTE(review): some statements are elided in this excerpt — consult the
// full file before modifying.
1767 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1768 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1771 // if d == 15 || m == 15 then UNPREDICTABLE;
1772 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1775 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1776 unsigned &OpIdx = NumOpsAdded;
1781 && OpInfo[0].RegClass == ARM::GPRRegClassID
1782 && OpInfo[1].RegClass == ARM::GPRRegClassID
1783 && "Expect 2 reg operands");
1785 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1787 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1792 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1797 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1801 // If there is still an operand info left which is an immediate operand, add
1802 // an additional rotate immediate operand.
1803 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1804 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() {
1805 // Extract the 2-bit rotate field Inst{11-10}.
1806 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1807 MI.addOperand(MCOperand::CreateImm(rot));
1814 /////////////////////////////////////
1816 // Utility Functions For VFP //
1818 /////////////////////////////////////
1820 // Extract/Decode Dd/Sd:
1822 // SP => d = UInt(Vd:D)
1823 // DP => d = UInt(D:Vd)
// Decode the VFP destination register index: Vd:D for single precision,
// D:Vd for double precision (see the SP/DP rules in the comment above).
1824 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1825 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1826 : (decodeRd(insn) | getDBit(insn) << 4);
1829 // Extract/Decode Dn/Sn:
1831 // SP => n = UInt(Vn:N)
1832 // DP => n = UInt(N:Vn)
// Decode the VFP first-source register index: Vn:N for single precision,
// N:Vn for double precision.
1833 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1834 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1835 : (decodeRn(insn) | getNBit(insn) << 4);
1838 // Extract/Decode Dm/Sm:
1840 // SP => m = UInt(Vm:M)
1841 // DP => m = UInt(M:Vm)
// Decode the VFP second-source register index: Vm:M for single precision,
// M:Vm for double precision.
1842 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1843 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1844 : (decodeRm(insn) | getMBit(insn) << 4);
// Expand the 8-bit VFP modified-immediate 'byte' into the bit pattern of an
// N-bit (32 or 64) floating-point constant: sign from bit 7, exponent built
// from inverted bit 6 replicated per A7.4.6 VFPExpandImm, mantissa from
// bits 5-0 shifted into the high fraction bits.
// NOTE(review): the Result declaration and if/else scaffolding selecting
// the 32- vs 64-bit path are elided in this excerpt — consult the full file
// before modifying.
1848 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1849 assert(N == 32 || N == 64);
1852 unsigned bit6 = slice(byte, 6, 6);
1854 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1856 Result |= 0x1f << 25;
1858 Result |= 0x1 << 30;
1860 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1861 (uint64_t)slice(byte, 5, 0) << 48;
1863 Result |= 0xffULL << 54;
1865 Result |= 0x1ULL << 62;
1867 return APInt(N, Result);
1870 // VFP Unary Format Instructions:
1872 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1873 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1874 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// Disassemble a VFP unary-format instruction: adds Dd/Sd, then (unless the
// opcode is a compare-with-zero, which has no Rm) Dm/Sm. The SP/DP register
// class of each operand is taken from the operand info, so VCVTDS/VCVTSD
// with mixed precision are handled naturally.
1875 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1876 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1878 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1880 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1881 unsigned &OpIdx = NumOpsAdded;
1885 unsigned RegClass = OpInfo[OpIdx].RegClass;
1886 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1887 "Reg operand expected");
1888 bool isSP = (RegClass == ARM::SPRRegClassID);
1890 MI.addOperand(MCOperand::CreateReg(
1891 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1894 // Early return for compare with zero instructions.
1895 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1896 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
1899 RegClass = OpInfo[OpIdx].RegClass;
1900 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1901 "Reg operand expected");
1902 isSP = (RegClass == ARM::SPRRegClassID);
1904 MI.addOperand(MCOperand::CreateReg(
1905 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1911 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1912 // Some of them have operand constraints which tie the first operand in the
1913 // InOperandList to that of the dst. As far as asm printing is concerned, this
1914 // tied_to operand is simply skipped.
// Disassemble a VFP binary-format instruction with homogeneous Rd/Rn/Rm
// register classes: dst first, then a dummy reg0 for a TIED_TO constraint
// if present (e.g. multiply-accumulate forms), then Rn and Rm.
1915 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1916 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1918 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1920 const MCInstrDesc &MCID = ARMInsts[Opcode];
1921 const MCOperandInfo *OpInfo = MCID.OpInfo;
1922 unsigned &OpIdx = NumOpsAdded;
1926 unsigned RegClass = OpInfo[OpIdx].RegClass;
1927 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1928 "Reg operand expected");
1929 bool isSP = (RegClass == ARM::SPRRegClassID);
1931 MI.addOperand(MCOperand::CreateReg(
1932 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1935 // Skip tied_to operand constraint.
1936 if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
1937 assert(NumOps >= 4 && "Expect >=4 operands");
1938 MI.addOperand(MCOperand::CreateReg(0));
1942 MI.addOperand(MCOperand::CreateReg(
1943 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1946 MI.addOperand(MCOperand::CreateReg(
1947 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1953 // A8.6.295 vcvt (floating-point <-> integer)
1954 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1955 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1957 // A8.6.297 vcvt (floating-point and fixed-point)
1958 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// Disassemble VCVT between floating point and integer (A8.6.295) or fixed
// point (A8.6.297). Fixed-point forms add Dd|Sd, a tied copy of it, and the
// computed #fbits; FP<->integer forms add the destination and source with
// single precision forced on the integer side (Inst{18} selects direction).
1959 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1960 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1962 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1964 const MCInstrDesc &MCID = ARMInsts[Opcode];
1965 const MCOperandInfo *OpInfo = MCID.OpInfo;
1966 if (!OpInfo) return false;
1968 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1969 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1970 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1974 assert(NumOps >= 3 && "Expect >= 3 operands");
1975 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1976 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1977 MI.addOperand(MCOperand::CreateReg(
1978 getRegisterEnum(B, RegClassID,
1979 decodeVFPRd(insn, SP))));
1981 assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
1982 "Tied to operand expected");
1983 MI.addOperand(MI.getOperand(0));
1985 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1986 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1987 MI.addOperand(MCOperand::CreateImm(fbits));
1992 // The Rd (destination) and Rm (source) bits have different interpretations
1993 // depending on whether each side is single precision.
1995 if (slice(insn, 18, 18) == 1) { // to_integer operation
1996 d = decodeVFPRd(insn, true /* Is Single Precision */);
1997 MI.addOperand(MCOperand::CreateReg(
1998 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1999 m = decodeVFPRm(insn, SP);
2000 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
2002 d = decodeVFPRd(insn, SP);
2003 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
2004 m = decodeVFPRm(insn, true /* Is Single Precision */);
2005 MI.addOperand(MCOperand::CreateReg(
2006 getRegisterEnum(B, ARM::SPRRegClassID, m)));
2014 // VMOVRS - A8.6.330
2015 // Rt => Rd; Sn => UInt(Vn:N)
// Disassemble VMOVRS (A8.6.330): GPR destination Rt from single-precision
// source Sn (Vn:N).
2016 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2017 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2019 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
2021 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2023 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2024 decodeVFPRn(insn, true))));
2029 // VMOVRRD - A8.6.332
2030 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
2032 // VMOVRRS - A8.6.331
2033 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Disassemble VMOVRRD/VMOVRRS (A8.6.331/332): two GPR destinations Rt/Rt2
// followed by either the SPR pair Sm, Sm+1 or the single DPR Dm, chosen by
// the operand info's register class.
2034 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2035 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2037 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
2039 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2040 unsigned &OpIdx = NumOpsAdded;
2042 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2044 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2048 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
2049 unsigned Sm = decodeVFPRm(insn, true);
2050 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2052 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2056 MI.addOperand(MCOperand::CreateReg(
2057 getRegisterEnum(B, ARM::DPRRegClassID,
2058 decodeVFPRm(insn, false))));
2064 // VMOVSR - A8.6.330
2065 // Rt => Rd; Sn => UInt(Vn:N)
// Disassemble VMOVSR (A8.6.330): single-precision destination Sn (Vn:N)
// from GPR source Rt — the reverse direction of VFPConv2Frm.
2066 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2067 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2069 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
2071 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2072 decodeVFPRn(insn, true))));
2073 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2079 // VMOVDRR - A8.6.332
2080 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
2082 // VMOVRRS - A8.6.331
2083 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Disassemble VMOVDRR/VMOVSRR (A8.6.331/332): the VFP destination(s) —
// either the SPR pair Sm, Sm+1 or the single DPR Dm, per the operand info —
// followed by the two GPR sources Rt/Rt2. Mirror image of VFPConv3Frm.
2084 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2085 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2087 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
2089 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2090 unsigned &OpIdx = NumOpsAdded;
2094 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
2095 unsigned Sm = decodeVFPRm(insn, true);
2096 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2098 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2102 MI.addOperand(MCOperand::CreateReg(
2103 getRegisterEnum(B, ARM::DPRRegClassID,
2104 decodeVFPRm(insn, false))));
2108 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2110 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2116 // VFP Load/Store Instructions.
2117 // VLDRD, VLDRS, VSTRD, VSTRS
// Disassemble VLDRD/VLDRS/VSTRD/VSTRS: Dd/Sd (SP vs DP chosen by opcode),
// the GPR base Rn, then the Addressing Mode #5 operand built from the U bit
// (add/sub) and the 8-bit immediate Inst{7-0}.
2118 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2119 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2121 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
2123 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
2124 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2126 // Extract Dd/Sd for operand 0.
2127 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2129 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
2131 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2132 MI.addOperand(MCOperand::CreateReg(Base));
2134 // Next comes the AM5 Opcode.
2135 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2136 unsigned char Imm8 = insn & 0xFF;
2137 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
2144 // VFP Load/Store Multiple Instructions.
2145 // We have an optional write back reg, the base, and two predicate operands.
2146 // It is then followed by a reglist of either DPR(s) or SPR(s).
2148 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
// Disassemble VLDM/VSTM (D and S variants, with optional writeback):
// optional base writeback for _UPD opcodes, base Rn, the two predicate
// operands, then the register list derived from Dd/Sd and the 8-bit
// immediate (register count; halved for the DPR variants). Rejects lists
// that are empty, run past register 31, or exceed 16 DPRs.
2149 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2150 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2152 assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
2154 unsigned &OpIdx = NumOpsAdded;
2158 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2160 // Writeback to base, if necessary.
2161 if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
2162 Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
2163 Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
2164 Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
2165 MI.addOperand(MCOperand::CreateReg(Base));
2169 MI.addOperand(MCOperand::CreateReg(Base));
2171 // Handling the two predicate operands before the reglist.
2172 int64_t CondVal = getCondField(insn);
2175 MI.addOperand(MCOperand::CreateImm(CondVal));
2176 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
2180 bool isSPVFP = (Opcode == ARM::VLDMSIA ||
2181 Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
2182 Opcode == ARM::VSTMSIA ||
2183 Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
2184 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2187 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2189 // Fill the variadic part of reglist.
2190 unsigned char Imm8 = insn & 0xFF;
2191 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
2193 // Apply some sanity checks before proceeding.
2194 if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
2197 for (unsigned i = 0; i < Regs; ++i) {
2198 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
2206 // Misc. VFP Instructions.
2207 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
2208 // FCONSTD (DPR and a VFPf64Imm operand)
2209 // FCONSTS (SPR and a VFPf32Imm operand)
2210 // VMRS/VMSR (GPR operand)
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // FMSTAT transfers to apsr_nzcv and carries no explicit register operand
  // (see the comment block above).
  if (Opcode == ARM::FMSTAT)

  assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");

  // Operand 0's register class selects how the register number is decoded.
  unsigned RegEnum = 0;
  switch (OpInfo[0].RegClass) {
  case ARM::DPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
  case ARM::SPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
  case ARM::GPRRegClassID:
    RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
    assert(0 && "Invalid reg class id");
  // NOTE(review): the break statements between the cases above are elided in
  // this excerpt.

  MI.addOperand(MCOperand::CreateReg(RegEnum));

  // Extract/decode the f64/f32 immediate.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // The asm syntax specifies the floating point value, not the 8-bit literal.
    // Rebuild the full bit pattern from the 8-bit encoding: i:imm3 comes
    // from Inst{19-16}, imm4 from Inst{3-0}; width depends on FCONSTD/S.
    APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
                                Opcode == ARM::FCONSTD ? 64 : 32);
    APFloat immFP = APFloat(immRaw, true);
    double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
                 immFP.convertToFloat();
    MI.addOperand(MCOperand::CreateFPImm(imm));
2260 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
2261 #include "ThumbDisassemblerCore.h"
2263 /////////////////////////////////////////////////////
2265 // Utility Functions For ARM Advanced SIMD //
2267 /////////////////////////////////////////////////////
2269 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
2270 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
2272 // A7.3 Register encoding
2274 // Extract/Decode NEON D/Vd:
2276 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2277 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2278 // handling it in the getRegisterEnum() utility function.
2279 // D = Inst{22}, Vd = Inst{15-12}
2280 static unsigned decodeNEONRd(uint32_t insn) {
2281 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2282 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2285 // Extract/Decode NEON N/Vn:
2287 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2288 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2289 // handling it in the getRegisterEnum() utility function.
2290 // N = Inst{7}, Vn = Inst{19-16}
2291 static unsigned decodeNEONRn(uint32_t insn) {
2292 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2293 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2296 // Extract/Decode NEON M/Vm:
2298 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2299 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2300 // handling it in the getRegisterEnum() utility function.
2301 // M = Inst{5}, Vm = Inst{3-0}
2302 static unsigned decodeNEONRm(uint32_t insn) {
2303 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2304 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2315 } // End of unnamed namespace
2317 // size field -> Inst{11-10}
2318 // index_align field -> Inst{7-4}
2320 // The Lane Index interpretation depends on the Data Size:
2321 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2322 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2323 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2325 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
static unsigned decodeLaneIndex(uint32_t insn) {
  // size (Inst{11-10}) determines how many of the index_align bits form the
  // lane index: 8-bit data keeps [3:1], 16-bit keeps [3:2], 32-bit keeps [3].
  unsigned Size = (insn >> 10) & 3;
  assert(Size <= 2 &&
         "Encoding error: size should be either 0, 1, or 2");

  // index_align = Inst{7-4}; discard the low (align) bit, then 'Size' more.
  unsigned IndexAlign = (insn >> 4) & 0xF;
  return (IndexAlign >> 1) >> Size;
}
2335 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2336 // op = Inst{5}, cmode = Inst{11-8}
2337 // i = Inst{24} (ARM architecture)
2338 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2339 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2340 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2341 unsigned char op = (insn >> 5) & 1;
2342 unsigned char cmode = (insn >> 8) & 0xF;
2343 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2344 ((insn >> 16) & 7) << 4 |
2346 return (op << 12) | (cmode << 8) | Imm8;
2349 // A8.6.339 VMUL, VMULL (by scalar)
2350 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2351 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
2352 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2359 assert(0 && "Unreachable code!");
2364 // A8.6.339 VMUL, VMULL (by scalar)
2365 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2366 // ESize32 => index = Inst{5} (M) D0-D15
2367 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2370 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2372 return (insn >> 5) & 1;
2374 assert(0 && "Unreachable code!");
2379 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2380 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
static unsigned decodeVCVTFractionBits(uint32_t insn) {
  // A8.6.296: the encoding stores (64 - <fbits>) in imm6 = Inst{21-16};
  // recover the fraction-bit count.
  unsigned imm6 = (insn >> 16) & 0x3F;
  return 64 - imm6;
}
2385 // A8.6.302 VDUP (scalar)
2386 // ESize8 => index = Inst{19-17}
2387 // ESize16 => index = Inst{19-18}
2388 // ESize32 => index = Inst{19}
2389 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2392 return (insn >> 17) & 7;
2394 return (insn >> 18) & 3;
2396 return (insn >> 19) & 1;
2398 assert(0 && "Unspecified element size!");
2403 // A8.6.328 VMOV (ARM core register to scalar)
2404 // A8.6.329 VMOV (scalar to ARM core register)
2405 // ESize8 => index = Inst{21:6-5}
2406 // ESize16 => index = Inst{21:6}
2407 // ESize32 => index = Inst{21}
2408 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2411 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2413 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2415 return ((insn >> 21) & 1);
2417 assert(0 && "Unspecified element size!");
2422 // Imm6 = Inst{21-16}, L = Inst{7}
2424 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2426 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2427 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2428 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2429 // '1xxxxxx' => esize = 64; shift_amount = imm6
2431 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2433 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2434 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2435 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2436 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
// Decodes the NEON shift amount from L:imm6 (Inst{7}, Inst{21-16}); the
// position of the leading one bit classifies the element size (see the
// encoding table above), and the amount is derived differently for left
// vs right shifts.
static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
  ElemSize esize = ESizeNA;
  unsigned L = (insn >> 7) & 1;
  unsigned imm6 = (insn >> 16) & 0x3F;
  // Classify esize from the top set bit of imm6 (L==1 means esize 64).
  else if (imm6 >> 4 == 1)
  else if (imm6 >> 5 == 1)
    assert(0 && "Wrong encoding of Inst{7:21-16}!");
  // Left shifts encode imm6 - esize (imm6 directly for esize 64); right
  // shifts encode the amount relative to 2*esize (esize for esize 64).
    return esize == ESize64 ? imm6 : (imm6 - esize);
    return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
  // NOTE(review): the if/else skeleton around the branches above is elided
  // in this excerpt.
2461 // Imm4 = Inst{11-8}
static unsigned decodeN3VImm(uint32_t insn) {
  // Extract the 4-bit immediate Imm4 = Inst{11-8}.
  return (insn & 0xF00u) >> 8;
}
2467 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2469 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2471 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2473 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2475 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
// Worker for NEON load/store: builds the operand list in the order shown in
// the comment above (loads put the DPR/QPR list first, stores put the
// address first), including the writeback def, the addrmode6 pair
// (base reg + alignment), the optional increment register Rm, any TIED_TO
// placeholders, and a trailing lane index for the to-one-lane forms.
static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
    unsigned alignment, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  // At least one DPR register plus addressing mode #6.
  assert(NumOps >= 3 && "Expect >= 3 operands");

  unsigned &OpIdx = NumOpsAdded;

  // We have homogeneous NEON registers for Load/Store.
  unsigned RegClass = 0;

  // Double-spaced registers have increments of 2.
  unsigned Inc = DblSpaced ? 2 : 1;

  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned Rd = decodeNEONRd(insn);

  // A7.7.1 Advanced SIMD addressing mode.
  // LLVM Addressing Mode #6.
  unsigned RmEnum = 0;
    RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);

    // Store path:
    // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
    // then possible lane index.
    assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");

      // Writeback def of the base register.
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    // addrmode6 := (ops GPR:$addr, i32imm)
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment

      MI.addOperand(MCOperand::CreateReg(RmEnum));

    assert(OpIdx < NumOps &&
           (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
            OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
           "Reg operand expected");

    // Emit the homogeneous run of DPR/QPR list registers.
    RegClass = OpInfo[OpIdx].RegClass;
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));

    // Load path:
    // Consume the DPR/QPR's, possible WB, AddrMode6, possible incrment reg,
    // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
    RegClass = OpInfo[0].RegClass;

    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(B, RegClass, Rd)));

      // Writeback def of the base register.
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

    assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
    // addrmode6 := (ops GPR:$addr, i32imm)
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment

      MI.addOperand(MCOperand::CreateReg(RmEnum));

    // TIED_TO source registers are emitted as placeholder register 0.
    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
      assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 &&
             "Tied to operand expected");
      MI.addOperand(MCOperand::CreateReg(0));

    // Handle possible lane index.
    if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
        && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
      MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
  // NOTE(review): numerous statements (branch headers, Rd += Inc increments,
  // OpIdx updates, closing braces, final range check and return) are elided
  // in this excerpt — verify against the full source.
2599 // Accessing registers past the end of the NEON register file is not
2607 // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
// Validates the index_align field for the "single N-element structure to one
// lane" encodings (A8.6.308, A8.6.311, A8.6.314, A8.6.317) given the number
// of elements and the data size, writing the implied alignment through the
// out-parameter; returns false for encodings the architecture leaves
// UNDEFINED.
static bool Align4OneLaneInst(unsigned elem, unsigned size,
                              unsigned index_align, unsigned & alignment) {
    return slice(index_align, 0, 0) == 0;
  else if (size == 1) {
    bits = slice(index_align, 1, 0);
    if (bits != 0 && bits != 1)
  } else if (size == 2) {
    bits = slice(index_align, 2, 0);
    if (bits != 0 && bits != 3)
  // elem-dependent sections follow; each checks the low index_align bits.
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) == 1)
  } else if (size == 2) {
    if (slice(index_align, 1, 1) != 0)
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) != 0)
    if (slice(index_align, 0, 0) != 0)
  } else if (size == 2) {
    if (slice(index_align, 1, 0) != 0)
    if (slice(index_align, 0, 0) == 1)
    if (slice(index_align, 0, 0) == 1)
  } else if (size == 2) {
    bits = slice(index_align, 1, 0);
  // NOTE(review): this excerpt elides the switch on 'elem', the 'bits'
  // declaration, the alignment assignments, and most return statements —
  // verify against the full source / the referenced ARM ARM sections.
2694 // If L (Inst{21}) == 0, store instructions.
2695 // Find out about double-spaced-ness of the Opcode and pass it on to
2696 // DisassembleNLdSt0().
static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const StringRef Name = ARMInsts[Opcode].Name;
  bool DblSpaced = false;
  // 0 represents standard alignment, i.e., unaligned data access.
  unsigned alignment = 0;

  // Deduce the element count from the opcode name prefix.
  unsigned elem = 0; // legal values: {1, 2, 3, 4}
  if (Name.startswith("VST1") || Name.startswith("VLD1"))
  if (Name.startswith("VST2") || Name.startswith("VLD2"))
  if (Name.startswith("VST3") || Name.startswith("VLD3"))
  if (Name.startswith("VST4") || Name.startswith("VLD4"))

  if (Name.find("LN") != std::string::npos) {
    // To one lane instructions.
    // See, for example, 8.6.317 VLD4 (single 4-element structure to one lane).
    // Utility function takes number of elements, size, and index_align.
    if (!Align4OneLaneInst(elem,
                           slice(insn, 11, 10),

    // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
    if (Name.endswith("16") || Name.endswith("16_UPD"))
      DblSpaced = slice(insn, 5, 5) == 1;

    // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
    if (Name.endswith("32") || Name.endswith("32_UPD"))
      DblSpaced = slice(insn, 6, 6) == 1;
  } else if (Name.find("DUP") != std::string::npos) {
    // Single element (or structure) to all lanes.
    // Inst{9-8} encodes the number of element(s) in the structure, with:
    // 0b00 (VLD1DUP) (for this, a bit makes sense only for data size 16 and 32.
    // 0b10 (VLD3DUP) (for this, a bit must be encoded as 0)
    // Inst{7-6} encodes the data size, with:
    // 0b00 => 8, 0b01 => 16, 0b10 => 32
    // Inst{4} (the a bit) encodes the align action (0: standard alignment)
    unsigned elem = slice(insn, 9, 8) + 1;
    unsigned a = slice(insn, 4, 4);

    // 0b11 is not a valid encoding for Inst{7-6}.
    if (slice(insn, 7, 6) == 3)
    unsigned data_size = 8 << slice(insn, 7, 6);
    // For VLD1DUP, a bit makes sense only for data size of 16 and 32.
    if (a && data_size == 8)

    // Now we can calculate the alignment!
      alignment = elem * data_size;
    // A8.6.315 VLD3 (single 3-element structure to all lanes)
    // The a bit must be encoded as 0.

    // Multiple-structure path:
    // Multiple n-element structures with type encoded as Inst{11-8}.
    // See, for example, A8.6.316 VLD4 (multiple 4-element structures).

    // Inst{5-4} encodes alignment.
    unsigned align = slice(insn, 5, 4);
      alignment = 64; break;
      alignment = 128; break;
      alignment = 256; break;

    unsigned type = slice(insn, 11, 8);
    // Reject UNDEFINED instructions based on type and align.
    // Plus set DblSpaced flag where appropriate.
      // A8.6.307 & A8.6.391
      if ((type == 7 && slice(align, 1, 1) == 1) ||
          (type == 10 && align == 3) ||
          (type == 6 && slice(align, 1, 1) == 1))
      // n == 2 && type == 0b1001 -> DblSpaced = true
      // A8.6.310 & A8.6.393
      if ((type == 8 || type == 9) && align == 3)
      DblSpaced = (type == 9);
      // n == 3 && type == 0b0101 -> DblSpaced = true
      // A8.6.313 & A8.6.395
      if (slice(insn, 7, 6) == 3 || slice(align, 1, 1) == 1)
      DblSpaced = (type == 5);
      // n == 4 && type == 0b0001 -> DblSpaced = true
      // A8.6.316 & A8.6.397
      if (slice(insn, 7, 6) == 3)
      DblSpaced = (type == 1);

  // Hand off to the worker; L (Inst{21}) == 0 selects the store path, and
  // the alignment is converted from bits to bytes.
  return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
                           slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
  // NOTE(review): this excerpt elides the elem assignments, several
  // reject-returns, and the switch skeletons — verify against full source.
2831 // Qd/Dd imm src(=Qd/Dd)
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass < 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                     decodeNEONRd(insn))));

  // The element size is implied by the opcode; the opcode groups below map
  // the i8/i16/i32/i64 variants to their ElemSize.
  ElemSize esize = ESizeNA;
  case ARM::VMOVv16i8:
  case ARM::VMOVv4i16:
  case ARM::VMOVv8i16:
  case ARM::VMVNv4i16:
  case ARM::VMVNv8i16:
  case ARM::VBICiv4i16:
  case ARM::VBICiv8i16:
  case ARM::VORRiv4i16:
  case ARM::VORRiv8i16:
  case ARM::VMOVv2i32:
  case ARM::VMOVv4i32:
  case ARM::VMVNv2i32:
  case ARM::VMVNv4i32:
  case ARM::VBICiv2i32:
  case ARM::VBICiv4i32:
  case ARM::VORRiv2i32:
  case ARM::VORRiv4i32:
  case ARM::VMOVv1i64:
  case ARM::VMOVv2i64:
    assert(0 && "Unexpected opcode!");
  // NOTE(review): the switch header and the esize assignments between the
  // case groups are elided in this excerpt.

  // One register and a modified immediate value.
  // Add the imm operand.
  MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));

  // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
      (OpInfo[2].RegClass == ARM::DPRRegClassID ||
       OpInfo[2].RegClass == ARM::QPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
                                                       decodeNEONRd(insn))));
2905 N2V_VectorConvert_Between_Float_Fixed
2907 } // End of unnamed namespace
2909 // Vector Convert [between floating-point and fixed-point]
2910 // Qd/Dd Qm/Dm [fbits]
2912 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2913 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2916 // Vector Move Long:
2919 // Vector Move Narrow:
// Common worker for two-register NEON forms (Vd, Vm, optional immediate):
// plain two-register ops, VCVT between float and fixed (fbits imm), and
// VDUP-lane (lane-index imm), selected by Flag.
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opc];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 2 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  ElemSize esize = ESizeNA;
  if (Flag == N2V_VectorDupLane) {
    // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
    assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
           "Unexpected Opcode");
    esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
       : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  // Tied source operand: emitted as placeholder register 0.
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));

  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));

  // VZIP and others have two TIED_TO reg operands.
  while (OpIdx < NumOps &&
         (Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
    // Add TIED_TO operand.
    MI.addOperand(MI.getOperand(Idx));

  // Add the imm operand, if required.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {

    // Sentinel so the assert below catches an unhandled Flag value.
    unsigned imm = 0xFFFFFFFF;

    if (Flag == N2V_VectorDupLane)
      imm = decodeNVLaneDupIndex(insn, esize);
    if (Flag == N2V_VectorConvert_Between_Float_Fixed)
      imm = decodeVCVTFractionBits(insn);

    assert(imm != 0xFFFFFFFF && "Internal error");
    MI.addOperand(MCOperand::CreateImm(imm));
  // NOTE(review): OpIdx updates, the 'Idx' declaration, and closing braces
  // are elided in this excerpt.
2995 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2996 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2998 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
3001 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
3002 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3004 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
3005 N2V_VectorConvert_Between_Float_Fixed, B);
3007 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
3008 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3010 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
3011 N2V_VectorDupLane, B);
3014 // Vector Shift [Accumulate] Instructions.
3015 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
3017 // Vector Shift Left Long (with maximum shift count) Instructions.
3018 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
// Worker for NEON vector shift immediates: Qd/Dd, optional tied source,
// Qm/Dm, then the shift-amount immediate (interpreted per LeftShift).
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  // Tied source (e.g. shift-accumulate): placeholder register 0.
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));

  assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");

  // Qm/Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));

  assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");

  // Add the imm operand.

  // VSHLL has maximum shift count as the imm, inferred from its size.
    Imm = decodeNVSAmt(insn, LeftShift);
  MI.addOperand(MCOperand::CreateImm(Imm));
  // NOTE(review): OpIdx updates, the 'Imm' declaration/VSHLL branch, and the
  // final return are elided in this excerpt.
3083 // Left shift instructions.
3084 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
3085 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3087 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
3090 // Right shift instructions have different shift amount interpretation.
3091 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
3092 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3094 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
3103 N3V_Multiply_By_Scalar
3105 } // End of unnamed namespace
3107 // NEON Three Register Instructions with Optional Immediate Operand
3109 // Vector Extract Instructions.
3110 // Qd/Dd Qn/Dn Qm/Dm imm4
3112 // Vector Shift (Register) Instructions.
3113 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
3115 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
3116 // Qd/Dd Qn/Dn RestrictedDm index
// Common worker for three-register NEON forms with an optional immediate:
// plain three-register, vector extract (imm4), vector shift by register
// (operands in Vd, Vm, Vn order), and multiply-by-scalar (restricted Dm
// plus lane index), selected by Flag.
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;

  // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  // Flag-derived decoding knobs.
  bool VdVnVm = Flag == N3V_VectorShift ? false : true;
  bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
  bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
  ElemSize esize = ESizeNA;
  if (Flag == N3V_Multiply_By_Scalar) {
    // Inst{21-20} encodes the element size for by-scalar multiplies.
    unsigned size = (insn >> 20) & 3;
    if (size == 1) esize = ESize16;
    if (size == 2) esize = ESize32;
    assert (esize == ESize16 || esize == ESize32);

  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));

  // VABA, VABAL, VBSLd, VBSLq, ...
  // Tied source: placeholder register 0.
  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));

  // Second register operand — Rn for the normal order, Rm for shifts:
  // Dn = Inst{7:19-16} => NEON Rn
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass,
                                  VdVnVm ? decodeNEONRn(insn)
                                         : decodeNEONRm(insn))));

  // Third register operand — the mirror of the above:
  // Dm = Inst{5:3-0} => NEON Rm
  // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
  // Dn = Inst{7:19-16} => NEON Rn
  unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
                                        : decodeNEONRm(insn))
                      : decodeNEONRn(insn);
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));

  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Add the imm operand.
      Imm = decodeN3VImm(insn);
    else if (IsDmRestricted)
      Imm = decodeRestrictedDmIndex(insn, esize);
      assert(0 && "Internal error: unreachable code!");
    MI.addOperand(MCOperand::CreateImm(Imm));
  // NOTE(review): OpIdx updates, the 'Imm' declaration, the IsImm4 branch
  // header, closing braces and the final return are elided in this excerpt.
3202 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3203 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3205 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3208 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
3209 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3211 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3212 N3V_VectorShift, B);
3214 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
3215 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3217 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3218 N3V_VectorExtract, B);
3220 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
3221 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3223 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3224 N3V_Multiply_By_Scalar, B);
3227 // Vector Table Lookup
3229 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
3230 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
3231 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
3232 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  assert(NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == ARM::DPRRegClassID &&
         "Expect >= 3 operands and first 3 as reg operands");

  unsigned &OpIdx = NumOpsAdded;

  unsigned Rn = decodeNEONRn(insn);

  // Table length in D registers, from Inst{9-8}:
  // {Dn} encoded as len = 0b00
  // {Dn Dn+1} encoded as len = 0b01
  // {Dn Dn+1 Dn+2 } encoded as len = 0b10
  // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
  unsigned Len = slice(insn, 9, 8) + 1;

  // Dd (the destination vector)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRd(insn))));

  // Process tied_to operand constraint.
  // (VTBX variants tie $orig to $Vd.)
  if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
    MI.addOperand(MI.getOperand(Idx));

  // Do the <list> now.
  for (unsigned i = 0; i < Len; ++i) {
    assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,

  // Dm (the index vector)
  assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
         "Reg operand (index vector) expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRm(insn))));
  // NOTE(review): the 'Idx' declaration, OpIdx updates, the Rn+i list-entry
  // expression and the final return are elided in this excerpt.
3289 // Vector Get Lane (move scalar to ARM core register) Instructions.
3290 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::GPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  // Element size implied by the opcode variant (i32 / s16,u16 / s8,u8).
      Opcode == ARM::VGETLNi32 ? ESize32
        : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  // Dn = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  // Lane index derived from Inst{21:6-5} according to the element size.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
  // NOTE(review): the 'esize' declaration head, the decodeRd() argument of
  // the Rt operand, and the final return are elided in this excerpt.
3323 // Vector Set Lane (move ARM core register to scalar) Instructions.
3324 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCInstrDesc &MCID = ARMInsts[Opcode];
  const MCOperandInfo *OpInfo = MCID.OpInfo;
  if (!OpInfo) return false;

  assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
         OpInfo[2].RegClass == ARM::GPRRegClassID &&
         OpInfo[3].RegClass < 0 &&
         "Expect >= 3 operands with one dst operand");

  // Element size implied by the opcode variant (i8 / i16 / i32).
      Opcode == ARM::VSETLNi8 ? ESize8
        : (Opcode == ARM::VSETLNi16 ? ESize16

  // Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));

  // Tied source $src = $Vd: placeholder register 0.
  MI.addOperand(MCOperand::CreateReg(0));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,

  // Lane index derived from Inst{21:6-5} according to the element size.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
  // NOTE(review): the 'esize' declaration head, the decodeRd() argument of
  // the Rt operand, and the final return are elided in this excerpt.
3362 // Vector Duplicate Instructions (from ARM core register to all elements).
3363 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO B) {

  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;

  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         OpInfo[1].RegClass == ARM::GPRRegClassID &&
         "Expect >= 2 operands and first 2 as reg operand");

  // Destination is DPR for the d variants, QPR for the q variants.
  unsigned RegClass = OpInfo[0].RegClass;

  // Qd/Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
                                                     decodeNEONRn(insn))));

  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  // NOTE(review): the decodeRd() argument of the Rt operand, the
  // NumOpsAdded update and the final return are elided in this excerpt.
// PreLoadOpcode - Predicate: true iff Opcode is one of the preload
// (PLD/PLDW/PLI) instructions, in either the imm12 or the register-shifted
// addressing form.  (The switch head and default/return lines are elided in
// this listing.)
3389 static inline bool PreLoadOpcode(unsigned Opcode) {
3391 case ARM::PLDi12: case ARM::PLDrs:
3392 case ARM::PLDWi12: case ARM::PLDWrs:
3393 case ARM::PLIi12: case ARM::PLIrs:
// DisassemblePreLoadFrm - Build the operands of a preload (PLD/PLDW/PLI)
// instruction: base register Rn, then either an imm12 offset (addrmode_imm12
// variants) or an offset register plus shift-encoded immediate (ldst_so_reg
// variants).
// NOTE(review): listing is elided; e.g. the decode argument of the first
// operand, the -0 special-case body, and the final return are not shown.
3400 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3401 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3403 // Preload Data/Instruction requires either 2 or 3 operands.
3404 // PLDi12, PLDWi12, PLIi12: addrmode_imm12
3405 // PLDrs, PLDWrs, PLIrs: ldst_so_reg
// Base register Rn.
3407 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Immediate-offset forms.
3410 if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
3411 || Opcode == ARM::PLIi12) {
3412 unsigned Imm12 = slice(insn, 11, 0);
// U bit clear means a subtracted (negative) offset.
3413 bool Negative = getUBit(insn) == 0;
3415 // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
3416 if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
3417 DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
3418 MI.setOpcode(ARM::PLDi12);
3421 // -0 is represented specially. All other values are as normal.
3422 int Offset = Negative ? -1 * Imm12 : Imm12;
// Body of the -0 special case is elided in this listing.
3423 if (Imm12 == 0 && Negative)
3426 MI.addOperand(MCOperand::CreateImm(Offset));
// Register-offset forms: Rm plus shift op/amount packed as an AM2 opc imm.
3429 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3432 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
3434 // Inst{6-5} encodes the shift opcode.
3435 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
3436 // Inst{11-7} encodes the imm5 shift amount.
3437 unsigned ShImm = slice(insn, 11, 7);
3439 // A8.4.1. Possible rrx or shift amount of 32...
// Canonicalize ShOp/ShImm in place (rrx and shift-of-32 encodings).
3440 getImmShiftSE(ShOp, ShImm);
3441 MI.addOperand(MCOperand::CreateImm(
3442 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleMiscFrm - Catch-all decoder for miscellaneous instructions:
// memory barriers (DMB/DSB/ISB), SWP/SWPB, SETEND, the CPS variants, DBG,
// BKPT, and the preload opcodes (delegated to DisassemblePreLoadFrm).
// NOTE(review): listing is elided; several returns and closing braces
// between the visible cases are not shown.
3449 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3450 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3452 if (Opcode == ARM::DMB || Opcode == ARM::DSB || Opcode == ARM::ISB) {
3453 // Inst{3-0} encodes the memory barrier option for the variants.
3454 unsigned opt = slice(insn, 3, 0);
// Only the architecturally defined barrier options are accepted here.
3456 case ARM_MB::SY: case ARM_MB::ST:
3457 case ARM_MB::ISH: case ARM_MB::ISHST:
3458 case ARM_MB::NSH: case ARM_MB::NSHST:
3459 case ARM_MB::OSH: case ARM_MB::OSHST:
3460 MI.addOperand(MCOperand::CreateImm(opt));
3479 // SWP, SWPB: Rd Rm Rn
3480 // Delegate to DisassembleLdStExFrm()....
3481 return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// SETEND's endianness bit lives in Inst{9}.
3486 if (Opcode == ARM::SETEND) {
3488 MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
3492 // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
3493 // opcodes which match the same real instruction. This is needed since there's
3494 // no current handling of optional arguments. Fix here when a better handling
3495 // of optional arguments is implemented.
3496 if (Opcode == ARM::CPS3p) { // M = 1
3497 // Let's reject these impossible imod values by returning false:
3500 // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
3501 // invalid combination, so we just check for imod=0b00 here.
3502 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3504 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3505 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3506 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3510 if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
3511 // Let's reject these impossible imod values by returning false:
3512 // 1. (imod=0b00,M=0)
3514 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3516 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3517 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3521 if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
3522 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3527 // DBG has its option specified in Inst{3-0}.
3528 if (Opcode == ARM::DBG) {
3529 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3534 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3535 if (Opcode == ARM::BKPT) {
3536 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3537 slice(insn, 3, 0)));
// All PLD/PLDW/PLI forms share one decode routine.
3542 if (PreLoadOpcode(Opcode))
3543 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// Reaching here means the caller handed this frame an opcode it can't decode.
3545 assert(0 && "Unexpected misc instruction!");
3549 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3550 /// We divide the disassembly task into different categories, with each one
3551 /// corresponding to a specific instruction encoding format. There could be
3552 /// exceptions when handling a specific format, and that is why the Opcode is
3553 /// also present in the function prototype.
// NOTE(review): entry order must stay in sync with the ARMFormat enum — the
// table is indexed by (unsigned)Format in the ARMBasicMCBuilder constructor.
// This listing is elided; several entries between the visible ones are not
// shown.
3554 static const DisassembleFP FuncPtrs[] = {
3558 &DisassembleBrMiscFrm,
3560 &DisassembleDPSoRegRegFrm,
3563 &DisassembleLdMiscFrm,
3564 &DisassembleStMiscFrm,
3565 &DisassembleLdStMulFrm,
3566 &DisassembleLdStExFrm,
3567 &DisassembleArithMiscFrm,
3570 &DisassembleVFPUnaryFrm,
3571 &DisassembleVFPBinaryFrm,
3572 &DisassembleVFPConv1Frm,
3573 &DisassembleVFPConv2Frm,
3574 &DisassembleVFPConv3Frm,
3575 &DisassembleVFPConv4Frm,
3576 &DisassembleVFPConv5Frm,
3577 &DisassembleVFPLdStFrm,
3578 &DisassembleVFPLdStMulFrm,
3579 &DisassembleVFPMiscFrm,
3580 &DisassembleThumbFrm,
3581 &DisassembleMiscFrm,
3582 &DisassembleNGetLnFrm,
3583 &DisassembleNSetLnFrm,
3584 &DisassembleNDupFrm,
3586 // VLD and VST (including one lane) Instructions.
3589 // A7.4.6 One register and a modified immediate value
3590 // 1-Register Instructions with imm.
3591 // LLVM only defines VMOVv instructions.
3592 &DisassembleN1RegModImmFrm,
3594 // 2-Register Instructions with no imm.
3595 &DisassembleN2RegFrm,
3597 // 2-Register Instructions with imm (vector convert float/fixed point).
3598 &DisassembleNVCVTFrm,
3600 // 2-Register Instructions with imm (vector dup lane).
3601 &DisassembleNVecDupLnFrm,
3603 // Vector Shift Left Instructions.
3604 &DisassembleN2RegVecShLFrm,
3606 // Vector Shift Right Instructions, which has different interpretation of the
3607 // shift amount from the imm6 field.
3608 &DisassembleN2RegVecShRFrm,
3610 // 3-Register Data-Processing Instructions.
3611 &DisassembleN3RegFrm,
3613 // Vector Shift (Register) Instructions.
3614 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3615 &DisassembleN3RegVecShFrm,
3617 // Vector Extract Instructions.
3618 &DisassembleNVecExtractFrm,
3620 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3621 // By Scalar Instructions.
3622 &DisassembleNVecMulScalarFrm,
3624 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3625 // values in a table and generate a new vector.
3626 &DisassembleNVTBLFrm,
3628 &DisassembleDPSoRegImmFrm,
3634 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3635 /// The general idea is to set the Opcode for the MCInst, followed by adding
3636 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3637 /// to the Format-specific disassemble function for disassembly, followed by
3638 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3639 /// which follow the Dst/Src Operands.
// NOTE(review): listing is elided; the early return for a zero operand count
// and the return after the "NumOpsAdded >= NumOps" check are not shown.
3640 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3641 // Stage 1 sets the Opcode.
3642 MI.setOpcode(Opcode);
3643 // If the number of operands is zero, we're done!
3647 // Stage 2 calls the format-specific disassemble function to build the operand
3651 unsigned NumOpsAdded = 0;
// Disasm was selected from FuncPtrs[Format] by the constructor.
3652 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
// Fail on either an explicit false or a recorded builder error (Err).
3654 if (!OK || this->Err != 0) return false;
3655 if (NumOpsAdded >= NumOps)
3658 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3659 // FIXME: Should this be done selectively?
3660 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3663 // A8.3 Conditional execution
3664 // A8.3.1 Pseudocode details of conditional execution
3665 // Condition bits '111x' indicate the instruction is always executed.
// CondCode - Translate the 4-bit condition field into a condition-code value.
// NOTE(review): the return statements are elided in this listing; per the
// comment above, 0xF presumably maps to the always-execute condition and all
// other values pass through — confirm against the full source.
3666 static uint32_t CondCode(uint32_t CondField) {
3667 if (CondField == 0xF)
3672 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3673 /// of some Thumb instructions which come before the reglist operands. It
3674 /// returns true if the two predicate operands have been processed.
// NOTE(review): listing is elided; the close of the if-body and the final
// return are not shown.
3675 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3676 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3678 assert(NumOpsRemaining > 0 && "Invalid argument");
3680 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx: index of the next operand slot still to be filled in MI.
3681 unsigned Idx = MI.getNumOperands();
3683 // First, we check whether this instr specifies the PredicateOperand through
3684 // a pair of MCOperandInfos with isPredicate() property.
3685 if (NumOpsRemaining >= 2 &&
3686 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3687 OpInfo[Idx].RegClass < 0 &&
3688 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3690 // If we are inside an IT block, get the IT condition bits maintained via
3691 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3694 MI.addOperand(MCOperand::CreateImm(GetITCond()))
// Outside an IT block the predicate defaults to AL (always).
3696 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
// Second half of the predicate operand pair: the CCR register (CPSR).
3697 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3704 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3705 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
// NOTE(review): listing is elided; several returns and closing braces between
// the visible lines are not shown.
3707 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3708 uint32_t insn, unsigned short NumOpsRemaining) {
3710 assert(NumOpsRemaining > 0 && "Invalid argument");
3712 const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// The instruction name is used below to special-case Thumb branches.
3713 const std::string &Name = ARMInsts[Opcode].Name;
3714 unsigned Idx = MI.getNumOperands();
// TSFlags carries the instruction-domain bits (see isNEONDomain use below).
3715 uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
3717 // First, we check whether this instr specifies the PredicateOperand through
3718 // a pair of MCOperandInfos with isPredicate() property.
3719 if (NumOpsRemaining >= 2 &&
3720 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3721 OpInfo[Idx].RegClass < 0 &&
3722 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3724 // If we are inside an IT block, get the IT condition bits maintained via
3725 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3728 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Thumb opcodes are named with a leading 't'.
3730 if (Name.length() > 1 && Name[0] == 't') {
3731 // Thumb conditional branch instructions have their cond field embedded,
3735 // Check for undefined encodings.
// t2Bcc: cond in Inst{25-22}; values >= 14 are rejected as undefined.
3737 if (Name == "t2Bcc") {
3738 if ((cond = slice(insn, 25, 22)) >= 14)
3740 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
// tBcc: cond in Inst{11-8}; 14 is rejected as undefined here.
3741 } else if (Name == "tBcc") {
3742 if ((cond = slice(insn, 11, 8)) == 14)
3744 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
// Other Thumb instructions (outside an IT block) get AL.
3746 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3748 // ARM instructions get their condition field from Inst{31-28}.
3749 // We should reject Inst{31-28} = 0b1111 as invalid encoding.
3750 if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
3752 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
// Second half of the predicate operand pair: the CCR register (CPSR).
3755 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3757 NumOpsRemaining -= 2;
3760 if (NumOpsRemaining == 0)
3763 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
// CPSR is added as the optional def when S is set, reg 0 (none) otherwise.
3764 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3765 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3769 if (NumOpsRemaining == 0)
3775 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3776 /// after BuildIt is finished.
// NOTE(review): listing is elided; the remaining parameter list, the body of
// the InITBlock() branch, and the final return are not shown.
3777 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// Without a session object (SP) there is no IT-state bookkeeping to update.
3780 if (!SP) return Status;
// A t2IT instruction (re)initializes the IT state from Inst{7-0}; a failed
// init forces the overall status to false.
3782 if (Opcode == ARM::t2IT)
3783 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3784 else if (InITBlock())
3790 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
// Constructor: records opcode/format/operand count, clears the session
// pointer and error state, and selects the format-specific disassemble
// routine from FuncPtrs.
3791 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3793 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3794 unsigned Idx = (unsigned)format;
// Same bound check as CreateMCBuilder (which returns NULL instead of
// asserting); the last table slot is excluded by both.
3795 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3796 Disasm = FuncPtrs[Idx];
3799 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3800 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3801 /// Return NULL if it fails to create/return a proper builder. API clients
3802 /// are responsible for freeing up of the allocated memory. Caching can be
3803 /// performed by the API clients to improve performance.
// NOTE(review): listing is elided; the failure-branch return is not shown.
3804 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3805 // For "Unknown format", fail by returning a NULL pointer.
// Same bound check as the constructor's assert, but recoverable here.
3806 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3807 DEBUG(errs() << "Unknown format\n");
// Operand count comes from the static instruction description table.
3811 return new ARMBasicMCBuilder(Opcode, Format,
3812 ARMInsts[Opcode].getNumOperands());
3815 /// tryAddingSymbolicOperand - tryAddingSymbolicOperand tries to add a symbolic
3816 /// operand in place of the immediate Value in the MCInst. The immediate
3817 /// Value has had any PC adjustment made by the caller. If the getOpInfo()
3818 /// function was set as part of the setupBuilderForSymbolicDisassembly() call
3819 /// then that function is called to get any symbolic information at the
3820 /// builder's Address for this instruction. If that returns non-zero then the
3821 /// symbolic information it returns is used to create an MCExpr and that is
3822 /// added as an operand to the MCInst. This function returns true if it adds
3823 /// an operand to the MCInst and false otherwise.
3824 bool ARMBasicMCBuilder::tryAddingSymbolicOperand(uint64_t Value,
3830 struct LLVMOpInfo1 SymbolicOp;
3831 SymbolicOp.Value = Value;
3832 if (!GetOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp))
3835 const MCExpr *Add = NULL;
3836 if (SymbolicOp.AddSymbol.Present) {
3837 if (SymbolicOp.AddSymbol.Name) {
3838 StringRef Name(SymbolicOp.AddSymbol.Name);
3839 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3840 Add = MCSymbolRefExpr::Create(Sym, *Ctx);
3842 Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, *Ctx);
3846 const MCExpr *Sub = NULL;
3847 if (SymbolicOp.SubtractSymbol.Present) {
3848 if (SymbolicOp.SubtractSymbol.Name) {
3849 StringRef Name(SymbolicOp.SubtractSymbol.Name);
3850 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3851 Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
3853 Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, *Ctx);
3857 const MCExpr *Off = NULL;
3858 if (SymbolicOp.Value != 0)
3859 Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
3865 LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
3867 LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
3869 Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
3874 Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
3881 Expr = MCConstantExpr::Create(0, *Ctx);
3884 if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_HI16)
3885 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateUpper16(Expr, *Ctx)));
3886 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_LO16)
3887 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
3888 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
3889 MI.addOperand(MCOperand::CreateExpr(Expr));
3891 assert("bad SymbolicOp.VariantKind");