1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/raw_ostream.h"
23 //#define DEBUG(X) do { X; } while (0)
25 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
26 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
27 /// describing the operand info for each ARMInsts[i].
29 /// Together with an instruction's encoding format, we can take advantage of the
30 /// NumOperands and the OpInfo fields of the target instruction description in
31 /// the quest to build out the MCOperand list for an MCInst.
33 /// The general guideline is that with a known format, the number of dst and src
34 /// operands are well-known. The dst is built first, followed by the src
35 /// operand(s). The operands not yet used at this point are for the Implicit
36 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
37 /// defined with two components:
39 /// def pred { // Operand PredicateOperand
40 /// ValueType Type = OtherVT;
41 /// string PrintMethod = "printPredicateOperand";
42 /// string AsmOperandLowerMethod = ?;
43 /// dag MIOperandInfo = (ops i32imm, CCR);
44 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
45 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
48 /// which is manifested by the TargetOperandInfo[] of:
50 /// { 0, 0|(1<<TOI::Predicate), 0 },
51 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
53 /// So the first predicate MCOperand corresponds to the immediate part of the
54 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
55 /// corresponds to a register kind of ARM::CPSR.
57 /// For the Defs part, in the simple case of only cc_out:$s, we have:
59 /// def cc_out { // Operand OptionalDefOperand
60 /// ValueType Type = OtherVT;
61 /// string PrintMethod = "printSBitModifierOperand";
62 /// string AsmOperandLowerMethod = ?;
63 /// dag MIOperandInfo = (ops CCR);
64 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
65 /// dag DefaultOps = (ops (i32 zero_reg));
68 /// which is manifested by the one TargetOperandInfo of:
70 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
72 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
73 #include "ARMGenInstrInfo.inc"
// OpcodeName - Return the mnemonic string for Opcode, looked up in the
// TableGen'erated ARMInsts[] instruction descriptor table.
// NOTE(review): this file is a line-numbered listing with gaps; e.g. the
// closing brace of this function (original line 79+) is not present here.
// Restore the missing lines from the original source before compiling.
77 const char *ARMUtils::OpcodeName(unsigned Opcode) {
78 return ARMInsts[Opcode].Name;
81 // Return the register enum based on RegClass and the raw register number.
// getRegisterEnum - Map a (register class, raw encoding) pair onto the ARM::*
// register enum. The visible structure is one switch per raw register number
// (0..15 for GPR/DPR/QPR/SPR classes, 16..31 for the upper D/S registers).
// NOTE(review): the listing below has gaps (original lines 89-91, 94-95, 97,
// 99-103, the "else if (RawRegister == N)" guards between the switches, and
// the per-switch closing braces are missing). Only the surviving lines are
// reproduced; do not treat this as a complete function body.
84 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
85 if (RegClassID == ARM::rGPRRegClassID) {
86 // Check for the register numbers 13 and 15 that are not permitted for many
87 // Thumb register specifiers.
88 if (RawRegister == 13 || RawRegister == 15) {
// (gap: the error handling for the r13/r15 case is truncated here)
92 // For this purpose, we can treat rGPR as if it were GPR.
93 RegClassID = ARM::GPRRegClassID;
96 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
// QPR raw numbers are even D-register numbers; halve to get the Q index.
98 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
// Raw register 0.
104 switch (RegClassID) {
105 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
106 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
107 case ARM::DPR_VFP2RegClassID:
109 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
110 case ARM::QPR_VFP2RegClassID:
112 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
// Raw register 1.
116 switch (RegClassID) {
117 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
118 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
119 case ARM::DPR_VFP2RegClassID:
121 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
122 case ARM::QPR_VFP2RegClassID:
124 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
// Raw register 2.
128 switch (RegClassID) {
129 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
130 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
131 case ARM::DPR_VFP2RegClassID:
133 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
134 case ARM::QPR_VFP2RegClassID:
136 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
// Raw register 3.
140 switch (RegClassID) {
141 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
142 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
143 case ARM::DPR_VFP2RegClassID:
145 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
146 case ARM::QPR_VFP2RegClassID:
148 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
// Raw register 4 (note: the *_8 Q classes stop at Q3, so only QPR/QPR_VFP2).
152 switch (RegClassID) {
153 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
154 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
155 case ARM::DPR_VFP2RegClassID:
157 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
158 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
// Raw register 5.
162 switch (RegClassID) {
163 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
164 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
165 case ARM::DPR_VFP2RegClassID:
167 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
168 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
// Raw register 6.
172 switch (RegClassID) {
173 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
174 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
175 case ARM::DPR_VFP2RegClassID:
177 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
178 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
// Raw register 7.
182 switch (RegClassID) {
183 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
184 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
185 case ARM::DPR_VFP2RegClassID:
187 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
188 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// Raw register 8 (tGPR tops out at r7, so only full GPR from here on).
192 switch (RegClassID) {
193 case ARM::GPRRegClassID: return ARM::R8;
194 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
195 case ARM::QPRRegClassID: return ARM::Q8;
196 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
// Raw register 9.
200 switch (RegClassID) {
201 case ARM::GPRRegClassID: return ARM::R9;
202 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
203 case ARM::QPRRegClassID: return ARM::Q9;
204 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
// Raw register 10.
208 switch (RegClassID) {
209 case ARM::GPRRegClassID: return ARM::R10;
210 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
211 case ARM::QPRRegClassID: return ARM::Q10;
212 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
// Raw register 11.
216 switch (RegClassID) {
217 case ARM::GPRRegClassID: return ARM::R11;
218 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
219 case ARM::QPRRegClassID: return ARM::Q11;
220 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
// Raw register 12.
224 switch (RegClassID) {
225 case ARM::GPRRegClassID: return ARM::R12;
226 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
227 case ARM::QPRRegClassID: return ARM::Q12;
228 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// Raw register 13: GPR r13 is the stack pointer.
232 switch (RegClassID) {
233 case ARM::GPRRegClassID: return ARM::SP;
234 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
235 case ARM::QPRRegClassID: return ARM::Q13;
236 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
// Raw register 14: GPR r14 is the link register.
240 switch (RegClassID) {
241 case ARM::GPRRegClassID: return ARM::LR;
242 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
243 case ARM::QPRRegClassID: return ARM::Q14;
244 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
// Raw register 15: GPR r15 is the program counter.
248 switch (RegClassID) {
249 case ARM::GPRRegClassID: return ARM::PC;
250 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
251 case ARM::QPRRegClassID: return ARM::Q15;
252 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Raw registers 16..31: only the plain DPR/SPR classes reach this range.
256 switch (RegClassID) {
257 case ARM::DPRRegClassID: return ARM::D16;
258 case ARM::SPRRegClassID: return ARM::S16;
262 switch (RegClassID) {
263 case ARM::DPRRegClassID: return ARM::D17;
264 case ARM::SPRRegClassID: return ARM::S17;
268 switch (RegClassID) {
269 case ARM::DPRRegClassID: return ARM::D18;
270 case ARM::SPRRegClassID: return ARM::S18;
274 switch (RegClassID) {
275 case ARM::DPRRegClassID: return ARM::D19;
276 case ARM::SPRRegClassID: return ARM::S19;
280 switch (RegClassID) {
281 case ARM::DPRRegClassID: return ARM::D20;
282 case ARM::SPRRegClassID: return ARM::S20;
286 switch (RegClassID) {
287 case ARM::DPRRegClassID: return ARM::D21;
288 case ARM::SPRRegClassID: return ARM::S21;
292 switch (RegClassID) {
293 case ARM::DPRRegClassID: return ARM::D22;
294 case ARM::SPRRegClassID: return ARM::S22;
298 switch (RegClassID) {
299 case ARM::DPRRegClassID: return ARM::D23;
300 case ARM::SPRRegClassID: return ARM::S23;
304 switch (RegClassID) {
305 case ARM::DPRRegClassID: return ARM::D24;
306 case ARM::SPRRegClassID: return ARM::S24;
310 switch (RegClassID) {
311 case ARM::DPRRegClassID: return ARM::D25;
312 case ARM::SPRRegClassID: return ARM::S25;
316 switch (RegClassID) {
317 case ARM::DPRRegClassID: return ARM::D26;
318 case ARM::SPRRegClassID: return ARM::S26;
322 switch (RegClassID) {
323 case ARM::DPRRegClassID: return ARM::D27;
324 case ARM::SPRRegClassID: return ARM::S27;
328 switch (RegClassID) {
329 case ARM::DPRRegClassID: return ARM::D28;
330 case ARM::SPRRegClassID: return ARM::S28;
334 switch (RegClassID) {
335 case ARM::DPRRegClassID: return ARM::D29;
336 case ARM::SPRRegClassID: return ARM::S29;
340 switch (RegClassID) {
341 case ARM::DPRRegClassID: return ARM::D30;
342 case ARM::SPRRegClassID: return ARM::S30;
346 switch (RegClassID) {
347 case ARM::DPRRegClassID: return ARM::D31;
348 case ARM::SPRRegClassID: return ARM::S31;
// Fell through every switch: no valid mapping exists for this pair.
352 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
353 // Encoding error. Mark the builder with error code != 0.
358 ///////////////////////////////
360 // Utility Functions //
362 ///////////////////////////////
// Bit-field extraction helpers. Each pulls one field out of the 32-bit ARM
// instruction word using shift/mask constants from ARMII. (The per-function
// closing braces are absent from this gap-containing listing.)
364 // Extract/Decode Rd: Inst{15-12}.
365 static inline unsigned decodeRd(uint32_t insn) {
366 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
369 // Extract/Decode Rn: Inst{19-16}.
370 static inline unsigned decodeRn(uint32_t insn) {
371 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
374 // Extract/Decode Rm: Inst{3-0}.
375 static inline unsigned decodeRm(uint32_t insn) {
376 return (insn & ARMII::GPRRegMask);
379 // Extract/Decode Rs: Inst{11-8}.
380 static inline unsigned decodeRs(uint32_t insn) {
381 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
// Condition field (note: no mask applied; caller sees all bits above CondShift).
384 static inline unsigned getCondField(uint32_t insn) {
385 return (insn >> ARMII::CondShift);
// Single-bit flag extractors: each returns 0 or 1.
388 static inline unsigned getIBit(uint32_t insn) {
389 return (insn >> ARMII::I_BitShift) & 1;
392 static inline unsigned getAM3IBit(uint32_t insn) {
393 return (insn >> ARMII::AM3_I_BitShift) & 1;
396 static inline unsigned getPBit(uint32_t insn) {
397 return (insn >> ARMII::P_BitShift) & 1;
400 static inline unsigned getUBit(uint32_t insn) {
401 return (insn >> ARMII::U_BitShift) & 1;
// Two-bit P:U pair, anchored at the U bit (used with getAMSubModeForBits).
404 static inline unsigned getPUBits(uint32_t insn) {
405 return (insn >> ARMII::U_BitShift) & 3;
408 static inline unsigned getSBit(uint32_t insn) {
409 return (insn >> ARMII::S_BitShift) & 1;
412 static inline unsigned getWBit(uint32_t insn) {
413 return (insn >> ARMII::W_BitShift) & 1;
416 static inline unsigned getDBit(uint32_t insn) {
417 return (insn >> ARMII::D_BitShift) & 1;
420 static inline unsigned getNBit(uint32_t insn) {
421 return (insn >> ARMII::N_BitShift) & 1;
424 static inline unsigned getMBit(uint32_t insn) {
425 return (insn >> ARMII::M_BitShift) & 1;
428 // See A8.4 Shifts applied to a register.
429 // A8.4.2 Register controlled shifts.
431 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
432 // into llvm enums for shift opcode. The API clients should pass in the value
433 // encoded with two bits, so the assert stays to signal a wrong API usage.
435 // A8-12: DecodeRegShift()
436 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
// NOTE(review): the `switch (bits) {` header (original line 437) and the
// closing braces are missing from this listing; only the cases survive.
438 default: assert(0 && "No such value"); return ARM_AM::no_shift;
439 case 0: return ARM_AM::lsl;
440 case 1: return ARM_AM::lsr;
441 case 2: return ARM_AM::asr;
442 case 3: return ARM_AM::ror;
446 // See A8.4 Shifts applied to a register.
447 // A8.4.1 Constant shifts.
449 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
450 // encodings into the intended ShiftOpc and shift amount.
452 // A8-11: DecodeImmShift()
// In-out parameters: ShOp and ShImm are adjusted in place.
453 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
// NOTE(review): this body is heavily truncated in the listing (original
// lines 454-456, 458-460, 462+ are missing); only two fragments remain.
457 case ARM_AM::no_shift:
461 ShOp = ARM_AM::no_shift;
473 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
474 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
475 // clients should pass in the value encoded with two bits, so the assert stays
476 // to signal a wrong API usage.
477 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
// NOTE(review): the `switch (bits) {` header (original line 478) and closing
// braces are missing from this listing.
479 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
480 case 1: return ARM_AM::ia; // P=0 U=1
481 case 3: return ARM_AM::ib; // P=1 U=1
482 case 0: return ARM_AM::da; // P=0 U=0
483 case 2: return ARM_AM::db; // P=1 U=0
487 ////////////////////////////////////////////
489 // Disassemble function definitions //
491 ////////////////////////////////////////////
493 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
494 /// instr into a list of MCOperands in the appropriate order, with possible dst,
495 /// followed by possible src(s).
497 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
498 /// the CPSR, is factored into ARMBasicMCBuilder's method named
499 /// TryPredicateAndSBitModifier.
// DisassemblePseudo - Pseudo instructions never appear in an encoded stream;
// reaching this handler is a hard error (assert fires in debug builds).
501 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
502 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
504 assert(0 && "Unexpected pseudo instruction!");
509 // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
512 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
515 // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
516 // if dHi == dLo then UNPREDICTABLE;
// BadRegsMulFrm - Return true if the register fields of a multiply-format
// instruction violate the UNPREDICTABLE rules listed above (pc used as an
// operand, or identical RdHi/RdLo). NOTE(review): the `switch (Opcode)`
// header and the `return true/false` lines are missing from this listing.
517 static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
518 unsigned R19_16 = slice(insn, 19, 16);
519 unsigned R15_12 = slice(insn, 15, 12);
520 unsigned R11_8 = slice(insn, 11, 8);
521 unsigned R3_0 = slice(insn, 3, 0);
524 // Did we miss an opcode?
525 assert(0 && "Unexpected opcode!");
527 case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
528 case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
529 case ARM::SMMLA: case ARM::SMMLS: case ARM::SMLSD: case ARM::SMLSDX:
530 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
// Three-register multiplies: Ra{15-12} is not an operand here.
533 case ARM::MUL: case ARM::SMMUL: case ARM::SMULBB: case ARM::SMULBT:
534 case ARM::SMULTB: case ARM::SMULTT: case ARM::SMULWB: case ARM::SMULWT:
535 if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
// Long multiplies: additionally reject RdHi == RdLo.
538 case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
539 case ARM::UMULL: case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB:
540 case ARM::SMLALTT: case ARM::SMLSLD:
541 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
543 if (R19_16 == R15_12)
549 // Multiply Instructions.
550 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS,
552 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
554 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
555 // Rd{19-16} Rn{3-0} Rm{11-8}
557 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
559 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
561 // The mapping of the multiply registers to the "regular" ARM registers, where
562 // there are convenience decoder functions, is:
// DisassembleMulFrm - Build the MCOperand list for a multiply-format
// instruction: dst register(s) first, then Rn, Rm, and (if present) Ra.
// NumOpsAdded is aliased to OpIdx and tracks how many operands were emitted.
568 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
569 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
571 const TargetInstrDesc &TID = ARMInsts[Opcode];
572 unsigned short NumDefs = TID.getNumDefs();
573 const TargetOperandInfo *OpInfo = TID.OpInfo;
574 unsigned &OpIdx = NumOpsAdded;
578 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
580 && OpInfo[0].RegClass == ARM::GPRRegClassID
581 && OpInfo[1].RegClass == ARM::GPRRegClassID
582 && OpInfo[2].RegClass == ARM::GPRRegClassID
583 && "Expect three register operands");
585 // Sanity check for the register encodings.
586 if (BadRegsMulFrm(Opcode, insn))
589 // Instructions with two destination registers have RdLo{15-12} first.
591 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
592 "Expect 4th register operand");
593 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
598 // The destination register: RdHi{19-16} or Rd{19-16}.
599 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
602 // The two src registers: Rn{3-0}, then Rm{11-8}.
603 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
605 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
609 // Many multiply instructions (e.g., MLA) have three src registers.
610 // The third register operand is Ra{15-12}.
611 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
612 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
620 // Helper routines for disassembly of coprocessor instructions.
// LdStCopOpcode - True if Opcode falls inside the LDC/LDC2 or STC/STC2
// opcode ranges (load/store coprocessor).
622 static bool LdStCopOpcode(unsigned Opcode) {
623 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
624 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
// CoprocessorOpcode - True for any coprocessor instruction: the LDC/STC
// ranges above plus the CDP/MCR/MRC/MCRR/MRRC families listed below.
628 static bool CoprocessorOpcode(unsigned Opcode) {
629 if (LdStCopOpcode(Opcode))
635 case ARM::CDP: case ARM::CDP2:
636 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
637 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
// Field extractors for coprocessor encodings.
641 static inline unsigned GetCoprocessor(uint32_t insn) {
642 return slice(insn, 11, 8);
// opc1 is Inst{23-20} for CDP-style encodings, Inst{23-21} otherwise.
644 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
645 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
647 static inline unsigned GetCopOpc2(uint32_t insn) {
648 return slice(insn, 7, 5);
650 static inline unsigned GetCopOpc(uint32_t insn) {
651 return slice(insn, 7, 4);
653 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
656 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
658 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
660 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
662 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
664 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
668 // LDC_OPTION: cop CRd Rn imm8
670 // STC_OPTION: cop CRd Rn imm8
// DisassembleCoprocessor - Build the MCOperand list per the operand layouts
// documented above. NOTE(review): this listing has gaps; several control-flow
// lines (the ld/st-vs-register branch structure, returns, braces) are absent.
673 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
674 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
676 assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");
678 unsigned &OpIdx = NumOpsAdded;
// MCRR/MRRC variants use the single 4-bit opc field (GetCopOpc).
679 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
680 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
681 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
682 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
683 bool LdStCop = LdStCopOpcode(Opcode);
684 bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);
689 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
693 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
// Load/store coprocessor path: CRd, base Rn, then addressing-mode operands.
697 // Unindex if P:W = 0b00 --> _OPTION variant
698 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
700 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
702 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Placeholder offset register (always reg0 for AM2-style imm offsets).
707 MI.addOperand(MCOperand::CreateReg(0));
708 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
709 const TargetInstrDesc &TID = ARMInsts[Opcode];
711 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// imm8 is scaled by 4 (":00") per the LDC/STC encodings noted above.
712 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
713 ARM_AM::no_shift, IndexMode);
714 MI.addOperand(MCOperand::CreateImm(Offset));
// _OPTION variant: raw unscaled imm8.
717 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
// Register-transfer path: opc, then Rd/CRd, Rn/CRn, CRm, opc2.
721 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
722 : GetCopOpc1(insn, NoGPR)));
726 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
727 : MCOperand::CreateReg(
728 getRegisterEnum(B, ARM::GPRRegClassID,
733 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
734 getRegisterEnum(B, ARM::GPRRegClassID,
736 : MCOperand::CreateImm(decodeRn(insn)));
738 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
743 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
751 // Branch Instructions.
752 // BL: SignExtend(Imm24:'00', 32)
753 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
754 // SMC: ZeroExtend(imm4, 32)
755 // SVC: ZeroExtend(Imm24, 32)
757 // Various coprocessor instructions are assigned BrFrm arbitrarily.
758 // Delegates to DisassembleCoprocessor() helper function.
761 // MSR/MSRsys: Rm mask=Inst{19-16}
763 // MSRi/MSRsysi: so_imm
764 // SRSW/SRS: ldstm_mode:$amode mode_imm
765 // RFEW/RFE: ldstm_mode:$amode Rn
// DisassembleBrFrm - Handle branch-format opcodes, plus the status-register
// and SRS/RFE opcodes that share this format, dispatching by Opcode.
766 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
767 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
769 if (CoprocessorOpcode(Opcode))
770 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
772 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
773 if (!OpInfo) return false;
775 // MRS and MRSsys take one GPR reg Rd.
776 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
777 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
778 "Reg operand expected");
779 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
784 // BXJ takes one GPR reg Rm.
785 if (Opcode == ARM::BXJ) {
786 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
787 "Reg operand expected");
788 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
793 // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
794 // bit 4, and the special register fields in bits 3-0.
795 if (Opcode == ARM::MSR) {
796 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
797 "Reg operand expected");
798 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
799 slice(insn, 19, 16) /* Special Reg */ ));
800 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
805 // MSRi take a mask, followed by one so_imm operand. The mask contains the
806 // R Bit in bit 4, and the special register fields in bits 3-0.
807 if (Opcode == ARM::MSRi) {
808 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
809 slice(insn, 19, 16) /* Special Reg */ ));
810 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
811 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
812 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
813 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
814 unsigned Imm = insn & 0xFF;
815 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// SRS/RFE: addressing submode from P:U, then mode imm (SRS) or Rn (RFE).
819 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
820 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
821 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
822 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
824 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
825 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
827 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Remaining opcodes are the true branches/system calls: one imm operand.
833 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
834 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
835 "Unexpected Opcode");
837 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
840 if (Opcode == ARM::SMC) {
841 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
842 Imm32 = slice(insn, 3, 0);
843 } else if (Opcode == ARM::SVC) {
844 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
845 Imm32 = slice(insn, 23, 0);
847 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
848 unsigned Imm26 = slice(insn, 23, 0) << 2;
849 //Imm32 = signextend<signed int, 26>(Imm26);
850 Imm32 = SignExtend32<26>(Imm26);
853 MI.addOperand(MCOperand::CreateImm(Imm32));
859 // Misc. Branch Instructions.
// DisassembleBrMiscFrm - Handle the register-branch and immediate-BLX
// opcodes: BX_RET/MOVPCLR (no explicit operands), BLX/BLX_pred/BX (one GPR),
// and BLXi (one signed PC-relative immediate).
862 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
863 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
865 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
866 if (!OpInfo) return false;
868 unsigned &OpIdx = NumOpsAdded;
872 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
873 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
876 // BLX and BX take one GPR reg.
877 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
879 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
880 "Reg operand expected");
881 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
887 // BLXi takes imm32 (the PC offset).
888 if (Opcode == ARM::BLXi) {
889 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
890 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
891 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
892 int Imm32 = SignExtend32<26>(Imm26);
893 MI.addOperand(MCOperand::CreateImm(Imm32));
// getBFCInvMask - Compute the inverted bit mask for BFC/BFI from the
// lsb (Inst{11-7}) and msb (Inst{20-16}) fields; fails (with a debug
// message) when msb < lsb, which is an encoding error.
// NOTE(review): the listing drops the mask-building and return lines
// (original 904-905, 907-909, 911+); only fragments remain below.
901 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
902 uint32_t lsb = slice(insn, 11, 7);
903 uint32_t msb = slice(insn, 20, 16);
906 DEBUG(errs() << "Encoding error: msb < lsb\n");
910 for (uint32_t i = lsb; i <= msb; ++i)
916 // A major complication is the fact that some of the saturating add/subtract
917 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
918 // They are QADD, QDADD, QDSUB, and QSUB.
// DisassembleDPFrm - Data-processing format: optional dst reg, optional Rn,
// then operand 2 as either a register (reg/reg form), a 16-bit immediate
// (MOVi16/MOVTi16), or a rotated 8-bit so_imm (reg/imm form). BFC/BFI and
// SBFX/UBFX are special-cased first.
919 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
920 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
922 const TargetInstrDesc &TID = ARMInsts[Opcode];
923 unsigned short NumDefs = TID.getNumDefs();
924 bool isUnary = isUnaryDP(TID.TSFlags);
925 const TargetOperandInfo *OpInfo = TID.OpInfo;
926 unsigned &OpIdx = NumOpsAdded;
930 // Disassemble register def if there is one.
931 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
932 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
937 // Now disassemble the src operands.
941 // Special-case handling of BFC/BFI/SBFX/UBFX.
942 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
943 MI.addOperand(MCOperand::CreateReg(0));
944 if (Opcode == ARM::BFI) {
945 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
950 if (!getBFCInvMask(insn, mask))
953 MI.addOperand(MCOperand::CreateImm(mask));
957 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
958 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// lsb = Inst{11-7}; width = msb - lsb + 1, hence the "+ 1" on Inst{20-16}.
960 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
961 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
// RmRn marks the saturating ops whose Rn/Rm operand order is swapped.
966 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
967 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
969 // BinaryDP has an Rn operand.
971 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
972 "Reg operand expected");
973 MI.addOperand(MCOperand::CreateReg(
974 getRegisterEnum(B, ARM::GPRRegClassID,
975 RmRn ? decodeRm(insn) : decodeRn(insn))));
979 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
980 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
981 MI.addOperand(MCOperand::CreateReg(0));
985 // Now disassemble operand 2.
989 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
990 // We have a reg/reg form.
991 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
992 // routed here as well.
993 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
994 MI.addOperand(MCOperand::CreateReg(
995 getRegisterEnum(B, ARM::GPRRegClassID,
996 RmRn? decodeRn(insn) : decodeRm(insn))));
998 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
999 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1000 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1001 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
1002 MI.addOperand(MCOperand::CreateImm(Imm16));
1005 // We have a reg/imm form.
1006 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1007 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1008 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1009 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1010 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1011 unsigned Imm = insn & 0xFF;
1012 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// DisassembleDPSoRegFrm - Data-processing with shifter-operand form:
// optional dst reg, optional Rn, then operand 2 as the triple
// [Rm, Rs-or-reg0, shift opc+amount]. Inst{4} selects register-controlled
// (Rs) vs constant (imm5) shifts.
1019 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1020 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1022 const TargetInstrDesc &TID = ARMInsts[Opcode];
1023 unsigned short NumDefs = TID.getNumDefs();
1024 bool isUnary = isUnaryDP(TID.TSFlags);
1025 const TargetOperandInfo *OpInfo = TID.OpInfo;
1026 unsigned &OpIdx = NumOpsAdded;
1030 // Disassemble register def if there is one.
1031 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1032 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1037 // Disassemble the src operands.
1038 if (OpIdx >= NumOps)
1041 // BinaryDP has an Rn operand.
1043 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1044 "Reg operand expected");
1045 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1050 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1051 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1052 MI.addOperand(MCOperand::CreateReg(0));
1056 // Disassemble operand 2, which consists of three components.
1057 if (OpIdx + 2 >= NumOps)
1060 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1061 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1062 (OpInfo[OpIdx+2].RegClass < 0) &&
1063 "Expect 3 reg operands");
1065 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1066 unsigned Rs = slice(insn, 4, 4);
// First component of operand 2: the Rm register.
1068 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1071 // If Inst{7} != 0, we should reject this insn as an invalid encoding.
1072 if (slice(insn, 7, 7))
1075 // A8.6.3 ADC (register-shifted register)
1076 // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
1077 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
1078 decodeRm(insn) == 15 || decodeRs(insn) == 15)
1081 // Register-controlled shifts: [Rm, Rs, shift].
1082 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1084 // Inst{6-5} encodes the shift opcode.
1085 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1086 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1088 // Constant shifts: [Rm, reg0, shift_imm].
1089 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1090 // Inst{6-5} encodes the shift opcode.
1091 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1092 // Inst{11-7} encodes the imm5 shift amount.
1093 unsigned ShImm = slice(insn, 11, 7);
1095 // A8.4.1. Possible rrx or shift amount of 32...
1096 getImmShiftSE(ShOp, ShImm);
1097 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
1104 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1105 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1107 const TargetInstrDesc &TID = ARMInsts[Opcode];
1108 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1109 const TargetOperandInfo *OpInfo = TID.OpInfo;
1110 if (!OpInfo) return false;
1112 unsigned &OpIdx = NumOpsAdded;
1116 assert(((!isStore && TID.getNumDefs() > 0) ||
1117 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1118 && "Invalid arguments");
1120 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1121 if (isPrePost && isStore) {
1122 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1123 "Reg operand expected");
1124 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1129 // Disassemble the dst/src operand.
1130 if (OpIdx >= NumOps)
1133 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1134 "Reg operand expected");
1135 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1139 // After dst of a pre- and post-indexed load is the address base writeback.
1140 if (isPrePost && !isStore) {
1141 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1142 "Reg operand expected");
1143 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1148 // Disassemble the base operand.
1149 if (OpIdx >= NumOps)
1152 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1153 "Reg operand expected");
1154 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1155 && "Index mode or tied_to operand expected");
1156 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1160 // For reg/reg form, base reg is followed by +/- reg shop imm.
1161 // For immediate form, it is followed by +/- imm12.
1162 // See also ARMAddressingModes.h (Addressing Mode #2).
1163 if (OpIdx + 1 >= NumOps)
1166 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1167 unsigned IndexMode =
1168 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1169 if (getIBit(insn) == 0) {
1170 // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1171 // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1174 MI.addOperand(MCOperand::CreateReg(0));
1178 unsigned Imm12 = slice(insn, 11, 0);
1179 if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1180 Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1181 // Disassemble the 12-bit immediate offset, which is the second operand in
1182 // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
1183 int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1184 MI.addOperand(MCOperand::CreateImm(Offset));
1186 // Disassemble the 12-bit immediate offset, which is the second operand in
1187 // $am2offset => (ops GPR, i32imm).
1188 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1190 MI.addOperand(MCOperand::CreateImm(Offset));
1194 // The opcode ARM::LDRT actually corresponds to both Encoding A1 and A2 of
1195 // A8.6.86 LDRT. So if Inst{4} != 0 while Inst{25} (getIBit(insn)) == 1,
1196 // we should reject this insn as invalid.
1199 if ((Opcode == ARM::LDRT || Opcode == ARM::LDRBT) && (slice(insn,4,4) == 1))
1202 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1203 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1205 // Inst{6-5} encodes the shift opcode.
1206 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1207 // Inst{11-7} encodes the imm5 shift amount.
1208 unsigned ShImm = slice(insn, 11, 7);
1210 // A8.4.1. Possible rrx or shift amount of 32...
1211 getImmShiftSE(ShOp, ShImm);
1212 MI.addOperand(MCOperand::CreateImm(
1213 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
1220 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1221 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1222 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
1225 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1226 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1227 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1230 static bool HasDualReg(unsigned Opcode) {
1234 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1235 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
1240 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1241 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1243 const TargetInstrDesc &TID = ARMInsts[Opcode];
1244 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1245 const TargetOperandInfo *OpInfo = TID.OpInfo;
1246 if (!OpInfo) return false;
1248 unsigned &OpIdx = NumOpsAdded;
1252 assert(((!isStore && TID.getNumDefs() > 0) ||
1253 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1254 && "Invalid arguments");
1256 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1257 if (isPrePost && isStore) {
1258 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1259 "Reg operand expected");
1260 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1265 // Disassemble the dst/src operand.
1266 if (OpIdx >= NumOps)
1269 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1270 "Reg operand expected");
1271 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1275 // Fill in LDRD and STRD's second operand Rt operand.
1276 if (HasDualReg(Opcode)) {
1277 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1278 decodeRd(insn) + 1)));
1282 // After dst of a pre- and post-indexed load is the address base writeback.
1283 if (isPrePost && !isStore) {
1284 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1285 "Reg operand expected");
1286 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1291 // Disassemble the base operand.
1292 if (OpIdx >= NumOps)
1295 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1296 "Reg operand expected");
1297 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1298 && "Offset mode or tied_to operand expected");
1299 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1303 // For reg/reg form, base reg is followed by +/- reg.
1304 // For immediate form, it is followed by +/- imm8.
1305 // See also ARMAddressingModes.h (Addressing Mode #3).
1306 if (OpIdx + 1 >= NumOps)
1309 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1310 (OpInfo[OpIdx+1].RegClass < 0) &&
1311 "Expect 1 reg operand followed by 1 imm operand");
1313 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1314 unsigned IndexMode =
1315 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1316 if (getAM3IBit(insn) == 1) {
1317 MI.addOperand(MCOperand::CreateReg(0));
1319 // Disassemble the 8-bit immediate offset.
1320 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1321 unsigned Imm4L = insn & 0xF;
1322 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1324 MI.addOperand(MCOperand::CreateImm(Offset));
1326 // Disassemble the offset reg (Rm).
1327 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1329 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
1330 MI.addOperand(MCOperand::CreateImm(Offset));
1337 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1338 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1339 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
1343 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1344 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1345 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1348 // The algorithm for disassembly of LdStMulFrm is different from others because
1349 // it explicitly populates the two predicate operands after the base register.
1350 // After that, we need to populate the reglist with each affected register
1351 // encoded as an MCOperand.
1352 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1353 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1355 assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1358 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1360 // Writeback to base, if necessary.
1361 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1362 Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1363 Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1364 Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1365 MI.addOperand(MCOperand::CreateReg(Base));
1369 // Add the base register operand.
1370 MI.addOperand(MCOperand::CreateReg(Base));
1372 // Handling the two predicate operands before the reglist.
1373 int64_t CondVal = getCondField(insn);
1376 MI.addOperand(MCOperand::CreateImm(CondVal));
1377 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1381 // Fill the variadic part of reglist.
1382 unsigned RegListBits = insn & ((1 << 16) - 1);
1383 for (unsigned i = 0; i < 16; ++i) {
1384 if ((RegListBits >> i) & 1) {
1385 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1394 // LDREX, LDREXB, LDREXH: Rd Rn
1395 // LDREXD: Rd Rd+1 Rn
1396 // STREX, STREXB, STREXH: Rd Rm Rn
1397 // STREXD: Rd Rm Rm+1 Rn
1399 // SWP, SWPB: Rd Rm Rn
1400 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1401 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1403 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1404 if (!OpInfo) return false;
1406 unsigned &OpIdx = NumOpsAdded;
1411 && OpInfo[0].RegClass == ARM::GPRRegClassID
1412 && OpInfo[1].RegClass == ARM::GPRRegClassID
1413 && "Expect 2 reg operands");
1415 bool isStore = slice(insn, 20, 20) == 0;
1416 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1418 // Add the destination operand.
1419 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1423 // Store register Exclusive needs a source operand.
1425 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1430 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1431 decodeRm(insn)+1)));
1435 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1436 decodeRd(insn)+1)));
1440 // Finally add the pointer operand.
1441 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1448 // Misc. Arithmetic Instructions.
1450 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1451 // RBIT, REV, REV16, REVSH: Rd Rm
1452 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1453 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1455 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1456 unsigned &OpIdx = NumOpsAdded;
1461 && OpInfo[0].RegClass == ARM::GPRRegClassID
1462 && OpInfo[1].RegClass == ARM::GPRRegClassID
1463 && "Expect 2 reg operands");
1465 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1467 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1472 assert(NumOps >= 4 && "Expect >= 4 operands");
1473 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1478 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1482 // If there is still an operand info left which is an immediate operand, add
1483 // an additional imm5 LSL/ASR operand.
1484 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1485 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1486 // Extract the 5-bit immediate field Inst{11-7}.
1487 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1488 ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
1489 if (Opcode == ARM::PKHBT)
1491 else if (Opcode == ARM::PKHBT)
1493 getImmShiftSE(Opc, ShiftAmt);
1494 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
1501 /// DisassembleSatFrm - Disassemble saturate instructions:
1502 /// SSAT, SSAT16, USAT, and USAT16.
1503 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1504 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1506 const TargetInstrDesc &TID = ARMInsts[Opcode];
1507 NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1509 // Disassemble register def.
1510 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1513 unsigned Pos = slice(insn, 20, 16);
1514 if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1516 MI.addOperand(MCOperand::CreateImm(Pos));
1518 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1521 if (NumOpsAdded == 4) {
1522 ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1523 // Inst{11-7} encodes the imm5 shift amount.
1524 unsigned ShAmt = slice(insn, 11, 7);
1526 // A8.6.183. Possible ASR shift amount of 32...
1527 if (Opc == ARM_AM::asr)
1530 Opc = ARM_AM::no_shift;
1532 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1537 // Extend instructions.
1538 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1539 // The 2nd operand register is Rn and the 3rd operand regsiter is Rm for the
1540 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
1541 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1542 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1544 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1545 unsigned &OpIdx = NumOpsAdded;
1550 && OpInfo[0].RegClass == ARM::GPRRegClassID
1551 && OpInfo[1].RegClass == ARM::GPRRegClassID
1552 && "Expect 2 reg operands");
1554 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1556 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1561 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1566 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1570 // If there is still an operand info left which is an immediate operand, add
1571 // an additional rotate immediate operand.
1572 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1573 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1574 // Extract the 2-bit rotate field Inst{11-10}.
1575 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1576 // Rotation by 8, 16, or 24 bits.
1577 MI.addOperand(MCOperand::CreateImm(rot << 3));
1584 /////////////////////////////////////
1586 // Utility Functions For VFP //
1588 /////////////////////////////////////
1590 // Extract/Decode Dd/Sd:
1592 // SP => d = UInt(Vd:D)
1593 // DP => d = UInt(D:Vd)
1594 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1595 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1596 : (decodeRd(insn) | getDBit(insn) << 4);
1599 // Extract/Decode Dn/Sn:
1601 // SP => n = UInt(Vn:N)
1602 // DP => n = UInt(N:Vn)
1603 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1604 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1605 : (decodeRn(insn) | getNBit(insn) << 4);
1608 // Extract/Decode Dm/Sm:
1610 // SP => m = UInt(Vm:M)
1611 // DP => m = UInt(M:Vm)
1612 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1613 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1614 : (decodeRm(insn) | getMBit(insn) << 4);
1618 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1619 assert(N == 32 || N == 64);
1622 unsigned bit6 = slice(byte, 6, 6);
1624 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1626 Result |= 0x1f << 25;
1628 Result |= 0x1 << 30;
1630 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1631 (uint64_t)slice(byte, 5, 0) << 48;
1633 Result |= 0xffULL << 54;
1635 Result |= 0x1ULL << 62;
1637 return APInt(N, Result);
1640 // VFP Unary Format Instructions:
1642 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1643 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1644 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
1645 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1646 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1648 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1650 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1651 unsigned &OpIdx = NumOpsAdded;
1655 unsigned RegClass = OpInfo[OpIdx].RegClass;
1656 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1657 "Reg operand expected");
1658 bool isSP = (RegClass == ARM::SPRRegClassID);
1660 MI.addOperand(MCOperand::CreateReg(
1661 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1664 // Early return for compare with zero instructions.
1665 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1666 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
1669 RegClass = OpInfo[OpIdx].RegClass;
1670 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1671 "Reg operand expected");
1672 isSP = (RegClass == ARM::SPRRegClassID);
1674 MI.addOperand(MCOperand::CreateReg(
1675 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1681 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1682 // Some of them have operand constraints which tie the first operand in the
1683 // InOperandList to that of the dst. As far as asm printing is concerned, this
1684 // tied_to operand is simply skipped.
1685 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1686 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1688 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1690 const TargetInstrDesc &TID = ARMInsts[Opcode];
1691 const TargetOperandInfo *OpInfo = TID.OpInfo;
1692 unsigned &OpIdx = NumOpsAdded;
1696 unsigned RegClass = OpInfo[OpIdx].RegClass;
1697 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1698 "Reg operand expected");
1699 bool isSP = (RegClass == ARM::SPRRegClassID);
1701 MI.addOperand(MCOperand::CreateReg(
1702 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1705 // Skip tied_to operand constraint.
1706 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1707 assert(NumOps >= 4 && "Expect >=4 operands");
1708 MI.addOperand(MCOperand::CreateReg(0));
1712 MI.addOperand(MCOperand::CreateReg(
1713 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1716 MI.addOperand(MCOperand::CreateReg(
1717 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1723 // A8.6.295 vcvt (floating-point <-> integer)
1724 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1725 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1727 // A8.6.297 vcvt (floating-point and fixed-point)
1728 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
1729 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1730 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1732 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1734 const TargetInstrDesc &TID = ARMInsts[Opcode];
1735 const TargetOperandInfo *OpInfo = TID.OpInfo;
1736 if (!OpInfo) return false;
1738 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1739 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1740 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1744 assert(NumOps >= 3 && "Expect >= 3 operands");
1745 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1746 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1747 MI.addOperand(MCOperand::CreateReg(
1748 getRegisterEnum(B, RegClassID,
1749 decodeVFPRd(insn, SP))));
1751 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1752 "Tied to operand expected");
1753 MI.addOperand(MI.getOperand(0));
1755 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1756 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1757 MI.addOperand(MCOperand::CreateImm(fbits));
1762 // The Rd (destination) and Rm (source) bits have different interpretations
1763 // depending on their single-precisonness.
1765 if (slice(insn, 18, 18) == 1) { // to_integer operation
1766 d = decodeVFPRd(insn, true /* Is Single Precision */);
1767 MI.addOperand(MCOperand::CreateReg(
1768 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1769 m = decodeVFPRm(insn, SP);
1770 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
1772 d = decodeVFPRd(insn, SP);
1773 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1774 m = decodeVFPRm(insn, true /* Is Single Precision */);
1775 MI.addOperand(MCOperand::CreateReg(
1776 getRegisterEnum(B, ARM::SPRRegClassID, m)));
1784 // VMOVRS - A8.6.330
1785 // Rt => Rd; Sn => UInt(Vn:N)
1786 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1787 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1789 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1791 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1793 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1794 decodeVFPRn(insn, true))));
1799 // VMOVRRD - A8.6.332
1800 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1802 // VMOVRRS - A8.6.331
1803 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
1804 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1805 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1807 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1809 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1810 unsigned &OpIdx = NumOpsAdded;
1812 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1814 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1818 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1819 unsigned Sm = decodeVFPRm(insn, true);
1820 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1822 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1826 MI.addOperand(MCOperand::CreateReg(
1827 getRegisterEnum(B, ARM::DPRRegClassID,
1828 decodeVFPRm(insn, false))));
1834 // VMOVSR - A8.6.330
1835 // Rt => Rd; Sn => UInt(Vn:N)
1836 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1837 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1839 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1841 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1842 decodeVFPRn(insn, true))));
1843 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1849 // VMOVDRR - A8.6.332
1850 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1852 // VMOVRRS - A8.6.331
1853 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
1854 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1855 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1857 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1859 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1860 unsigned &OpIdx = NumOpsAdded;
1864 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1865 unsigned Sm = decodeVFPRm(insn, true);
1866 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1868 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1872 MI.addOperand(MCOperand::CreateReg(
1873 getRegisterEnum(B, ARM::DPRRegClassID,
1874 decodeVFPRm(insn, false))));
1878 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1880 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1886 // VFP Load/Store Instructions.
1887 // VLDRD, VLDRS, VSTRD, VSTRS
1888 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1889 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1891 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
1893 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
1894 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1896 // Extract Dd/Sd for operand 0.
1897 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1899 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
1901 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1902 MI.addOperand(MCOperand::CreateReg(Base));
1904 // Next comes the AM5 Opcode.
1905 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1906 unsigned char Imm8 = insn & 0xFF;
1907 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1914 // VFP Load/Store Multiple Instructions.
1915 // We have an optional write back reg, the base, and two predicate operands.
1916 // It is then followed by a reglist of either DPR(s) or SPR(s).
1918 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
1919 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1920 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1922 assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
1924 unsigned &OpIdx = NumOpsAdded;
1928 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1930 // Writeback to base, if necessary.
1931 if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
1932 Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
1933 Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
1934 Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
1935 MI.addOperand(MCOperand::CreateReg(Base));
1939 MI.addOperand(MCOperand::CreateReg(Base));
1941 // Handling the two predicate operands before the reglist.
1942 int64_t CondVal = getCondField(insn);
1945 MI.addOperand(MCOperand::CreateImm(CondVal));
1946 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1950 bool isSPVFP = (Opcode == ARM::VLDMSIA ||
1951 Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
1952 Opcode == ARM::VSTMSIA ||
1953 Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
1954 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1957 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1959 // Fill the variadic part of reglist.
1960 unsigned char Imm8 = insn & 0xFF;
1961 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1963 // Apply some sanity checks before proceeding.
1964 if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
1967 for (unsigned i = 0; i < Regs; ++i) {
1968 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
1976 // Misc. VFP Instructions.
1977 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1978 // FCONSTD (DPR and a VFPf64Imm operand)
1979 // FCONSTS (SPR and a VFPf32Imm operand)
1980 // VMRS/VMSR (GPR operand)
1981 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1982 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1984 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1985 unsigned &OpIdx = NumOpsAdded;
1989 if (Opcode == ARM::FMSTAT)
1992 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
1994 unsigned RegEnum = 0;
1995 switch (OpInfo[0].RegClass) {
1996 case ARM::DPRRegClassID:
1997 RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
1999 case ARM::SPRRegClassID:
2000 RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
2002 case ARM::GPRRegClassID:
2003 RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
2006 assert(0 && "Invalid reg class id");
2010 MI.addOperand(MCOperand::CreateReg(RegEnum));
2013 // Extract/decode the f64/f32 immediate.
2014 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2015 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2016 // The asm syntax specifies the floating point value, not the 8-bit literal.
2017 APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
2018 Opcode == ARM::FCONSTD ? 64 : 32);
2019 APFloat immFP = APFloat(immRaw, true);
2020 double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
2021 immFP.convertToFloat();
2022 MI.addOperand(MCOperand::CreateFPImm(imm));
2030 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
2031 #include "ThumbDisassemblerCore.h"
2033 /////////////////////////////////////////////////////
2035 // Utility Functions For ARM Advanced SIMD //
2037 /////////////////////////////////////////////////////
2039 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
2040 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
2042 // A7.3 Register encoding
2044 // Extract/Decode NEON D/Vd:
2046 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2047 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2048 // handling it in the getRegisterEnum() utility function.
2049 // D = Inst{22}, Vd = Inst{15-12}
2050 static unsigned decodeNEONRd(uint32_t insn) {
2051 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2052 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2055 // Extract/Decode NEON N/Vn:
2057 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2058 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2059 // handling it in the getRegisterEnum() utility function.
2060 // N = Inst{7}, Vn = Inst{19-16}
2061 static unsigned decodeNEONRn(uint32_t insn) {
2062 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2063 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2066 // Extract/Decode NEON M/Vm:
2068 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2069 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2070 // handling it in the getRegisterEnum() utility function.
2071 // M = Inst{5}, Vm = Inst{3-0}
2072 static unsigned decodeNEONRm(uint32_t insn) {
2073 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2074 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2085 } // End of unnamed namespace
2087 // size field -> Inst{11-10}
2088 // index_align field -> Inst{7-4}
2090 // The Lane Index interpretation depends on the Data Size:
2091 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2092 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2093 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2095 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
// Decodes the lane index from size (Inst{11-10}) and index_align (Inst{7-4}):
// size 0 (8-bit)  -> index_align[3:1]
// size 1 (16-bit) -> index_align[3:2]
// size 2 (32-bit) -> index_align[3]
// Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
static unsigned decodeLaneIndex(uint32_t insn) {
  unsigned size = insn >> 10 & 3;
  assert((size == 0 || size == 1 || size == 2) &&
         "Encoding error: size should be either 0, 1, or 2");

  unsigned index_align = insn >> 4 & 0xF;
  return (index_align >> 1) >> size;
}
2105 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2106 // op = Inst{5}, cmode = Inst{11-8}
2107 // i = Inst{24} (ARM architecture)
2108 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2109 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2110 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2111 unsigned char op = (insn >> 5) & 1;
2112 unsigned char cmode = (insn >> 8) & 0xF;
2113 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2114 ((insn >> 16) & 7) << 4 |
2116 return (op << 12) | (cmode << 8) | Imm8;
2119 // A8.6.339 VMUL, VMULL (by scalar)
2120 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2121 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
2122 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2129 assert(0 && "Unreachable code!");
2134 // A8.6.339 VMUL, VMULL (by scalar)
2135 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2136 // ESize32 => index = Inst{5} (M) D0-D15
2137 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2140 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2142 return (insn >> 5) & 1;
2144 assert(0 && "Unreachable code!");
2149 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2150 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD):
// (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
static unsigned decodeVCVTFractionBits(uint32_t insn) {
  return 64 - ((insn >> 16) & 0x3F);
}
2155 // A8.6.302 VDUP (scalar)
2156 // ESize8 => index = Inst{19-17}
2157 // ESize16 => index = Inst{19-18}
2158 // ESize32 => index = Inst{19}
// Extracts the VDUP (scalar) lane index; the index field narrows as the
// element size grows (see the table above).
2159 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
// ESize8: three-bit index.
2162 return (insn >> 17) & 7;
// ESize16: two-bit index.
2164 return (insn >> 18) & 3;
// ESize32: one-bit index.
2166 return (insn >> 19) & 1;
2168 assert(0 && "Unspecified element size!");
2173 // A8.6.328 VMOV (ARM core register to scalar)
2174 // A8.6.329 VMOV (scalar to ARM core register)
2175 // ESize8 => index = Inst{21:6-5}
2176 // ESize16 => index = Inst{21:6}
2177 // ESize32 => index = Inst{21}
// Extracts the lane index for the VMOV core-register<->scalar forms; the
// index is split across opc1/opc2 bits per the element size (table above).
2178 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
// ESize8: index = Inst{21}:Inst{6-5}.
2181 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
// ESize16: index = Inst{21}:Inst{6}.
2183 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
// ESize32: index = Inst{21}.
2185 return ((insn >> 21) & 1);
2187 assert(0 && "Unspecified element size!");
2192 // Imm6 = Inst{21-16}, L = Inst{7}
2194 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2196 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2197 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2198 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2199 // '1xxxxxx' => esize = 64; shift_amount = imm6
2201 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2203 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2204 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2205 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2206 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
// Decodes the shift amount of a NEON shift-by-immediate instruction from
// L:imm6 (Inst{7}:Inst{21-16}). The element size is inferred from the
// position of the leading one in L:imm6, and left vs. right shifts apply
// the different formulas tabulated above.
2208 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2209 ElemSize esize = ESizeNA;
2210 unsigned L = (insn >> 7) & 1;
2211 unsigned imm6 = (insn >> 16) & 0x3F;
// imm6 = '001xxxx' => 16-bit elements.
2215 else if (imm6 >> 4 == 1)
// imm6 = '01xxxxx' => 32-bit elements.
2217 else if (imm6 >> 5 == 1)
2220 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// Left shift: imm6 for 64-bit elements, otherwise imm6 - esize.
2225 return esize == ESize64 ? imm6 : (imm6 - esize);
// Right shift: esize - imm6 for 64-bit elements, otherwise 2*esize - imm6.
2227 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
// VEXT: the 4-bit immediate (byte rotation amount) lives in Inst{11-8}.
static unsigned decodeN3VImm(uint32_t insn) {
  const unsigned Imm4 = insn >> 8;
  return Imm4 & 0xF;
}
// Shared worker for NEON load/store (VLD*/VST*) disassembly. Builds the
// MCOperand list in one of the orders sketched below: the load forms emit
// the destination D/Q registers first, the store forms emit the addressing
// operands first. 'DblSpaced' selects register increments of 2, 'alignment'
// is the addrmode6 alignment operand (in bytes), and 'B' resolves register
// numbers to register enums.
2237 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2239 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2241 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2243 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2245 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
2246 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2247 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2248 unsigned alignment, BO B) {
2250 const TargetInstrDesc &TID = ARMInsts[Opcode];
2251 const TargetOperandInfo *OpInfo = TID.OpInfo;
2253 // At least one DPR register plus addressing mode #6.
2254 assert(NumOps >= 3 && "Expect >= 3 operands");
// NumOpsAdded is aliased so every addOperand below is counted for the
// caller automatically.
2256 unsigned &OpIdx = NumOpsAdded;
2260 // We have homogeneous NEON registers for Load/Store.
2261 unsigned RegClass = 0;
2263 // Double-spaced registers have increments of 2.
2264 unsigned Inc = DblSpaced ? 2 : 1;
2266 unsigned Rn = decodeRn(insn);
2267 unsigned Rm = decodeRm(insn);
2268 unsigned Rd = decodeNEONRd(insn);
2270 // A7.7.1 Advanced SIMD addressing mode.
2273 // LLVM Addressing Mode #6.
2274 unsigned RmEnum = 0;
2276 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
// Store path: addressing operands first, then the register list.
2279 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2280 // then possible lane index.
2281 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2282 "Reg operand expected");
2285 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2290 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2291 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2292 // addrmode6 := (ops GPR:$addr, i32imm)
2293 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2295 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2299 MI.addOperand(MCOperand::CreateReg(RmEnum));
2303 assert(OpIdx < NumOps &&
2304 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2305 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2306 "Reg operand expected");
2308 RegClass = OpInfo[OpIdx].RegClass;
// Emit the homogeneous run of D/Q registers being stored.
2309 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2310 MI.addOperand(MCOperand::CreateReg(
2311 getRegisterEnum(B, RegClass, Rd)));
2316 // Handle possible lane index.
2317 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2318 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2319 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// Load path: register list first, then the addressing operands.
2324 // Consume the DPR/QPR's, possible WB, AddrMode6, possible incrment reg,
2325 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2326 RegClass = OpInfo[0].RegClass;
2328 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2329 MI.addOperand(MCOperand::CreateReg(
2330 getRegisterEnum(B, RegClass, Rd)));
2336 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2341 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2342 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2343 // addrmode6 := (ops GPR:$addr, i32imm)
2344 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2346 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2350 MI.addOperand(MCOperand::CreateReg(RmEnum));
// TIED_TO source registers only need placeholder slots (reg 0); the
// asm printer does not print them.
2354 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2355 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2356 "Tied to operand expected");
2357 MI.addOperand(MCOperand::CreateReg(0));
2361 // Handle possible lane index.
2362 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2363 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2364 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2369 // Accessing registers past the end of the NEON register file is not
// Validates the index_align field of a single-element-to-one-lane VLD/VST
// (elem = number of elements, size = Inst{11-10}) and, for the legal
// encodings, derives the implied alignment via the 'alignment' out-param
// (the caller passes it on in bits and divides by 8 later). Returns false
// for reserved index_align encodings.
// NOTE(review): several branch bodies appear elided in this dump; the
// visible conditions are the per-size legality checks from the ARM ARM.
2377 // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
2378 static bool Align4OneLaneInst(unsigned elem, unsigned size,
2379 unsigned index_align, unsigned & alignment) {
2387 return slice(index_align, 0, 0) == 0;
2388 else if (size == 1) {
2389 bits = slice(index_align, 1, 0);
2390 if (bits != 0 && bits != 1)
2395 } else if (size == 2) {
2396 bits = slice(index_align, 2, 0);
2397 if (bits != 0 && bits != 3)
2407 if (slice(index_align, 0, 0) == 1)
2411 if (slice(index_align, 0, 0) == 1)
2414 } else if (size == 2) {
2415 if (slice(index_align, 1, 1) != 0)
2417 if (slice(index_align, 0, 0) == 1)
2425 if (slice(index_align, 0, 0) != 0)
2429 if (slice(index_align, 0, 0) != 0)
2433 } else if (size == 2) {
2434 if (slice(index_align, 1, 0) != 0)
2442 if (slice(index_align, 0, 0) == 1)
2446 if (slice(index_align, 0, 0) == 1)
2449 } else if (size == 2) {
2450 bits = slice(index_align, 1, 0);
// Front end for the NEON load/store formats: classifies the opcode by name
// (one-lane "LN" variants vs. multiple-structure variants), computes the
// double-spaced flag and alignment, then delegates to DisassembleNLdSt0().
2464 // If L (Inst{21}) == 0, store instructions.
2465 // Find out about double-spaced-ness of the Opcode and pass it on to
2466 // DisassembleNLdSt0().
2467 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2468 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2470 const StringRef Name = ARMInsts[Opcode].Name;
2471 bool DblSpaced = false;
2472 // 0 represents standard alignment, i.e., unaligned data access.
2473 unsigned alignment = 0;
2475 if (Name.find("LN") != std::string::npos) {
2476 // To one lane instructions.
2477 // See, for example, 8.6.317 VLD4 (single 4-element structure to one lane).
2479 unsigned elem = 0; // legal values: {1, 2, 3, 4}
2480 if (Name.startswith("VST1") || Name.startswith("VLD1"))
2483 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2486 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2489 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2492 // Utility function takes number of elements, size, and index_align.
// Reject reserved index_align encodings up front.
2493 if (!Align4OneLaneInst(elem,
2494 slice(insn, 11, 10),
2499 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2500 if (Name.endswith("16") || Name.endswith("16_UPD"))
2501 DblSpaced = slice(insn, 5, 5) == 1;
2503 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2504 if (Name.endswith("32") || Name.endswith("32_UPD"))
2505 DblSpaced = slice(insn, 6, 6) == 1;
2507 // Multiple n-element structures with type encoded as Inst{11-8}.
2508 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2510 // Inst{5-4} encodes alignment.
2511 switch (slice(insn, 5, 4)) {
2515 alignment = 64; break;
2517 alignment = 128; break;
2519 alignment = 256; break;
2522 // n == 2 && type == 0b1001 -> DblSpaced = true
2523 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2524 DblSpaced = slice(insn, 11, 8) == 9;
2526 // n == 3 && type == 0b0101 -> DblSpaced = true
2527 if (Name.startswith("VST3") || Name.startswith("VLD3")) {
2528 // A8.6.313 & A8.6.395
2529 if (slice(insn, 7, 6) == 3 && slice(insn, 5, 5) == 1)
2532 DblSpaced = slice(insn, 11, 8) == 5;
2535 // n == 4 && type == 0b0001 -> DblSpaced = true
2536 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2537 DblSpaced = slice(insn, 11, 8) == 1;
// L (Inst{21}) == 0 means a store; alignment is converted from bits to
// bytes here.
2539 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2540 slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
// A7.4.6 One register and a modified immediate value: VMOV/VMVN/VBIC/VORR
// with an AdvSIMD modified immediate. The switch classifies the opcode to
// pick the element size fed into decodeN1VImm(); the VBIC/VORR variants
// additionally carry a TIED_TO $src = $Vd operand.
2547 // Qd/Dd imm src(=Qd/Dd)
2548 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2549 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2551 const TargetInstrDesc &TID = ARMInsts[Opcode];
2552 const TargetOperandInfo *OpInfo = TID.OpInfo;
2554 assert(NumOps >= 2 &&
2555 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2556 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2557 (OpInfo[1].RegClass < 0) &&
2558 "Expect 1 reg operand followed by 1 imm operand");
2560 // Qd/Dd = Inst{22:15-12} => NEON Rd
2561 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2562 decodeNEONRd(insn))));
2564 ElemSize esize = ESizeNA;
// 8-bit elements.
2567 case ARM::VMOVv16i8:
// 16-bit elements.
2570 case ARM::VMOVv4i16:
2571 case ARM::VMOVv8i16:
2572 case ARM::VMVNv4i16:
2573 case ARM::VMVNv8i16:
2574 case ARM::VBICiv4i16:
2575 case ARM::VBICiv8i16:
2576 case ARM::VORRiv4i16:
2577 case ARM::VORRiv8i16:
// 32-bit elements.
2580 case ARM::VMOVv2i32:
2581 case ARM::VMOVv4i32:
2582 case ARM::VMVNv2i32:
2583 case ARM::VMVNv4i32:
2584 case ARM::VBICiv2i32:
2585 case ARM::VBICiv4i32:
2586 case ARM::VORRiv2i32:
2587 case ARM::VORRiv4i32:
// 64-bit elements.
2590 case ARM::VMOVv1i64:
2591 case ARM::VMOVv2i64:
2595 assert(0 && "Unexpected opcode!");
2599 // One register and a modified immediate value.
2600 // Add the imm operand.
2601 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2605 // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
2607 (OpInfo[2].RegClass == ARM::DPRRegClassID ||
2608 OpInfo[2].RegClass == ARM::QPRRegClassID)) {
2609 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2610 decodeNEONRd(insn))));
2621 N2V_VectorConvert_Between_Float_Fixed
2623 } // End of unnamed namespace
// Shared worker for the NEON two-register (Vd, Vm) formats listed below;
// 'Flag' selects whether (and how) a trailing immediate operand is decoded:
// VCVT gets a fraction-bits immediate, VDUPLN gets a lane index.
2625 // Vector Convert [between floating-point and fixed-point]
2626 // Qd/Dd Qm/Dm [fbits]
2628 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2629 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2632 // Vector Move Long:
2635 // Vector Move Narrow:
2639 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2640 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2642 const TargetInstrDesc &TID = ARMInsts[Opc];
2643 const TargetOperandInfo *OpInfo = TID.OpInfo;
2645 assert(NumOps >= 2 &&
2646 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2647 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2648 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2649 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2650 "Expect >= 2 operands and first 2 as reg operands");
2652 unsigned &OpIdx = NumOpsAdded;
2656 ElemSize esize = ESizeNA;
2657 if (Flag == N2V_VectorDupLane) {
2658 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2659 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2660 "Unexpected Opcode");
2661 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2662 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2666 // Qd/Dd = Inst{22:15-12} => NEON Rd
2667 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2668 decodeNEONRd(insn))));
// TIED_TO source gets a placeholder (reg 0) slot.
2672 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2674 MI.addOperand(MCOperand::CreateReg(0));
2678 // Dm = Inst{5:3-0} => NEON Rm
2679 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2680 decodeNEONRm(insn))));
2683 // VZIP and others have two TIED_TO reg operands.
2685 while (OpIdx < NumOps &&
2686 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2687 // Add TIED_TO operand.
2688 MI.addOperand(MI.getOperand(Idx));
2692 // Add the imm operand, if required.
2693 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2694 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Sentinel so the assert below catches a Flag that produced no imm.
2696 unsigned imm = 0xFFFFFFFF;
2698 if (Flag == N2V_VectorDupLane)
2699 imm = decodeNVLaneDupIndex(insn, esize);
2700 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2701 imm = decodeVCVTFractionBits(insn);
2703 assert(imm != 0xFFFFFFFF && "Internal error");
2704 MI.addOperand(MCOperand::CreateImm(imm));
// Plain two-register (Vd, Vm) instructions with no immediate; delegates to
// DisassembleNVdVmOptImm().
2711 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2712 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2714 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2717 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2718 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2720 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2721 N2V_VectorConvert_Between_Float_Fixed, B);
2723 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2724 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2726 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2727 N2V_VectorDupLane, B);
// Shared worker for the NEON shift-by-immediate formats; 'LeftShift'
// selects how the imm6 shift amount is interpreted (see decodeNVSAmt).
2730 // Vector Shift [Accumulate] Instructions.
2731 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2733 // Vector Shift Left Long (with maximum shift count) Instructions.
2734 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
2736 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2737 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2739 const TargetInstrDesc &TID = ARMInsts[Opcode];
2740 const TargetOperandInfo *OpInfo = TID.OpInfo;
2742 assert(NumOps >= 3 &&
2743 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2744 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2745 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2746 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2747 "Expect >= 3 operands and first 2 as reg operands");
2749 unsigned &OpIdx = NumOpsAdded;
2753 // Qd/Dd = Inst{22:15-12} => NEON Rd
2754 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2755 decodeNEONRd(insn))));
// Accumulating shifts have a TIED_TO source; placeholder slot (reg 0).
2758 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2760 MI.addOperand(MCOperand::CreateReg(0));
2764 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2765 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2766 "Reg operand expected");
2768 // Qm/Dm = Inst{5:3-0} => NEON Rm
2769 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2770 decodeNEONRm(insn))));
2773 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2775 // Add the imm operand.
2777 // VSHLL has maximum shift count as the imm, inferred from its size.
2781 Imm = decodeNVSAmt(insn, LeftShift);
2793 MI.addOperand(MCOperand::CreateImm(Imm));
2799 // Left shift instructions.
2800 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2801 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2803 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
2806 // Right shift instructions have different shift amount interpretation.
2807 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2808 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2810 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
2819 N3V_Multiply_By_Scalar
2821 } // End of unnamed namespace
// Shared worker for the NEON three-register formats; 'Flag' selects the
// operand order (shift-register forms swap m/n), whether Dm is restricted
// (by-scalar multiplies), and how the optional trailing immediate is
// decoded (VEXT imm4 vs. scalar lane index).
2823 // NEON Three Register Instructions with Optional Immediate Operand
2825 // Vector Extract Instructions.
2826 // Qd/Dd Qn/Dn Qm/Dm imm4
2828 // Vector Shift (Register) Instructions.
2829 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2831 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2832 // Qd/Dd Qn/Dn RestrictedDm index
2835 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2836 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
2838 const TargetInstrDesc &TID = ARMInsts[Opcode];
2839 const TargetOperandInfo *OpInfo = TID.OpInfo;
2841 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
2842 assert(NumOps >= 3 &&
2843 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2844 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2845 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2846 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2847 "Expect >= 3 operands and first 2 as reg operands");
2849 unsigned &OpIdx = NumOpsAdded;
// Decode the Flag into the three behavioral knobs used below.
2853 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2854 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2855 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2856 ElemSize esize = ESizeNA;
2857 if (Flag == N3V_Multiply_By_Scalar) {
// By-scalar multiplies infer element size from Inst{21-20}.
2858 unsigned size = (insn >> 20) & 3;
2859 if (size == 1) esize = ESize16;
2860 if (size == 2) esize = ESize32;
2861 assert (esize == ESize16 || esize == ESize32);
2864 // Qd/Dd = Inst{22:15-12} => NEON Rd
2865 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2866 decodeNEONRd(insn))));
2869 // VABA, VABAL, VBSLd, VBSLq, ...
2870 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2872 MI.addOperand(MCOperand::CreateReg(0));
2876 // Dn = Inst{7:19-16} => NEON Rn
2878 // Dm = Inst{5:3-0} => NEON Rm
2879 MI.addOperand(MCOperand::CreateReg(
2880 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2881 VdVnVm ? decodeNEONRn(insn)
2882 : decodeNEONRm(insn))));
2885 // Special case handling for VMOVDneon and VMOVQ because they are marked as
2887 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
2890 // Dm = Inst{5:3-0} => NEON Rm
2892 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2894 // Dn = Inst{7:19-16} => NEON Rn
2895 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2896 : decodeNEONRm(insn))
2897 : decodeNEONRn(insn);
2899 MI.addOperand(MCOperand::CreateReg(
2900 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
// Optional trailing immediate (VEXT imm4 or restricted scalar index).
2903 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2904 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2905 // Add the imm operand.
2908 Imm = decodeN3VImm(insn);
2909 else if (IsDmRestricted)
2910 Imm = decodeRestrictedDmIndex(insn, esize);
2912 assert(0 && "Internal error: unreachable code!");
2916 MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register data-processing instructions; delegates to
// DisassembleNVdVnVmOptImm().
2923 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2924 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2926 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2929 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
2930 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2932 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2933 N3V_VectorShift, B);
2935 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
2936 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2938 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2939 N3V_VectorExtract, B);
2941 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
2942 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2944 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2945 N3V_Multiply_By_Scalar, B);
// VTBL/VTBX: Dd, optional TIED_TO Dd, the <list> of 1-4 consecutive Dn
// registers (length from Inst{9-8}), then the Dm index vector.
2948 // Vector Table Lookup
2950 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2951 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2952 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2953 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
2954 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2955 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2957 const TargetInstrDesc &TID = ARMInsts[Opcode];
2958 const TargetOperandInfo *OpInfo = TID.OpInfo;
2959 if (!OpInfo) return false;
2961 assert(NumOps >= 3 &&
2962 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2963 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2964 OpInfo[2].RegClass == ARM::DPRRegClassID &&
2965 "Expect >= 3 operands and first 3 as reg operands");
2967 unsigned &OpIdx = NumOpsAdded;
2971 unsigned Rn = decodeNEONRn(insn);
2973 // {Dn} encoded as len = 0b00
2974 // {Dn Dn+1} encoded as len = 0b01
2975 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2976 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
2977 unsigned Len = slice(insn, 9, 8) + 1;
2979 // Dd (the destination vector)
2980 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2981 decodeNEONRd(insn))));
2984 // Process tied_to operand constraint.
2986 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2987 MI.addOperand(MI.getOperand(Idx));
2991 // Do the <list> now.
2992 for (unsigned i = 0; i < Len; ++i) {
2993 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2994 "Reg operand expected");
2995 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3000 // Dm (the index vector)
3001 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
3002 "Reg operand (index vector) expected");
3003 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3004 decodeNEONRm(insn))));
// VMOV scalar -> ARM core register (A8.6.329): emits Rt, Dn, and the lane
// index; the element size is inferred from the opcode to pick the right
// index bits in decodeNVLaneOpIndex().
3010 // Vector Get Lane (move scalar to ARM core register) Instructions.
3011 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
3012 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3013 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3015 const TargetInstrDesc &TID = ARMInsts[Opcode];
3016 const TargetOperandInfo *OpInfo = TID.OpInfo;
3017 if (!OpInfo) return false;
3019 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
3020 OpInfo[0].RegClass == ARM::GPRRegClassID &&
3021 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3022 OpInfo[2].RegClass < 0 &&
3023 "Expect >= 3 operands with one dst operand");
// Element size from opcode: i32 -> 32, s16/u16 -> 16, otherwise 8.
3026 Opcode == ARM::VGETLNi32 ? ESize32
3027 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
3030 // Rt = Inst{15-12} => ARM Rd
3031 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3034 // Dn = Inst{7:19-16} => NEON Rn
3035 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3036 decodeNEONRn(insn))));
3038 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// VMOV ARM core register -> scalar (A8.6.328): emits Dd, a placeholder for
// the TIED_TO Dd source, Rt, and the lane index derived from the opcode's
// element size.
3044 // Vector Set Lane (move ARM core register to scalar) Instructions.
3045 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
3046 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3047 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3049 const TargetInstrDesc &TID = ARMInsts[Opcode];
3050 const TargetOperandInfo *OpInfo = TID.OpInfo;
3051 if (!OpInfo) return false;
3053 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
3054 OpInfo[0].RegClass == ARM::DPRRegClassID &&
3055 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3056 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
3057 OpInfo[2].RegClass == ARM::GPRRegClassID &&
3058 OpInfo[3].RegClass < 0 &&
3059 "Expect >= 3 operands with one dst operand");
// Element size from opcode: i8 -> 8, i16 -> 16, otherwise 32.
3062 Opcode == ARM::VSETLNi8 ? ESize8
3063 : (Opcode == ARM::VSETLNi16 ? ESize16
3066 // Dd = Inst{7:19-16} => NEON Rn
3067 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3068 decodeNEONRn(insn))));
// Placeholder slot (reg 0) for the TIED_TO Dd source operand.
3071 MI.addOperand(MCOperand::CreateReg(0));
3073 // Rt = Inst{15-12} => ARM Rd
3074 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3077 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// VDUP from an ARM core register to all elements: emits Qd/Dd (from the
// NEON Rn field) followed by Rt.
3083 // Vector Duplicate Instructions (from ARM core register to all elements).
3084 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
3085 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3086 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3088 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3090 assert(NumOps >= 2 &&
3091 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
3092 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
3093 OpInfo[1].RegClass == ARM::GPRRegClassID &&
3094 "Expect >= 2 operands and first 2 as reg operand");
3096 unsigned RegClass = OpInfo[0].RegClass;
3098 // Qd/Dd = Inst{7:19-16} => NEON Rn
3099 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
3100 decodeNEONRn(insn))));
3102 // Rt = Inst{15-12} => ARM Rd
3103 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3113 static inline bool MemBarrierInstr(uint32_t insn) {
3114 unsigned op7_4 = slice(insn, 7, 4);
3115 if (slice(insn, 31, 8) == 0xf57ff0 && (op7_4 >= 4 && op7_4 <= 6))
3121 static inline bool PreLoadOpcode(unsigned Opcode) {
3123 case ARM::PLDi12: case ARM::PLDrs:
3124 case ARM::PLDWi12: case ARM::PLDWrs:
3125 case ARM::PLIi12: case ARM::PLIrs:
// Disassembles PLD/PLDW/PLI: base register plus either an addrmode_imm12
// signed offset (imm12 forms) or a shifted-register operand (rs forms).
3132 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3133 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3135 // Preload Data/Instruction requires either 2 or 3 operands.
3136 // PLDi12, PLDWi12, PLIi12: addrmode_imm12
3137 // PLDrs, PLDWrs, PLIrs: ldst_so_reg
3139 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3142 if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
3143 || Opcode == ARM::PLIi12) {
3144 unsigned Imm12 = slice(insn, 11, 0);
3145 bool Negative = getUBit(insn) == 0;
3147 // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
3148 if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
3149 DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
3150 MI.setOpcode(ARM::PLDi12);
3153 // -0 is represented specially. All other values are as normal.
3154 int Offset = Negative ? -1 * Imm12 : Imm12;
3155 if (Imm12 == 0 && Negative)
3158 MI.addOperand(MCOperand::CreateImm(Offset));
// Register-shifted forms: offset register plus an AM2 shift immediate.
3161 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3164 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
3166 // Inst{6-5} encodes the shift opcode.
3167 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
3168 // Inst{11-7} encodes the imm5 shift amount.
3169 unsigned ShImm = slice(insn, 11, 7);
3171 // A8.4.1. Possible rrx or shift amount of 32...
3172 getImmShiftSE(ShOp, ShImm);
3173 MI.addOperand(MCOperand::CreateImm(
3174 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Catch-all for the miscellaneous instructions: memory barriers, SWP/SWPB
// (delegated to the load/store-exclusive handler), SETEND, the CPS
// variants, DBG, BKPT, and the preload instructions (delegated to
// DisassemblePreLoadFrm).
3181 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3182 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3184 if (MemBarrierInstr(insn)) {
3185 // DMBsy, DSBsy, and ISBsy instructions have zero operand and are taken care
3186 // of within the generic ARMBasicMCBuilder::BuildIt() method.
3188 // Inst{3-0} encodes the memory barrier option for the variants.
3189 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3205 // SWP, SWPB: Rd Rm Rn
3206 // Delegate to DisassembleLdStExFrm()....
3207 return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// SETEND: the endianness bit is Inst{9}.
3212 if (Opcode == ARM::SETEND) {
3214 MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
3218 // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
3219 // opcodes which match the same real instruction. This is needed since there's
3220 // no current handling of optional arguments. Fix here when a better handling
3221 // of optional arguments is implemented.
3222 if (Opcode == ARM::CPS3p) { // M = 1
3223 // Let's reject these impossible imod values by returning false:
3226 // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
3227 // invalid combination, so we just check for imod=0b00 here.
3228 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3230 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3231 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3232 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3236 if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
3237 // Let's reject these impossible imod values by returning false:
3238 // 1. (imod=0b00,M=0)
3240 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3242 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3243 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3247 if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
3248 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3253 // DBG has its option specified in Inst{3-0}.
3254 if (Opcode == ARM::DBG) {
3255 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3260 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3261 if (Opcode == ARM::BKPT) {
3262 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3263 slice(insn, 3, 0)));
3268 if (PreLoadOpcode(Opcode))
3269 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// Anything else reaching this handler is unexpected.
3271 assert(0 && "Unexpected misc instruction!");
3275 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3276 /// We divide the disassembly task into different categories, with each one
3277 /// corresponding to a specific instruction encoding format. There could be
3278 /// exceptions when handling a specific format, and that is why the Opcode is
3279 /// also present in the function prototype.
// NOTE(review): entry order presumably mirrors the ARMFormat enum — confirm
// against ARMDisassemblerCore.h before reordering anything here.
3280 static const DisassembleFP FuncPtrs[] = {
3284 &DisassembleBrMiscFrm,
3286 &DisassembleDPSoRegFrm,
3289 &DisassembleLdMiscFrm,
3290 &DisassembleStMiscFrm,
3291 &DisassembleLdStMulFrm,
3292 &DisassembleLdStExFrm,
3293 &DisassembleArithMiscFrm,
3296 &DisassembleVFPUnaryFrm,
3297 &DisassembleVFPBinaryFrm,
3298 &DisassembleVFPConv1Frm,
3299 &DisassembleVFPConv2Frm,
3300 &DisassembleVFPConv3Frm,
3301 &DisassembleVFPConv4Frm,
3302 &DisassembleVFPConv5Frm,
3303 &DisassembleVFPLdStFrm,
3304 &DisassembleVFPLdStMulFrm,
3305 &DisassembleVFPMiscFrm,
3306 &DisassembleThumbFrm,
3307 &DisassembleMiscFrm,
3308 &DisassembleNGetLnFrm,
3309 &DisassembleNSetLnFrm,
3310 &DisassembleNDupFrm,
3312 // VLD and VST (including one lane) Instructions.
3315 // A7.4.6 One register and a modified immediate value
3316 // 1-Register Instructions with imm.
3317 // LLVM only defines VMOVv instructions.
3318 &DisassembleN1RegModImmFrm,
3320 // 2-Register Instructions with no imm.
3321 &DisassembleN2RegFrm,
3323 // 2-Register Instructions with imm (vector convert float/fixed point).
3324 &DisassembleNVCVTFrm,
3326 // 2-Register Instructions with imm (vector dup lane).
3327 &DisassembleNVecDupLnFrm,
3329 // Vector Shift Left Instructions.
3330 &DisassembleN2RegVecShLFrm,
3332 // Vector Shift Right Instructions, which have a different interpretation
3333 // of the shift amount from the imm6 field.
3334 &DisassembleN2RegVecShRFrm,
3336 // 3-Register Data-Processing Instructions.
3337 &DisassembleN3RegFrm,
3339 // Vector Shift (Register) Instructions.
3340 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3341 &DisassembleN3RegVecShFrm,
3343 // Vector Extract Instructions.
3344 &DisassembleNVecExtractFrm,
3346 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3347 // By Scalar Instructions.
3348 &DisassembleNVecMulScalarFrm,
3350 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3351 // values in a table and generate a new vector.
3352 &DisassembleNVTBLFrm,
3357 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3358 /// The general idea is to set the Opcode for the MCInst, followed by adding
3359 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3360 /// to the Format-specific disassemble function for disassembly, followed by
3361 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3362 /// which follow the Dst/Src Operands.
3363 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3364 // Stage 1 sets the Opcode.
3365 MI.setOpcode(Opcode)
3366 // If the number of operands is zero, we're done!
3370 // Stage 2 calls the format-specific disassemble function to build the operand
3374 unsigned NumOpsAdded = 0;
// Disasm is the format-specific handler selected from FuncPtrs.
3375 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
// Fail if the handler rejected the encoding or recorded a builder error.
3377 if (!OK || this->Err != 0) return false;
3378 if (NumOpsAdded >= NumOps)
3381 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3382 // FIXME: Should this be done selectively?
3383 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3386 // A8.3 Conditional execution
3387 // A8.3.1 Pseudocode details of conditional execution
3388 // Condition bits '111x' indicate the instruction is always executed.
//
// Maps a raw 4-bit condition field to the condition-code immediate used as
// an MCOperand. 0xF ('1111') is the "always" encoding per A8.3; presumably
// it is folded to ARMCC::AL and other values pass through — the return
// statements fall outside this excerpt, TODO confirm.
3389 static uint32_t CondCode(uint32_t CondField) {
3390 if (CondField == 0xF)
3395 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3396 /// of some Thumb instructions which come before the reglist operands. It
3397 /// returns true if the two predicate operands have been processed.
///
/// The predicate is modeled as a pair of operands: a condition-code
/// immediate followed by the CCR register operand (ARM::CPSR). The insn
/// parameter is intentionally unused here.
3398 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3399 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3401 assert(NumOpsRemaining > 0 && "Invalid argument");
3403 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx is where the next operand will be appended; the predicate pair, if
// declared, is described by OpInfo[Idx] and OpInfo[Idx+1].
3404 unsigned Idx = MI.getNumOperands();
3406 // First, we check whether this instr specifies the PredicateOperand through
3407 // a pair of TargetOperandInfos with isPredicate() property.
// The first entry is an immediate (RegClass < 0) and the second is the CCR
// register class — the standard shape of ARM's pred:$p operand.
3408 if (NumOpsRemaining >= 2 &&
3409 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3410 OpInfo[Idx].RegClass < 0 &&
3411 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3413 // If we are inside an IT block, get the IT condition bits maintained via
3414 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3417 MI.addOperand(MCOperand::CreateImm(GetITCond()))
// Outside an IT block the instruction is unconditional: AL condition.
3419 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
// Second half of the predicate pair: the CCR register operand.
3420 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3427 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3428 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
/// constituents (condition-code immediate + CCR register, then the optional
/// CPSR def for the 'S' bit) after the format-specific pass has added the
/// Dst/Src operands.
3430 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3431 uint32_t insn, unsigned short NumOpsRemaining) {
3433 assert(NumOpsRemaining > 0 && "Invalid argument");
3435 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Mnemonic string, used below to recognize Thumb instructions ('t' prefix)
// and the conditional-branch special cases.
3436 const std::string &Name = ARMInsts[Opcode].Name;
3437 unsigned Idx = MI.getNumOperands();
3438 uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
3440 // First, we check whether this instr specifies the PredicateOperand through
3441 // a pair of TargetOperandInfos with isPredicate() property.
3442 if (NumOpsRemaining >= 2 &&
3443 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3444 OpInfo[Idx].RegClass < 0 &&
3445 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3447 // If we are inside an IT block, get the IT condition bits maintained via
3448 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3451 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Thumb path: mnemonics starting with 't' (e.g. tBcc, t2Bcc).
3453 if (Name.length() > 1 && Name[0] == 't') {
3454 // Thumb conditional branch instructions have their cond field embedded,
// so the condition comes from the instruction encoding itself:
// Inst{25-22} for the 32-bit t2Bcc, Inst{11-8} for the 16-bit tBcc.
3458 if (Name == "t2Bcc")
3459 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
3460 else if (Name == "tBcc")
3461 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
// Other Thumb instructions outside an IT block are unconditional.
3463 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3465 // ARM instructions get their condition field from Inst{31-28}.
3466 // We should reject Inst{31-28} = 0b1111 as invalid encoding.
// NEON-domain instructions legitimately encode 0b1111 there, hence the
// isNEONDomain() exemption.
3467 if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
3469 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
// Second half of the predicate pair: the CCR register operand.
3472 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3474 NumOpsRemaining -= 2;
3477 if (NumOpsRemaining == 0)
3480 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
// S==1 means the instruction updates CPSR; otherwise the optional def is
// the "no register" sentinel 0.
3481 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3482 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3486 if (NumOpsRemaining == 0)
3492 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3493 /// after BuildIt is finished.
///
/// With no session (SP == 0) the status is passed through untouched.
/// Otherwise: a t2IT instruction (re)initializes the session's IT state from
/// Inst{7-0}, demoting Status to false if InitIT rejects the bits; for other
/// instructions inside an IT block the session state is presumably advanced
/// in the elided branch — TODO confirm against the full source.
3494 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
3497 if (!SP) return Status;
3499 if (Opcode == ARM::t2IT)
3500 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3501 else if (InITBlock())
3507 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
///
/// Selects the format-specific disassemble function from FuncPtrs[] using
/// the format as an index. The assert excludes the last table slot — the
/// table apparently reserves its final entry for the unknown format; TODO
/// confirm against the FuncPtrs definition.
3508 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3510 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3511 unsigned Idx = (unsigned)format;
3512 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3513 Disasm = FuncPtrs[Idx];
3516 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3517 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3518 /// Return NULL if it fails to create/return a proper builder. API clients
3519 /// are responsible for freeing up of the allocated memory. Cacheing can be
3520 /// performed by the API clients to improve performance.
3521 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3522 // For "Unknown format", fail by returning a NULL pointer.
// Mirrors the constructor's assert: any format index at or beyond the last
// FuncPtrs slot has no disassemble function, so refuse to build (the NULL
// return itself is in an elided line).
3523 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3524 DEBUG(errs() << "Unknown format\n");
// Caller owns the returned builder and must delete it (see doc above);
// operand count comes from the static ARMInsts[] instruction descriptors.
3528 return new ARMBasicMCBuilder(Opcode, Format,
3529 ARMInsts[Opcode].getNumOperands());