1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/raw_ostream.h"
23 //#define DEBUG(X) do { X; } while (0)
25 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
26 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
27 /// describing the operand info for each ARMInsts[i].
29 /// Together with an instruction's encoding format, we can take advantage of the
30 /// NumOperands and the OpInfo fields of the target instruction description in
31 /// the quest to build out the MCOperand list for an MCInst.
33 /// The general guideline is that with a known format, the number of dst and src
34 /// operands are well-known. The dst is built first, followed by the src
35 /// operand(s). The operands not yet used at this point are for the Implicit
36 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
37 /// defined with two components:
39 /// def pred { // Operand PredicateOperand
40 /// ValueType Type = OtherVT;
41 /// string PrintMethod = "printPredicateOperand";
42 /// string AsmOperandLowerMethod = ?;
43 /// dag MIOperandInfo = (ops i32imm, CCR);
44 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
45 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
48 /// which is manifested by the TargetOperandInfo[] of:
50 /// { 0, 0|(1<<TOI::Predicate), 0 },
51 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
53 /// So the first predicate MCOperand corresponds to the immediate part of the
54 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
55 /// corresponds to a register kind of ARM::CPSR.
57 /// For the Defs part, in the simple case of only cc_out:$s, we have:
59 /// def cc_out { // Operand OptionalDefOperand
60 /// ValueType Type = OtherVT;
61 /// string PrintMethod = "printSBitModifierOperand";
62 /// string AsmOperandLowerMethod = ?;
63 /// dag MIOperandInfo = (ops CCR);
64 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
65 /// dag DefaultOps = (ops (i32 zero_reg));
68 /// which is manifested by the one TargetOperandInfo of:
70 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
72 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
73 #include "ARMGenInstrInfo.inc"
77 const char *ARMUtils::OpcodeName(unsigned Opcode) {
78 return ARMInsts[Opcode].Name;
81 // Return the register enum Based on RegClass and the raw register number.
82 // For DRegPair, see comments below.
// getRegisterEnum - Maps a (RegClassID, RawRegister) pair from the instruction
// encoding to the LLVM ARM register enum (e.g. ARM::R0, ARM::D5, ARM::Q2).
// QPR raw registers are halved (Q registers are encoded as even D numbers);
// rGPR is treated as GPR.  On an invalid combination the builder is flagged
// with an error (see the DEBUG message at the bottom).
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (86, 92, 96-97, 100, 102, 104-108, 113, 116, ...), so
// the `unsigned RegNum = ...` initializer, the enclosing `switch (RegNum)`
// with its `case 0:` .. `case 31:` labels, the per-class `return ARM::D0;` /
// `return ARM::Q0;`-style statements, and the closing braces/returns are
// missing here.  Do not edit the logic without the full file.
84 static unsigned getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister,
85 bool DRegPair = false) {
87 if (DRegPair && RegClassID == ARM::QPRRegClassID) {
88 // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
89 // in the ARM Architecture Manual as far as I understand it (A8.6.307).
90 // Therefore, we morph the RegClassID to be the sub register class and don't
91 // subsequently transform the RawRegister encoding when calculating RegNum.
93 // See also ARMInstPrinter::printOperand() wrt "dregpair" modifier part
94 // where this workaround is meant for.
95 RegClassID = ARM::DPRRegClassID;
98 // For this purpose, we can treat rGPR as if it were GPR.
99 if (RegClassID == ARM::rGPRRegClassID) RegClassID = ARM::GPRRegClassID;
// Q registers are encoded as even D numbers, hence the >> 1 below.
101 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
103 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
109 switch (RegClassID) {
110 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
111 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
112 case ARM::DPR_VFP2RegClassID:
114 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
115 case ARM::QPR_VFP2RegClassID:
117 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
121 switch (RegClassID) {
122 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
123 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
124 case ARM::DPR_VFP2RegClassID:
126 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
127 case ARM::QPR_VFP2RegClassID:
129 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
133 switch (RegClassID) {
134 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
135 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
136 case ARM::DPR_VFP2RegClassID:
138 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
139 case ARM::QPR_VFP2RegClassID:
141 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
145 switch (RegClassID) {
146 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
147 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
148 case ARM::DPR_VFP2RegClassID:
150 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
151 case ARM::QPR_VFP2RegClassID:
153 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
// Registers 4-7: note the narrower QPR class lists from here on (no QPR_8,
// which only covers Q0-Q3).
157 switch (RegClassID) {
158 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
159 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
160 case ARM::DPR_VFP2RegClassID:
162 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
163 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
167 switch (RegClassID) {
168 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
169 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
170 case ARM::DPR_VFP2RegClassID:
172 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
173 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
177 switch (RegClassID) {
178 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
179 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
180 case ARM::DPR_VFP2RegClassID:
182 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
183 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
187 switch (RegClassID) {
188 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
189 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
190 case ARM::DPR_VFP2RegClassID:
192 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
193 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// Registers 8 and up: tGPR (Thumb low registers R0-R7) and DPR_8/SPR_8 no
// longer apply.
197 switch (RegClassID) {
198 case ARM::GPRRegClassID: return ARM::R8;
199 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
200 case ARM::QPRRegClassID: return ARM::Q8;
201 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
205 switch (RegClassID) {
206 case ARM::GPRRegClassID: return ARM::R9;
207 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
208 case ARM::QPRRegClassID: return ARM::Q9;
209 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
213 switch (RegClassID) {
214 case ARM::GPRRegClassID: return ARM::R10;
215 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
216 case ARM::QPRRegClassID: return ARM::Q10;
217 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
221 switch (RegClassID) {
222 case ARM::GPRRegClassID: return ARM::R11;
223 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
224 case ARM::QPRRegClassID: return ARM::Q11;
225 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
229 switch (RegClassID) {
230 case ARM::GPRRegClassID: return ARM::R12;
231 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
232 case ARM::QPRRegClassID: return ARM::Q12;
233 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// GPR 13-15 are the special registers SP, LR, PC.
237 switch (RegClassID) {
238 case ARM::GPRRegClassID: return ARM::SP;
239 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
240 case ARM::QPRRegClassID: return ARM::Q13;
241 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
245 switch (RegClassID) {
246 case ARM::GPRRegClassID: return ARM::LR;
247 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
248 case ARM::QPRRegClassID: return ARM::Q14;
249 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
253 switch (RegClassID) {
254 case ARM::GPRRegClassID: return ARM::PC;
255 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
256 case ARM::QPRRegClassID: return ARM::Q15;
257 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Registers 16-31 only exist for the full DPR/SPR classes.
// NOTE(review): returning ARM::S16..S31 for SPRRegClassID looks suspicious
// (S registers only go to 31, indexed 0-31), but matches the original table —
// confirm against the full file before changing.
261 switch (RegClassID) {
262 case ARM::DPRRegClassID: return ARM::D16;
263 case ARM::SPRRegClassID: return ARM::S16;
267 switch (RegClassID) {
268 case ARM::DPRRegClassID: return ARM::D17;
269 case ARM::SPRRegClassID: return ARM::S17;
273 switch (RegClassID) {
274 case ARM::DPRRegClassID: return ARM::D18;
275 case ARM::SPRRegClassID: return ARM::S18;
279 switch (RegClassID) {
280 case ARM::DPRRegClassID: return ARM::D19;
281 case ARM::SPRRegClassID: return ARM::S19;
285 switch (RegClassID) {
286 case ARM::DPRRegClassID: return ARM::D20;
287 case ARM::SPRRegClassID: return ARM::S20;
291 switch (RegClassID) {
292 case ARM::DPRRegClassID: return ARM::D21;
293 case ARM::SPRRegClassID: return ARM::S21;
297 switch (RegClassID) {
298 case ARM::DPRRegClassID: return ARM::D22;
299 case ARM::SPRRegClassID: return ARM::S22;
303 switch (RegClassID) {
304 case ARM::DPRRegClassID: return ARM::D23;
305 case ARM::SPRRegClassID: return ARM::S23;
309 switch (RegClassID) {
310 case ARM::DPRRegClassID: return ARM::D24;
311 case ARM::SPRRegClassID: return ARM::S24;
315 switch (RegClassID) {
316 case ARM::DPRRegClassID: return ARM::D25;
317 case ARM::SPRRegClassID: return ARM::S25;
321 switch (RegClassID) {
322 case ARM::DPRRegClassID: return ARM::D26;
323 case ARM::SPRRegClassID: return ARM::S26;
327 switch (RegClassID) {
328 case ARM::DPRRegClassID: return ARM::D27;
329 case ARM::SPRRegClassID: return ARM::S27;
333 switch (RegClassID) {
334 case ARM::DPRRegClassID: return ARM::D28;
335 case ARM::SPRRegClassID: return ARM::S28;
339 switch (RegClassID) {
340 case ARM::DPRRegClassID: return ARM::D29;
341 case ARM::SPRRegClassID: return ARM::S29;
345 switch (RegClassID) {
346 case ARM::DPRRegClassID: return ARM::D30;
347 case ARM::SPRRegClassID: return ARM::S30;
351 switch (RegClassID) {
352 case ARM::DPRRegClassID: return ARM::D31;
353 case ARM::SPRRegClassID: return ARM::S31;
357 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
358 // Encoding error. Mark the builder with error code != 0.
363 ///////////////////////////////
365 // Utility Functions //
367 ///////////////////////////////
369 // Extract/Decode Rd: Inst{15-12}.
370 static inline unsigned decodeRd(uint32_t insn) {
371 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
374 // Extract/Decode Rn: Inst{19-16}.
375 static inline unsigned decodeRn(uint32_t insn) {
376 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
379 // Extract/Decode Rm: Inst{3-0}.
380 static inline unsigned decodeRm(uint32_t insn) {
381 return (insn & ARMII::GPRRegMask);
384 // Extract/Decode Rs: Inst{11-8}.
385 static inline unsigned decodeRs(uint32_t insn) {
386 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
389 static inline unsigned getCondField(uint32_t insn) {
390 return (insn >> ARMII::CondShift);
393 static inline unsigned getIBit(uint32_t insn) {
394 return (insn >> ARMII::I_BitShift) & 1;
397 static inline unsigned getAM3IBit(uint32_t insn) {
398 return (insn >> ARMII::AM3_I_BitShift) & 1;
401 static inline unsigned getPBit(uint32_t insn) {
402 return (insn >> ARMII::P_BitShift) & 1;
405 static inline unsigned getUBit(uint32_t insn) {
406 return (insn >> ARMII::U_BitShift) & 1;
409 static inline unsigned getPUBits(uint32_t insn) {
410 return (insn >> ARMII::U_BitShift) & 3;
413 static inline unsigned getSBit(uint32_t insn) {
414 return (insn >> ARMII::S_BitShift) & 1;
417 static inline unsigned getWBit(uint32_t insn) {
418 return (insn >> ARMII::W_BitShift) & 1;
421 static inline unsigned getDBit(uint32_t insn) {
422 return (insn >> ARMII::D_BitShift) & 1;
425 static inline unsigned getNBit(uint32_t insn) {
426 return (insn >> ARMII::N_BitShift) & 1;
429 static inline unsigned getMBit(uint32_t insn) {
430 return (insn >> ARMII::M_BitShift) & 1;
433 // See A8.4 Shifts applied to a register.
434 // A8.4.2 Register controlled shifts.
436 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
437 // into llvm enums for shift opcode. The API clients should pass in the value
438 // encoded with two bits, so the assert stays to signal a wrong API usage.
440 // A8-12: DecodeRegShift()
441 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
443 default: assert(0 && "No such value"); return ARM_AM::no_shift;
444 case 0: return ARM_AM::lsl;
445 case 1: return ARM_AM::lsr;
446 case 2: return ARM_AM::asr;
447 case 3: return ARM_AM::ror;
451 // See A8.4 Shifts applied to a register.
452 // A8.4.1 Constant shifts.
454 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
455 // encodings into the intended ShiftOpc and shift amount.
457 // A8-11: DecodeImmShift()
458 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
459 // If type == 0b11 and imm5 == 0, we have an rrx, instead.
460 if (ShOp == ARM_AM::ror && ShImm == 0)
462 // If (lsr or asr) and imm5 == 0, shift amount is 32.
463 if ((ShOp == ARM_AM::lsr || ShOp == ARM_AM::asr) && ShImm == 0)
467 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
468 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
469 // clients should pass in the value encoded with two bits, so the assert stays
470 // to signal a wrong API usage.
471 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
473 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
474 case 1: return ARM_AM::ia; // P=0 U=1
475 case 3: return ARM_AM::ib; // P=1 U=1
476 case 0: return ARM_AM::da; // P=0 U=0
477 case 2: return ARM_AM::db; // P=1 U=0
481 ////////////////////////////////////////////
483 // Disassemble function definitions //
485 ////////////////////////////////////////////
487 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
488 /// instr into a list of MCOperands in the appropriate order, with possible dst,
489 /// followed by possible src(s).
491 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
492 /// the CPSR, is factored into ARMBasicMCBuilder's method named
493 /// TryPredicateAndSBitModifier.
495 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
496 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
498 assert(0 && "Unexpected pseudo instruction!");
502 // Multiply Instructions.
503 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
504 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
506 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
507 // Rd{19-16} Rn{3-0} Rm{11-8}
509 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
510 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
512 // The mapping of the multiply registers to the "regular" ARM registers, where
513 // there are convenience decoder functions, is:
// Builds the MCOperand list for a multiply-form instruction: destination
// register(s) first (RdLo{15-12} before RdHi{19-16} for the long multiplies),
// then the sources Rn{3-0} and Rm{11-8}, then the optional accumulate
// register Ra{15-12}.  OpIdx aliases NumOpsAdded so the caller sees the count.
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (521, 526-528, 530, 535, 537, 541-544, 547-548, 551,
// 553-555, 560-...), so statement continuations (e.g. the register-number
// arguments to getRegisterEnum), OpIdx bookkeeping, and the closing
// braces/returns are missing.  Do not edit logic without the full file.
519 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
520 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
522 const TargetInstrDesc &TID = ARMInsts[Opcode];
523 unsigned short NumDefs = TID.getNumDefs();
524 const TargetOperandInfo *OpInfo = TID.OpInfo;
525 unsigned &OpIdx = NumOpsAdded;
529 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
531 && OpInfo[0].RegClass == ARM::GPRRegClassID
532 && OpInfo[1].RegClass == ARM::GPRRegClassID
533 && OpInfo[2].RegClass == ARM::GPRRegClassID
534 && "Expect three register operands");
536 // Instructions with two destination registers have RdLo{15-12} first.
538 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
539 "Expect 4th register operand");
540 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
545 // The destination register: RdHi{19-16} or Rd{19-16}.
546 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
549 // The two src registers: Rn{3-0}, then Rm{11-8}.
550 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
552 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
556 // Many multiply instructions (e.g., MLA) have three src registers.
557 // The third register operand is Ra{15-12}.
558 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
559 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
567 // Helper routines for disassembly of coprocessor instructions.
569 static bool LdStCopOpcode(unsigned Opcode) {
570 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
571 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
575 static bool CoprocessorOpcode(unsigned Opcode) {
576 if (LdStCopOpcode(Opcode))
582 case ARM::CDP: case ARM::CDP2:
583 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
584 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
588 static inline unsigned GetCoprocessor(uint32_t insn) {
589 return slice(insn, 11, 8);
591 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
592 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
594 static inline unsigned GetCopOpc2(uint32_t insn) {
595 return slice(insn, 7, 5);
597 static inline unsigned GetCopOpc(uint32_t insn) {
598 return slice(insn, 7, 4);
600 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
603 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
605 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
607 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
609 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
611 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
615 // LDC_OPTION: cop CRd Rn imm8
617 // STC_OPTION: cop CRd Rn imm8
// Builds the MCOperand list for the coprocessor instructions (CDP/MCR/MRC/
// MCRR/MRRC families and LDC/STC load-stores).  The coprocessor number comes
// first; the rest of the operand order depends on which sub-family the opcode
// belongs to (see the comment block above for the per-opcode layouts).
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (622, 624, 631-633, 635-636, 639, 641, 643-645, 649,
// 651-652, 654-656, 659, 663-664, 667, 669, 671-674, 676-...), so the
// branching between the LdStCop and register-form paths, statement
// continuations, OpIdx updates, and the closing braces/returns are missing.
// Do not edit logic without the full file.
620 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
621 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
623 assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");
625 unsigned &OpIdx = NumOpsAdded;
// MCRR/MRRC (and *2) use the single 4-bit opc field instead of opc1/opc2.
626 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
627 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
628 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
629 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
630 bool LdStCop = LdStCopOpcode(Opcode);
634 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
637 // Unindex if P:W = 0b00 --> _OPTION variant
638 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
// CRd is an immediate (coprocessor register number), base Rn is a GPR.
640 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
642 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
646 MI.addOperand(MCOperand::CreateReg(0));
647 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
// imm8 is scaled by 4 (word offset) before being packed into the AM2 opcode.
648 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
650 MI.addOperand(MCOperand::CreateImm(Offset));
653 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
657 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
658 : GetCopOpc1(insn, NoGPR)));
// For CDP the Rd slot is the immediate CRd; otherwise it is a GPR.
660 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
661 : MCOperand::CreateReg(
662 getRegisterEnum(B, ARM::GPRRegClassID,
665 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
666 getRegisterEnum(B, ARM::GPRRegClassID,
668 : MCOperand::CreateImm(decodeRn(insn)));
670 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
675 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
683 // Branch Instructions.
684 // BLr9: SignExtend(Imm24:'00', 32)
685 // Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
686 // SMC: ZeroExtend(imm4, 32)
687 // SVC: ZeroExtend(Imm24, 32)
689 // Various coprocessor instructions are assigned BrFrm arbitrarily.
690 // Delegates to DisassembleCoprocessor() helper function.
693 // MSR/MSRsys: Rm mask=Inst{19-16}
695 // MSRi/MSRsysi: so_imm
696 // SRSW/SRS: addrmode4:$addr mode_imm
697 // RFEW/RFE: addrmode4:$addr Rn
// Builds the MCOperand list for branch-form instructions, plus the assortment
// of system instructions assigned BrFrm (MRS/MSR, BXJ, SRS/RFE) and the
// coprocessor opcodes, which are delegated to DisassembleCoprocessor().
// The final fall-through path handles Bcc/BLr9/BLr9_pred/SMC/SVC, whose sole
// operand is an immediate (sign- or zero-extended per the comments below).
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (700, 703, 706, 712-715, 721-724, 730, 732-734, 744-746,
// 756, 759, 761-765, 769, 771-772, 779, 784, 789-791, 793-...), so the
// NumOpsAdded updates, early returns, the Imm32 declaration, the closing
// braces, and (per the comment near the end) the PC+8 compensation are
// missing.  Do not edit logic without the full file.
698 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
699 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
701 if (CoprocessorOpcode(Opcode))
702 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
704 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
705 if (!OpInfo) return false;
707 // MRS and MRSsys take one GPR reg Rd.
708 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
709 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
710 "Reg operand expected");
711 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
716 // BXJ takes one GPR reg Rm.
717 if (Opcode == ARM::BXJ) {
718 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
719 "Reg operand expected");
720 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
725 // MSR and MSRsys take one GPR reg Rm, followed by the mask.
726 if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
727 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
728 "Reg operand expected");
729 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
731 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
735 // MSRi and MSRsysi take one so_imm operand, followed by the mask.
736 if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
737 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
738 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
739 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
740 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
741 unsigned Imm = insn & 0xFF;
742 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
743 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
747 // SRSW and SRS require addrmode4:$addr for ${addr:submode}, followed by the
748 // mode immediate (Inst{4-0}).
749 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
750 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
751 // ARMInstPrinter::printAddrMode4Operand() prints special mode string
752 // if the base register is SP; so don't set ARM::SP.
753 MI.addOperand(MCOperand::CreateReg(0))&#59;
754 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
755 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
// SRS takes the mode immediate; RFE takes the base register Rn instead.
757 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
758 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
760 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
766 assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
767 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
768 "Unexpected Opcode");
770 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Reg operand expected");
773 if (Opcode == ARM::SMC) {
774 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
775 Imm32 = slice(insn, 3, 0);
776 } else if (Opcode == ARM::SVC) {
777 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
778 Imm32 = slice(insn, 23, 0);
780 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
781 unsigned Imm26 = slice(insn, 23, 0) << 2;
782 //Imm32 = signextend<signed int, 26>(Imm26);
783 Imm32 = SignExtend32<26>(Imm26);
785 // When executing an ARM instruction, PC reads as the address of the current
786 // instruction plus 8. The assembler subtracts 8 from the difference
787 // between the branch instruction and the target address, disassembler has
788 // to add 8 to compensate.
792 MI.addOperand(MCOperand::CreateImm(Imm32));
798 // Misc. Branch Instructions.
799 // BR_JTadd, BR_JTr, BR_JTm
// Builds the MCOperand list for the misc. branch instructions: BX_RET (no
// explicit operands), BLXr9/BRIND (one GPR), and the jump-table branches
// BR_JTadd (ADD to PC), BR_JTr (MOV to PC), and BR_JTm (LDR to PC), each of
// which is padded with two trailing zero immediates.
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (804, 807, 809-811, 814-815, 821-825, 829, 832, 834-835,
// 839-843, 847, 850-851, 855-859, 864, 867-868, 870, 873, 878, 883, 887-...),
// so statement continuations (the register-number arguments), OpIdx updates,
// early returns, and closing braces are missing.  Do not edit logic without
// the full file.
802 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
803 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
805 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
806 if (!OpInfo) return false;
808 unsigned &OpIdx = NumOpsAdded;
812 // BX_RET has only two predicate operands, do an early return.
813 if (Opcode == ARM::BX_RET)
816 // BLXr9 and BRIND take one GPR reg.
817 if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
818 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
819 "Reg operand expected");
820 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
826 // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
827 if (Opcode == ARM::BR_JTadd) {
828 // InOperandList with GPR:$target and GPR:$idx regs.
830 assert(NumOps == 4 && "Expect 4 operands");
831 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
833 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
836 // Fill in the two remaining imm operands to signify build completion.
837 MI.addOperand(MCOperand::CreateImm(0));
838 MI.addOperand(MCOperand::CreateImm(0));
844 // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
845 if (Opcode == ARM::BR_JTr) {
846 // InOperandList with GPR::$target reg.
848 assert(NumOps == 3 && "Expect 3 operands");
849 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
852 // Fill in the two remaining imm operands to signify build completion.
853 MI.addOperand(MCOperand::CreateImm(0));
854 MI.addOperand(MCOperand::CreateImm(0));
860 // BR_JTm is an LDR with Rt = PC.
861 if (Opcode == ARM::BR_JTm) {
862 // This is the reg/reg form, with base reg followed by +/- reg shop imm.
863 // See also ARMAddressingModes.h (Addressing Mode #2).
865 assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
866 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
869 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
871 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
872 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
874 // Inst{6-5} encodes the shift opcode.
875 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
876 // Inst{11-7} encodes the imm5 shift amount.
877 unsigned ShImm = slice(insn, 11, 7);
879 // A8.4.1. Possible rrx or shift amount of 32...
880 getImmShiftSE(ShOp, ShImm);
881 MI.addOperand(MCOperand::CreateImm(
882 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
884 // Fill in the two remaining imm operands to signify build completion.
885 MI.addOperand(MCOperand::CreateImm(0));
886 MI.addOperand(MCOperand::CreateImm(0));
895 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
896 uint32_t lsb = slice(insn, 11, 7);
897 uint32_t msb = slice(insn, 20, 16);
900 DEBUG(errs() << "Encoding error: msb < lsb\n");
904 for (uint32_t i = lsb; i <= msb; ++i)
910 // A major complication is the fact that some of the saturating add/subtract
911 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
912 // They are QADD, QDADD, QDSUB, and QSUB.
// Builds the MCOperand list for a data-processing instruction: optional GPR
// def (Rd), special cases for BFC/BFI and SBFX/UBFX, then Rn for binary ops
// (swapped with Rm for the QADD/QDADD/QDSUB/QSUB family), then operand 2 as
// either a register (Rm), the MOVi16/MOVTi16 imm16, or a rotated so_imm.
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (915, 921-923, 927-930, 932-934, 940-943, 945-946,
// 948-950, 953, 956-959, 962, 964, 970-972, 976-978, 980-982, 991, 997-998,
// 1007-...), so OpIdx updates, statement continuations, the returns after the
// special cases, and the closing braces are missing.  Do not edit logic
// without the full file.
913 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
914 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
916 const TargetInstrDesc &TID = ARMInsts[Opcode];
917 unsigned short NumDefs = TID.getNumDefs();
918 bool isUnary = isUnaryDP(TID.TSFlags);
919 const TargetOperandInfo *OpInfo = TID.OpInfo;
920 unsigned &OpIdx = NumOpsAdded;
924 // Disassemble register def if there is one.
925 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
926 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
931 // Now disassemble the src operands.
935 // Special-case handling of BFC/BFI/SBFX/UBFX.
936 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
937 MI.addOperand(MCOperand::CreateReg(0));
938 if (Opcode == ARM::BFI) {
939 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// The width operand is the inverted mask derived from msb/lsb.
944 if (!getBFCInvMask(insn, mask))
947 MI.addOperand(MCOperand::CreateImm(mask));
951 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
952 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// lsb = Inst{11-7}; width = msb - lsb + 1, with msb = Inst{20-16}.
954 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
955 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
// The saturating add/subtract ops have Rd Rm Rn instead of the usual Rd Rn Rm.
960 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
961 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
963 // BinaryDP has an Rn operand.
965 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
966 "Reg operand expected");
967 MI.addOperand(MCOperand::CreateReg(
968 getRegisterEnum(B, ARM::GPRRegClassID,
969 RmRn ? decodeRm(insn) : decodeRn(insn))));
973 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
974 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
975 MI.addOperand(MCOperand::CreateReg(0));
979 // Now disassemble operand 2.
983 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
984 // We have a reg/reg form.
985 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
986 // routed here as well.
987 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
988 MI.addOperand(MCOperand::CreateReg(
989 getRegisterEnum(B, ARM::GPRRegClassID,
990 RmRn? decodeRn(insn) : decodeRm(insn))));
992 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
993 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
994 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
995 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
996 MI.addOperand(MCOperand::CreateImm(Imm16));
999 // We have a reg/imm form.
1000 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1001 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1002 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1003 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1004 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1005 unsigned Imm = insn & 0xFF;
1006 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Builds the MCOperand list for a data-processing instruction whose operand 2
// is a shifted register (so_reg): optional GPR def, Rn for binary ops, then
// the three so_reg components [Rm, Rs-or-reg0, shift opcode/amount], where
// Inst{4} selects register-controlled vs. constant shifts.
//
// NOTE(review): this copy is an incomplete excerpt — the original-line
// numbering has gaps (1015, 1021-1023, 1027-1030, 1033-1034, 1036, 1040-1043,
// 1047-1049, 1052-1053, 1058, 1061, 1063-1064, 1067, 1071, 1078, 1082-...),
// so OpIdx updates, statement continuations (register-number arguments, the
// if/else between the two shift forms), and the closing braces/returns are
// missing.  Do not edit logic without the full file.
1013 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1014 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1016 const TargetInstrDesc &TID = ARMInsts[Opcode];
1017 unsigned short NumDefs = TID.getNumDefs();
1018 bool isUnary = isUnaryDP(TID.TSFlags);
1019 const TargetOperandInfo *OpInfo = TID.OpInfo;
1020 unsigned &OpIdx = NumOpsAdded;
1024 // Disassemble register def if there is one.
1025 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1026 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1031 // Disassemble the src operands.
1032 if (OpIdx >= NumOps)
1035 // BinaryDP has an Rn operand.
1037 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1038 "Reg operand expected");
1039 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1044 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1045 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1046 MI.addOperand(MCOperand::CreateReg(0));
1050 // Disassemble operand 2, which consists of three components.
1051 if (OpIdx + 2 >= NumOps)
1054 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1055 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1056 (OpInfo[OpIdx+2].RegClass < 0) &&
1057 "Expect 3 reg operands");
1059 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1060 unsigned Rs = slice(insn, 4, 4);
// Rm is always the first of the three so_reg components.
1062 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1065 // Register-controlled shifts: [Rm, Rs, shift].
1066 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1068 // Inst{6-5} encodes the shift opcode.
1069 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1070 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1072 // Constant shifts: [Rm, reg0, shift_imm].
1073 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1074 // Inst{6-5} encodes the shift opcode.
1075 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1076 // Inst{11-7} encodes the imm5 shift amount.
1077 unsigned ShImm = slice(insn, 11, 7);
1079 // A8.4.1. Possible rrx or shift amount of 32...
1080 getImmShiftSE(ShOp, ShImm);
1081 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// DisassembleLdStFrm - Common worker for word/byte load/store (addressing
// mode 2).  Operand order built: [base writeback for pre/post store],
// dst/src reg, [base writeback for pre/post load], base reg, then the
// +/- offset: either (reg0, imm12) for the immediate form or
// (Rm, am2-encoded shift) for the register form.
1088 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1089     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1091   const TargetInstrDesc &TID = ARMInsts[Opcode];
1092   bool isPrePost = isPrePostLdSt(TID.TSFlags);
1093   const TargetOperandInfo *OpInfo = TID.OpInfo;
1094   if (!OpInfo) return false;
1096   unsigned &OpIdx = NumOpsAdded;
  // Loads define at least one register; stores define none unless they
  // write back the base (pre/post indexed forms).
1100   assert(((!isStore && TID.getNumDefs() > 0) ||
1101           (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1102          && "Invalid arguments");
1104   // Operand 0 of a pre- and post-indexed store is the address base writeback.
1105   if (isPrePost && isStore) {
1106     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1107            "Reg operand expected");
1108     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1113   // Disassemble the dst/src operand.
1114   if (OpIdx >= NumOps)
1117   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1118          "Reg operand expected");
1119   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1123   // After dst of a pre- and post-indexed load is the address base writeback.
1124   if (isPrePost && !isStore) {
1125     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1126            "Reg operand expected");
1127     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1132   // Disassemble the base operand.
1133   if (OpIdx >= NumOps)
1136   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1137          "Reg operand expected");
1138   assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1139          && "Index mode or tied_to operand expected");
1140   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1144   // For reg/reg form, base reg is followed by +/- reg shop imm.
1145   // For immediate form, it is followed by +/- imm12.
1146   // See also ARMAddressingModes.h (Addressing Mode #2).
1147   if (OpIdx + 1 >= NumOps)
1150   assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1151          (OpInfo[OpIdx+1].RegClass < 0) &&
1152          "Expect 1 reg operand followed by 1 imm operand");
  // U bit (Inst{23}) selects add vs. subtract of the offset.
1154   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  // I bit clear => immediate offset form; set => register offset form.
1155   if (getIBit(insn) == 0) {
1156     MI.addOperand(MCOperand::CreateReg(0));
1158     // Disassemble the 12-bit immediate offset.
1159     unsigned Imm12 = slice(insn, 11, 0);
1160     unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
1161     MI.addOperand(MCOperand::CreateImm(Offset));
1163     // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1164     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1166     // Inst{6-5} encodes the shift opcode.
1167     ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1168     // Inst{11-7} encodes the imm5 shift amount.
1169     unsigned ShImm = slice(insn, 11, 7);
1171     // A8.4.1.  Possible rrx or shift amount of 32...
1172     getImmShiftSE(ShOp, ShImm);
1173     MI.addOperand(MCOperand::CreateImm(
1174                     ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleLdFrm - Load flavor of the AM2 ld/st worker (isStore = false).
1181 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1182     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1183   return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
// DisassembleStFrm - Store flavor of the AM2 ld/st worker (isStore = true).
1186 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1187     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1188   return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
// HasDualReg - True for the doubleword load/store opcodes (LDRD/STRD and
// their pre/post-indexed variants), which transfer a register pair.
1191 static bool HasDualReg(unsigned Opcode) {
1195   case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1196   case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// DisassembleLdStMiscFrm - Worker for the "miscellaneous" loads/stores
// (halfword, signed byte, doubleword — addressing mode 3).  Same operand
// scaffolding as DisassembleLdStFrm, plus a second data register for the
// dual-reg (LDRD/STRD) opcodes, and an AM3-encoded offset: either +/- Rm
// or an 8-bit immediate split across Inst{11-8}:Inst{3-0}.
1201 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1202     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1204   const TargetInstrDesc &TID = ARMInsts[Opcode];
1205   bool isPrePost = isPrePostLdSt(TID.TSFlags);
1206   const TargetOperandInfo *OpInfo = TID.OpInfo;
1207   if (!OpInfo) return false;
1209   unsigned &OpIdx = NumOpsAdded;
1213   assert(((!isStore && TID.getNumDefs() > 0) ||
1214           (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1215          && "Invalid arguments");
1217   // Operand 0 of a pre- and post-indexed store is the address base writeback.
1218   if (isPrePost && isStore) {
1219     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1220            "Reg operand expected");
1221     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1226   bool DualReg = HasDualReg(Opcode);
1228   // Disassemble the dst/src operand.
1229   if (OpIdx >= NumOps)
1232   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1233          "Reg operand expected");
1234   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1238   // Fill in LDRD and STRD's second operand.
  // Dual transfers always use the odd register following Rd.
1240     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1241                                                        decodeRd(insn) + 1)));
1245   // After dst of a pre- and post-indexed load is the address base writeback.
1246   if (isPrePost && !isStore) {
1247     assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1248            "Reg operand expected");
1249     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1254   // Disassemble the base operand.
1255   if (OpIdx >= NumOps)
1258   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1259          "Reg operand expected");
1260   assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1261          && "Index mode or tied_to operand expected");
1262   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1266   // For reg/reg form, base reg is followed by +/- reg.
1267   // For immediate form, it is followed by +/- imm8.
1268   // See also ARMAddressingModes.h (Addressing Mode #3).
1269   if (OpIdx + 1 >= NumOps)
1272   assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1273          (OpInfo[OpIdx+1].RegClass < 0) &&
1274          "Expect 1 reg operand followed by 1 imm operand");
1276   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  // AM3 "I" bit set => split imm8 offset; clear => register offset.
1277   if (getAM3IBit(insn) == 1) {
1278     MI.addOperand(MCOperand::CreateReg(0));
1280     // Disassemble the 8-bit immediate offset.
1281     unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1282     unsigned Imm4L = insn & 0xF;
1283     unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
1284     MI.addOperand(MCOperand::CreateImm(Offset));
1286     // Disassemble the offset reg (Rm).
1287     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1289     unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
1290     MI.addOperand(MCOperand::CreateImm(Offset));
// DisassembleLdMiscFrm - Load flavor of the AM3 misc ld/st worker.
1297 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1298     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1299   return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
// DisassembleStMiscFrm - Store flavor of the AM3 misc ld/st worker.
1303 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1304     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1305   return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1308 // The algorithm for disassembly of LdStMulFrm is different from others because
1309 // it explicitly populates the two predicate operands after operand 0 (the base)
1310 // and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
1311 // reglist with each affected register encoded as an MCOperand.
// DisassembleLdStMulFrm - Disassemble LDM/STM (addressing mode 4).
// Operand order: [base writeback for _UPD opcodes], base, AM4 sub-mode imm,
// the two predicate operands, then one MCOperand per set bit of the
// 16-bit register list in Inst{15-0}.
1312 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1313     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1315   assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");
1317   unsigned &OpIdx = NumOpsAdded;
1321   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1323   // Writeback to base, if necessary.
1324   if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
1325     MI.addOperand(MCOperand::CreateReg(Base));
1329   MI.addOperand(MCOperand::CreateReg(Base));
  // P and U bits (Inst{24-23}) select ia/ib/da/db.
1331   ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1332   MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
1334   // Handling the two predicate operands before the reglist.
  // Cond 0b1111 is the unconditional encoding; map it to AL (0xE).
1335   int64_t CondVal = insn >> ARMII::CondShift;
1336   MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1337   MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1341   // Fill the variadic part of reglist.
1342   unsigned RegListBits = insn & ((1 << 16) - 1);
1343   for (unsigned i = 0; i < 16; ++i) {
1344     if ((RegListBits >> i) & 1) {
1345       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1354 // LDREX, LDREXB, LDREXH: Rd Rn
1355 // LDREXD: Rd Rd+1 Rn
1356 // STREX, STREXB, STREXH: Rd Rm Rn
1357 // STREXD: Rd Rm Rm+1 Rn
1359 // SWP, SWPB: Rd Rm Rn
// Disassembles the exclusive (LDREX*/STREX*) and swap (SWP/SWPB) forms.
// Inst{20} distinguishes load (1) from store (0); the *D opcodes add the
// second register of the pair (reg+1), and the base pointer comes last.
1360 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1361     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1363   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1364   if (!OpInfo) return false;
1366   unsigned &OpIdx = NumOpsAdded;
1371          && OpInfo[0].RegClass == ARM::GPRRegClassID
1372          && OpInfo[1].RegClass == ARM::GPRRegClassID
1373          && "Expect 2 reg operands");
1375   bool isStore = slice(insn, 20, 20) == 0;
1376   bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1378   // Add the destination operand.
1379   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1383   // Store register Exclusive needs a source operand.
1385     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  // STREXD: the second source register of the pair is Rm+1.
1390       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1391                                                          decodeRm(insn)+1)));
  // LDREXD: the second destination register of the pair is Rd+1.
1395     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1396                                                        decodeRd(insn)+1)));
1400   // Finally add the pointer operand.
1401   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1408 // Misc. Arithmetic Instructions.
1410 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1411 // RBIT, REV, REV16, REVSH: Rd Rm
// Builds Rd [Rn] Rm, then for PKHBT/PKHTB (the three-reg forms) an extra
// imm5 LSL/ASR shift amount taken from Inst{11-7}.
1412 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1413     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1415   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1416   unsigned &OpIdx = NumOpsAdded;
1421          && OpInfo[0].RegClass == ARM::GPRRegClassID
1422          && OpInfo[1].RegClass == ARM::GPRRegClassID
1423          && "Expect 2 reg operands");
1425   bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1427   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1432     assert(NumOps >= 4 && "Expect >= 4 operands");
1433     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1438   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1442   // If there is still an operand info left which is an immediate operand, add
1443   // an additional imm5 LSL/ASR operand.
1444   if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1445       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1446     // Extract the 5-bit immediate field Inst{11-7}.
1447     unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1448     MI.addOperand(MCOperand::CreateImm(ShiftAmt));
1455 /// DisassembleSatFrm - Disassemble saturate instructions:
1456 /// SSAT, SSAT16, USAT, and USAT16.
1457 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1458     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1460   const TargetInstrDesc &TID = ARMInsts[Opcode];
1461   NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1463   // Disassemble register def.
1464   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  // Saturate position: sat_imm in Inst{20-16}; the signed variants (SSAT,
  // SSAT16) encode saturate-to as sat_imm + 1.
1467   unsigned Pos = slice(insn, 20, 16);
1468   if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1470   MI.addOperand(MCOperand::CreateImm(Pos));
1472   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  // SSAT/USAT (4 operands) carry an optional shift; Inst{6} picks ASR vs LSL.
1475   if (NumOpsAdded == 4) {
1476     ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1477     // Inst{11-7} encodes the imm5 shift amount.
1478     unsigned ShAmt = slice(insn, 11, 7);
1480     // A8.6.183.  Possible ASR shift amount of 32...
1481     if (Opc == ARM_AM::asr)
1484         Opc = ARM_AM::no_shift;
1486     MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1491 // Extend instructions.
1492 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1493 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1494 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
1495 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1496     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1498   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1499   unsigned &OpIdx = NumOpsAdded;
1504          && OpInfo[0].RegClass == ARM::GPRRegClassID
1505          && OpInfo[1].RegClass == ARM::GPRRegClassID
1506          && "Expect 2 reg operands");
  // Three-reg form (SXTA*/UXTA*) has Rn between Rd and Rm.
1508   bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1510   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1515     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1520   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1524   // If there is still an operand info left which is an immediate operand, add
1525   // an additional rotate immediate operand.
1526   if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1527       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1528     // Extract the 2-bit rotate field Inst{11-10}.
1529     unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1530     // Rotation by 8, 16, or 24 bits.
1531     MI.addOperand(MCOperand::CreateImm(rot << 3));
1538 /////////////////////////////////////
1540 // Utility Functions For VFP //
1542 /////////////////////////////////////
1544 // Extract/Decode Dd/Sd:
1546 // SP => d = UInt(Vd:D)
1547 // DP => d = UInt(D:Vd)
// Single precision packs the D bit low (Vd:D); double packs it high (D:Vd).
1548 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1549   return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1550                  : (decodeRd(insn) | getDBit(insn) << 4);
1553 // Extract/Decode Dn/Sn:
1555 // SP => n = UInt(Vn:N)
1556 // DP => n = UInt(N:Vn)
// Single precision packs the N bit low (Vn:N); double packs it high (N:Vn).
1557 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1558   return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1559                  : (decodeRn(insn) | getNBit(insn) << 4);
1562 // Extract/Decode Dm/Sm:
1564 // SP => m = UInt(Vm:M)
1565 // DP => m = UInt(M:Vm)
// Single precision packs the M bit low (Vm:M); double packs it high (M:Vm).
1566 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1567   return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1568                  : (decodeRm(insn) | getMBit(insn) << 4);
// VFPExpandImm - Expand an 8-bit VFP modified-immediate into an N-bit
// (32 or 64) floating-point bit pattern: sign = bit7, exponent derived
// from bit6 (replicated) and bits 5-4, fraction from the low bits.
1573 static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
1574   assert(N == 32 || N == 64);
1577   unsigned bit6 = slice(byte, 6, 6);
  // N == 32: sign at bit 31, fraction bits placed at 24-19.
1579     Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1581       Result |= 0x1f << 25;
1583       Result |= 0x1 << 30;
  // N == 64: sign at bit 63, fraction bits placed at 53-48.
1585     Result = (uint64_t)slice(byte, 7, 7) << 63 |
1586              (uint64_t)slice(byte, 5, 0) << 48;
1588       Result |= 0xffL << 54;
1590       Result |= 0x1L << 62;
1596 // VFP Unary Format Instructions:
1598 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1599 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1600 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
1601 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1602     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1604   assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1606   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1607   unsigned &OpIdx = NumOpsAdded;
  // The dst reg class (SPR vs DPR) selects single- vs double-precision
  // decoding of the D/Vd field.
1611   unsigned RegClass = OpInfo[OpIdx].RegClass;
1612   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1613          "Reg operand expected");
1614   bool isSP = (RegClass == ARM::SPRRegClassID);
1616   MI.addOperand(MCOperand::CreateReg(
1617                   getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1620   // Early return for compare with zero instructions.
1621   if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1622       || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
  // Src reg class may differ from dst for VCVTDS/VCVTSD, so re-query it.
1625   RegClass = OpInfo[OpIdx].RegClass;
1626   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1627          "Reg operand expected");
1628   isSP = (RegClass == ARM::SPRRegClassID);
1630   MI.addOperand(MCOperand::CreateReg(
1631                   getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1637 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1638 // Some of them have operand constraints which tie the first operand in the
1639 // InOperandList to that of the dst. As far as asm printing is concerned, this
1640 // tied_to operand is simply skipped.
1641 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1642     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1644   assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1646   const TargetInstrDesc &TID = ARMInsts[Opcode];
1647   const TargetOperandInfo *OpInfo = TID.OpInfo;
1648   unsigned &OpIdx = NumOpsAdded;
  // All three operands share one reg class; it decides SP vs DP decoding.
1652   unsigned RegClass = OpInfo[OpIdx].RegClass;
1653   assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1654          "Reg operand expected");
1655   bool isSP = (RegClass == ARM::SPRRegClassID);
1657   MI.addOperand(MCOperand::CreateReg(
1658                   getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1661   // Skip tied_to operand constraint.
1662   if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1663     assert(NumOps >= 4 && "Expect >=4 operands");
1664     MI.addOperand(MCOperand::CreateReg(0));
1668   MI.addOperand(MCOperand::CreateReg(
1669                   getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1672   MI.addOperand(MCOperand::CreateReg(
1673                   getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1679 // A8.6.295 vcvt (floating-point <-> integer)
1680 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1681 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1683 // A8.6.297 vcvt (floating-point and fixed-point)
1684 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
1685 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1686     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1688   assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1690   const TargetInstrDesc &TID = ARMInsts[Opcode];
1691   const TargetOperandInfo *OpInfo = TID.OpInfo;
1692   if (!OpInfo) return false;
1694   bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1695   bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1696   unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
  // Fixed-point form: Dd|Sd, tied copy of it, then #fbits where
  // fbits = (16|32) - UInt(imm4:i) per A8.6.297.
1700     assert(NumOps >= 3 && "Expect >= 3 operands");
1701     int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1702     int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1703     MI.addOperand(MCOperand::CreateReg(
1704                     getRegisterEnum(B, RegClassID,
1705                                     decodeVFPRd(insn, SP))));
1707     assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1708            "Tied to operand expected");
1709     MI.addOperand(MI.getOperand(0));
1711     assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1712            !OpInfo[2].isOptionalDef() && "Imm operand expected");
1713     MI.addOperand(MCOperand::CreateImm(fbits));
1718     // The Rd (destination) and Rm (source) bits have different interpretations
1719     // depending on whether each side is single precision.
    // Inst{18} set => FP-to-integer: the integer side (dst) is always an SPR.
1721     if (slice(insn, 18, 18) == 1) { // to_integer operation
1722       d = decodeVFPRd(insn, true /* Is Single Precision */);
1723       MI.addOperand(MCOperand::CreateReg(
1724                       getRegisterEnum(B, ARM::SPRRegClassID, d)));
1725       m = decodeVFPRm(insn, SP);
1726       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
    // Integer-to-FP: the integer side (src) is always an SPR.
1728       d = decodeVFPRd(insn, SP);
1729       MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1730       m = decodeVFPRm(insn, true /* Is Single Precision */);
1731       MI.addOperand(MCOperand::CreateReg(
1732                       getRegisterEnum(B, ARM::SPRRegClassID, m)));
1740 // VMOVRS - A8.6.330
1741 // Rt => Rd; Sn => UInt(Vn:N)
1742 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1743     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1745   assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
  // VMOVRS: GPR dst from Rd field, SPR src from Vn:N.
1747   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1749   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1750                                                      decodeVFPRn(insn, true))));
1755 // VMOVRRD - A8.6.332
1756 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1758 // VMOVRRS - A8.6.331
1759 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
1760 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1761     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1763   assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1765   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1766   unsigned &OpIdx = NumOpsAdded;
  // Two GPR destinations (Rt, Rt2) first.
1768   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1770   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
  // Source is either an SPR pair (Sm, Sm+1) or one DPR, per operand info.
1774   if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1775     unsigned Sm = decodeVFPRm(insn, true);
1776     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1778     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1782     MI.addOperand(MCOperand::CreateReg(
1783                     getRegisterEnum(B, ARM::DPRRegClassID,
1784                                     decodeVFPRm(insn, false))));
1790 // VMOVSR - A8.6.330
1791 // Rt => Rd; Sn => UInt(Vn:N)
1792 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1793     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1795   assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
  // VMOVSR: SPR dst from Vn:N, GPR src from Rd field (reverse of Conv2).
1797   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1798                                                      decodeVFPRn(insn, true))));
1799   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1805 // VMOVDRR - A8.6.332
1806 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1808 // VMOVRRS - A8.6.331
1809 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
1810 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1811     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1813   assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1815   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1816   unsigned &OpIdx = NumOpsAdded;
  // Destination is either an SPR pair (Sm, Sm+1) or one DPR, per operand
  // info; the two GPR sources (Rt, Rt2) follow.
1820   if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1821     unsigned Sm = decodeVFPRm(insn, true);
1822     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1824     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1828     MI.addOperand(MCOperand::CreateReg(
1829                     getRegisterEnum(B, ARM::DPRRegClassID,
1830                                     decodeVFPRm(insn, false))));
1834   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1836   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1842 // VFP Load/Store Instructions.
1843 // VLDRD, VLDRS, VSTRD, VSTRS
1844 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1845     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1847   assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
  // The S-variants use SPRs; D-variants use DPRs.
1849   bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS) ? true : false;
1850   unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1852   // Extract Dd/Sd for operand 0.
1853   unsigned RegD = decodeVFPRd(insn, isSPVFP);
1855   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
1857   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1858   MI.addOperand(MCOperand::CreateReg(Base));
1860   // Next comes the AM5 Opcode.
1861   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1862   unsigned char Imm8 = insn & 0xFF;
1863   MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1870 // VFP Load/Store Multiple Instructions.
1871 // This is similar to the algorithm for LDM/STM in that operand 0 (the base) and
1872 // operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
1873 // followed by a reglist of either DPR(s) or SPR(s).
1875 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
1876 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1877     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1879   assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");
1881   unsigned &OpIdx = NumOpsAdded;
1885   unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1887   // Writeback to base, if necessary.
1888   if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
1889       Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
1890     MI.addOperand(MCOperand::CreateReg(Base));
1894   MI.addOperand(MCOperand::CreateReg(Base));
1896   // Next comes the AM5 Opcode.
1897   ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1898   // Must be either "ia" or "db" submode.
1899   if (SubMode != ARM_AM::ia && SubMode != ARM_AM::db) {
1900     DEBUG(errs() << "Illegal addressing mode 5 sub-mode!\n");
1904   unsigned char Imm8 = insn & 0xFF;
1905   MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, Imm8)));
1907   // Handling the two predicate operands before the reglist.
  // Cond 0b1111 is mapped to AL (0xE), matching DisassembleLdStMulFrm.
1908   int64_t CondVal = insn >> ARMII::CondShift;
1909   MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1910   MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1914   bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
1915                   Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD) ? true : false;
1916   unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1919   unsigned RegD = decodeVFPRd(insn, isSPVFP);
1921   // Fill the variadic part of reglist.
  // Imm8 counts 32-bit words transferred, so DPR lists hold Imm8/2 regs.
1922   unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1923   for (unsigned i = 0; i < Regs; ++i) {
1924     MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
1932 // Misc. VFP Instructions.
1933 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1934 // FCONSTD (DPR and a VFPf64Imm operand)
1935 // FCONSTS (SPR and a VFPf32Imm operand)
1936 // VMRS/VMSR (GPR operand)
1937 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1938     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1940   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1941   unsigned &OpIdx = NumOpsAdded;
  // FMSTAT writes apsr_nzcv only — no explicit MCOperands to build.
1945   if (Opcode == ARM::FMSTAT)
1948   assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
  // Operand 0's reg class selects how the Rd field is decoded.
1950   unsigned RegEnum = 0;
1951   switch (OpInfo[0].RegClass) {
1952   case ARM::DPRRegClassID:
1953     RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
1955   case ARM::SPRRegClassID:
1956     RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
1958   case ARM::GPRRegClassID:
1959     RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
1962     assert(0 && "Invalid reg class id");
1966   MI.addOperand(MCOperand::CreateReg(RegEnum));
1969   // Extract/decode the f64/f32 immediate.
1970   if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1971       && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1972     // The asm syntax specifies the before-expanded <imm>.
1973     // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
1974     //                  Opcode == ARM::FCONSTD ? 64 : 32)
1975     MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
1982 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
1983 #include "ThumbDisassemblerCore.h"
1985 /////////////////////////////////////////////////////
1987 // Utility Functions For ARM Advanced SIMD //
1989 /////////////////////////////////////////////////////
1991 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1992 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1994 // A7.3 Register encoding
1996 // Extract/Decode NEON D/Vd:
1998 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
1999 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2000 // handling it in the getRegisterEnum() utility function.
2001 // D = Inst{22}, Vd = Inst{15-12}
// Returns UInt(D:Vd) from D = Inst{22}, Vd = Inst{15-12}.
2002 static unsigned decodeNEONRd(uint32_t insn) {
2003   return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2004     | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2007 // Extract/Decode NEON N/Vn:
2009 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2010 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2011 // handling it in the getRegisterEnum() utility function.
2012 // N = Inst{7}, Vn = Inst{19-16}
// Returns UInt(N:Vn) from N = Inst{7}, Vn = Inst{19-16}.
2013 static unsigned decodeNEONRn(uint32_t insn) {
2014   return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2015     | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2018 // Extract/Decode NEON M/Vm:
2020 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2021 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2022 // handling it in the getRegisterEnum() utility function.
2023 // M = Inst{5}, Vm = Inst{3-0}
// Returns UInt(M:Vm) from M = Inst{5}, Vm = Inst{3-0}.
2024 static unsigned decodeNEONRm(uint32_t insn) {
2025   return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2026     | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2037 } // End of unnamed namespace
2039 // size field -> Inst{11-10}
2040 // index_align field -> Inst{7-4}
2042 // The Lane Index interpretation depends on the Data Size:
2043 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2044 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2045 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2047 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
2048 static unsigned decodeLaneIndex(uint32_t insn) {
2049   unsigned size = insn >> 10 & 3;
2050   assert((size == 0 || size == 1 || size == 2) &&
2051          "Encoding error: size should be either 0, 1, or 2");
2053   unsigned index_align = insn >> 4 & 0xF;
  // One uniform expression for the table above: drop 1, 2, or 3 low bits.
2054   return (index_align >> 1) >> size;
2057 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2058 // op = Inst{5}, cmode = Inst{11-8}
2059 // i = Inst{24} (ARM architecture)
2060 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2061 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2062 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2063   unsigned char op = (insn >> 5) & 1;
2064   unsigned char cmode = (insn >> 8) & 0xF;
  // Reassemble Imm8 = i:imm3:imm4 from the scattered fields.
2065   unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2066                        ((insn >> 16) & 7) << 4 |
  // Packed result: op in bit 12, cmode in bits 11-8, Imm8 in bits 7-0.
2068   return (op << 12) | (cmode << 8) | Imm8;
2071 // A8.6.339 VMUL, VMULL (by scalar)
2072 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2073 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// Decode Vm for the by-scalar forms: 3 bits (D0-D7) for ESize16,
// 4 bits (D0-D15) for ESize32 — see table in the comment above.
2074 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2081     assert(0 && "Unreachable code!");
2086 // A8.6.339 VMUL, VMULL (by scalar)
2087 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2088 // ESize32 => index = Inst{5} (M) D0-D15
2089 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
  // ESize16: index = M:Vm<3> (two bits); ESize32: index = M (one bit).
2092     return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2094     return (insn >> 5) & 1;
2096     assert(0 && "Unreachable code!");
2101 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2102 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// Recover <fbits> from imm6 = Inst{21-16}, which encodes 64 - <fbits>.
2103 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2104   return 64 - ((insn >> 16) & 0x3F);
2107 // A8.6.302 VDUP (scalar)
2108 // ESize8 => index = Inst{19-17}
2109 // ESize16 => index = Inst{19-18}
2110 // ESize32 => index = Inst{19}
2111 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
  // Index width shrinks as the element grows: 3, 2, or 1 bit of Inst{19-17}.
2114     return (insn >> 17) & 7;
2116     return (insn >> 18) & 3;
2118     return (insn >> 19) & 1;
2120     assert(0 && "Unspecified element size!");
// decodeNVLaneOpIndex - Decode the lane index for VMOV between an ARM
// core register and a D-register scalar; index bits come from
// Inst{21} plus (for narrower elements) Inst{6-5}.  The case labels
// dispatching on esize are elided from this listing.
2125 // A8.6.328 VMOV (ARM core register to scalar)
2126 // A8.6.329 VMOV (scalar to ARM core register)
2127 // ESize8 => index = Inst{21:6-5}
2128 // ESize16 => index = Inst{21:6}
2129 // ESize32 => index = Inst{21}
2130 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2133 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2135 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2137 return ((insn >> 21) & 1);
2139 assert(0 && "Unspecified element size!");
// decodeNVSAmt - Decode the NEON vector shift amount from L:imm6,
// interpreting it differently for left vs. right shifts per the tables
// below.  The esize-classification branches for the low imm6 ranges are
// partially elided from this listing.
2144 // Imm6 = Inst{21-16}, L = Inst{7}
2146 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2148 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2149 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2150 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2151 // '1xxxxxx' => esize = 64; shift_amount = imm6
2153 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2155 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2156 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2157 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2158 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
2160 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2161 ElemSize esize = ESizeNA;
2162 unsigned L = (insn >> 7) & 1;
2163 unsigned imm6 = (insn >> 16) & 0x3F;
2167 else if (imm6 >> 4 == 1)
2169 else if (imm6 >> 5 == 1)
2172 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// The ElemSize enumerators evidently carry their bit-width as value
// (esize used arithmetically below) — presumably ESize8==8 … ESize64==64.
2177 return esize == ESize64 ? imm6 : (imm6 - esize);
2179 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
// decodeN3VImm - Extract the 4-bit immediate (e.g. VEXT's byte offset)
// from Inst{11-8}.
2183 // Imm4 = Inst{11-8}
2184 static unsigned decodeN3VImm(uint32_t insn) {
2185 return (insn >> 8) & 0xF;
// UseDRegPair - Return true for VLD1/VST1 q-register opcodes whose
// Q operand is materialized as a pair of D registers by the register
// lookup (see getRegisterEnum's DRegPair argument).  The switch header
// and the return statements are elided from this listing.
2188 static bool UseDRegPair(unsigned Opcode) {
2192 case ARM::VLD1q8_UPD:
2193 case ARM::VLD1q16_UPD:
2194 case ARM::VLD1q32_UPD:
2195 case ARM::VLD1q64_UPD:
2196 case ARM::VST1q8_UPD:
2197 case ARM::VST1q16_UPD:
2198 case ARM::VST1q32_UPD:
2199 case ARM::VST1q64_UPD:
// DisassembleNLdSt0 - Build the MCOperand list for a NEON structured
// load/store.  Loads put the destination D/Q registers first; stores put
// the addressing-mode operands first (operand orders shown below).  The
// Store and DblSpaced flags are decided by the caller (DisassembleNLdSt).
// Note several statements (operand-index increments, the load/store
// branch, the final return) are elided from this listing.
2205 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2207 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2209 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2211 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2213 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
2214 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2215 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2218 const TargetInstrDesc &TID = ARMInsts[Opcode];
2219 const TargetOperandInfo *OpInfo = TID.OpInfo;
2221 // At least one DPR register plus addressing mode #6.
2222 assert(NumOps >= 3 && "Expect >= 3 operands");
2224 unsigned &OpIdx = NumOpsAdded;
2228 // We have homogeneous NEON registers for Load/Store.
2229 unsigned RegClass = 0;
2230 bool DRegPair = UseDRegPair(Opcode);
2232 // Double-spaced registers have increments of 2.
2233 unsigned Inc = (DblSpaced || DRegPair) ? 2 : 1;
2235 unsigned Rn = decodeRn(insn);
2236 unsigned Rm = decodeRm(insn);
2237 unsigned Rd = decodeNEONRd(insn);
2239 // A7.7.1 Advanced SIMD addressing mode.
2242 // LLVM Addressing Mode #6.
2243 unsigned RmEnum = 0;
2245 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
// --- Store path: address operands first, then the register list. ---
2248 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2249 // then possible lane index.
2250 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2251 "Reg operand expected");
2254 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2259 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2260 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2261 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2263 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2267 MI.addOperand(MCOperand::CreateReg(RmEnum));
2271 assert(OpIdx < NumOps &&
2272 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2273 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2274 "Reg operand expected");
2276 RegClass = OpInfo[OpIdx].RegClass;
2277 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2278 MI.addOperand(MCOperand::CreateReg(
2279 getRegisterEnum(B, RegClass, Rd, DRegPair)));
2284 // Handle possible lane index.
2285 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2286 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() ) {
2287 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// --- Load path: register list first, then the address operands. ---
2292 // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
2293 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2294 RegClass = OpInfo[0].RegClass;
2296 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2297 MI.addOperand(MCOperand::CreateReg(
2298 getRegisterEnum(B, RegClass, Rd, DRegPair)));
2304 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2309 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2310 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2311 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2313 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2317 MI.addOperand(MCOperand::CreateReg(RmEnum));
2321 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2322 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2323 "Tied to operand expected");
// Tied-to destination copies are not re-materialized; a 0 register
// placeholder satisfies the operand slot.
2324 MI.addOperand(MCOperand::CreateReg(0));
2328 // Handle possible lane index.
2329 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2330 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() ) {
2331 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2336 // Accessing registers past the end of the NEON register file is not
// DisassembleNLdSt - Entry point for NEON load/store formats: infer the
// double-spaced-register property from the opcode name (LN variants use
// the size bit in the instruction; multi-structure variants use the
// 4-bit type field), then delegate to DisassembleNLdSt0.
2345 // If L (Inst{21}) == 0, store instructions.
2346 // Find out about double-spaced-ness of the Opcode and pass it on to
2347 // DisassembleNLdSt0().
2348 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2349 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2351 const StringRef Name = ARMInsts[Opcode].Name;
2352 bool DblSpaced = false;
2354 if (Name.find("LN") != std::string::npos) {
2355 // To one lane instructions.
2356 // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
2358 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2359 if (Name.endswith("16") || Name.endswith("16_UPD"))
2360 DblSpaced = slice(insn, 5, 5) == 1;
2362 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2363 if (Name.endswith("32") || Name.endswith("32_UPD"))
2364 DblSpaced = slice(insn, 6, 6) == 1;
2367 // Multiple n-element structures with type encoded as Inst{11-8}.
2368 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2370 // n == 2 && type == 0b1001 -> DblSpaced = true
2371 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2372 DblSpaced = slice(insn, 11, 8) == 9;
2374 // n == 3 && type == 0b0101 -> DblSpaced = true
2375 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2376 DblSpaced = slice(insn, 11, 8) == 5;
2378 // n == 4 && type == 0b0001 -> DblSpaced = true
2379 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2380 DblSpaced = slice(insn, 11, 8) == 1;
2383 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2384 slice(insn, 21, 21) == 0, DblSpaced, B);
// DisassembleN1RegModImmFrm - One NEON register plus a modified
// immediate (VMOV/VMVN vector-immediate forms): add the D/Q destination
// then the packed op:cmode:Imm8 value.  The switch header, the esize
// assignments, and the trailing return are elided from this listing.
2389 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2390 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2392 const TargetInstrDesc &TID = ARMInsts[Opcode];
2393 const TargetOperandInfo *OpInfo = TID.OpInfo;
2395 assert(NumOps >= 2 &&
2396 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2397 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2398 (OpInfo[1].RegClass < 0) &&
2399 "Expect 1 reg operand followed by 1 imm operand");
2401 // Qd/Dd = Inst{22:15-12} => NEON Rd
2402 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2403 decodeNEONRd(insn))));
2405 ElemSize esize = ESizeNA;
// Element size is inferred from the opcode (case bodies elided here).
2408 case ARM::VMOVv16i8:
2411 case ARM::VMOVv4i16:
2412 case ARM::VMOVv8i16:
2413 case ARM::VMVNv4i16:
2414 case ARM::VMVNv8i16:
2417 case ARM::VMOVv2i32:
2418 case ARM::VMOVv4i32:
2419 case ARM::VMVNv2i32:
2420 case ARM::VMVNv4i32:
2423 case ARM::VMOVv1i64:
2424 case ARM::VMOVv2i64:
2428 assert(0 && "Unreachable code!");
2432 // One register and a modified immediate value.
2433 // Add the imm operand.
2434 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2444 N2V_VectorConvert_Between_Float_Fixed
2446 } // End of unnamed namespace
// DisassembleNVdVmOptImm - Common worker for two-register NEON forms
// (Vd, Vm, optional immediate).  The N2VFlag selects how the optional
// immediate is decoded: dup-lane index or VCVT fraction bits.
2448 // Vector Convert [between floating-point and fixed-point]
2449 // Qd/Dd Qm/Dm [fbits]
2451 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2452 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2455 // Vector Move Long:
2458 // Vector Move Narrow:
2462 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2463 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2465 const TargetInstrDesc &TID = ARMInsts[Opc];
2466 const TargetOperandInfo *OpInfo = TID.OpInfo;
2468 assert(NumOps >= 2 &&
2469 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2470 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2471 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2472 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2473 "Expect >= 2 operands and first 2 as reg operands");
2475 unsigned &OpIdx = NumOpsAdded;
2479 ElemSize esize = ESizeNA;
2480 if (Flag == N2V_VectorDupLane) {
2481 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2482 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2483 "Unexpected Opcode");
2484 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2485 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2489 // Qd/Dd = Inst{22:15-12} => NEON Rd
2490 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2491 decodeNEONRd(insn))));
2495 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// Tied-to source slot gets a 0 register placeholder.
2497 MI.addOperand(MCOperand::CreateReg(0));
2501 // Dm = Inst{5:3-0} => NEON Rm
2502 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2503 decodeNEONRm(insn))));
2506 // VZIP and others have two TIED_TO reg operands.
2508 while (OpIdx < NumOps &&
2509 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2510 // Add TIED_TO operand.
2511 MI.addOperand(MI.getOperand(Idx));
2515 // Add the imm operand, if required.
2516 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2517 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() ) {
// Sentinel so the assert below catches an unhandled Flag value.
2519 unsigned imm = 0xFFFFFFFF;
2521 if (Flag == N2V_VectorDupLane)
2522 imm = decodeNVLaneDupIndex(insn, esize);
2523 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2524 imm = decodeVCVTFractionBits(insn);
2526 assert(imm != 0xFFFFFFFF && "Internal error");
2527 MI.addOperand(MCOperand::CreateImm(imm));
// DisassembleN2RegFrm - Two-register form with no immediate; thin
// wrapper over DisassembleNVdVmOptImm (the flag argument is elided here).
2534 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2535 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2537 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
// DisassembleNVCVTFrm - VCVT float<->fixed form; delegates with the
// fraction-bits flag.
2540 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2541 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2543 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2544 N2V_VectorConvert_Between_Float_Fixed, B);
// DisassembleNVecDupLnFrm - VDUP (lane) form; delegates with the
// dup-lane flag so the lane index immediate gets decoded.
2546 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2547 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2549 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2550 N2V_VectorDupLane, B);
// DisassembleNVectorShift - Common worker for NEON immediate-shift
// forms: destination, optional tied-to source, Vm, then the shift
// amount (left vs. right shifts interpret imm6 differently; see
// decodeNVSAmt).  The VSHLL special case and final return are elided.
2553 // Vector Shift [Accumulate] Instructions.
2554 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2556 // Vector Shift Left Long (with maximum shift count) Instructions.
2557 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
2559 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2560 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2562 const TargetInstrDesc &TID = ARMInsts[Opcode];
2563 const TargetOperandInfo *OpInfo = TID.OpInfo;
2565 assert(NumOps >= 3 &&
2566 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2567 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2568 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2569 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2570 "Expect >= 3 operands and first 2 as reg operands");
2572 unsigned &OpIdx = NumOpsAdded;
2576 // Qd/Dd = Inst{22:15-12} => NEON Rd
2577 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2578 decodeNEONRd(insn))));
2581 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// Accumulate variants (VSRA etc.) carry a tied-to source; placeholder 0.
2583 MI.addOperand(MCOperand::CreateReg(0));
2587 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2588 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2589 "Reg operand expected");
2591 // Qm/Dm = Inst{5:3-0} => NEON Rm
2592 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2593 decodeNEONRm(insn))));
2596 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2598 // Add the imm operand.
2600 // VSHLL has maximum shift count as the imm, inferred from its size.
2604 Imm = decodeNVSAmt(insn, LeftShift);
2616 MI.addOperand(MCOperand::CreateImm(Imm));
// DisassembleN2RegVecShLFrm - Left-shift form; delegates with
// LeftShift == true (trailing argument elided in this listing).
2622 // Left shift instructions.
2623 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2624 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2626 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
// DisassembleN2RegVecShRFrm - Right-shift form; delegates with
// LeftShift == false (trailing argument elided in this listing).
2629 // Right shift instructions have different shift amount interpretation.
2630 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2631 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2633 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
2642 N3V_Multiply_By_Scalar
2644 } // End of unnamed namespace
// DisassembleNVdVnVmOptImm - Common worker for three-register NEON forms
// with an optional immediate.  The N3VFlag picks the operand order
// (VectorShift swaps m/n), the immediate kind (VEXT imm4 vs. scalar
// index), and whether Dm is range-restricted (by-scalar multiplies).
2646 // NEON Three Register Instructions with Optional Immediate Operand
2648 // Vector Extract Instructions.
2649 // Qd/Dd Qn/Dn Qm/Dm imm4
2651 // Vector Shift (Register) Instructions.
2652 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2654 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2655 // Qd/Dd Qn/Dn RestrictedDm index
2658 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2659 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
2661 const TargetInstrDesc &TID = ARMInsts[Opcode];
2662 const TargetOperandInfo *OpInfo = TID.OpInfo;
2664 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
2665 assert(NumOps >= 3 &&
2666 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2667 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2668 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2669 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2670 "Expect >= 3 operands and first 2 as reg operands");
2672 unsigned &OpIdx = NumOpsAdded;
2676 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2677 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2678 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2679 ElemSize esize = ESizeNA;
2680 if (Flag == N3V_Multiply_By_Scalar) {
2681 unsigned size = (insn >> 20) & 3;
2682 if (size == 1) esize = ESize16;
2683 if (size == 2) esize = ESize32;
2684 assert (esize == ESize16 || esize == ESize32);
2687 // Qd/Dd = Inst{22:15-12} => NEON Rd
2688 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2689 decodeNEONRd(insn))));
2692 // VABA, VABAL, VBSLd, VBSLq, ...
2693 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// Tied-to source slot gets a 0 register placeholder.
2695 MI.addOperand(MCOperand::CreateReg(0));
2699 // Dn = Inst{7:19-16} => NEON Rn
2701 // Dm = Inst{5:3-0} => NEON Rm
2702 MI.addOperand(MCOperand::CreateReg(
2703 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2704 VdVnVm ? decodeNEONRn(insn)
2705 : decodeNEONRm(insn))));
2708 // Special case handling for VMOVDneon and VMOVQ because they are marked as
2710 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
2713 // Dm = Inst{5:3-0} => NEON Rm
2715 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2717 // Dn = Inst{7:19-16} => NEON Rn
2718 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2719 : decodeNEONRm(insn))
2720 : decodeNEONRn(insn);
2722 MI.addOperand(MCOperand::CreateReg(
2723 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
2726 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2727 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() ) {
2728 // Add the imm operand.
2731 Imm = decodeN3VImm(insn);
2732 else if (IsDmRestricted)
2733 Imm = decodeRestrictedDmIndex(insn, esize);
2735 assert(0 && "Internal error: unreachable code!");
2739 MI.addOperand(MCOperand::CreateImm(Imm));
// DisassembleN3RegFrm - Plain three-register form; delegates to
// DisassembleNVdVnVmOptImm (flag argument elided in this listing).
2746 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2747 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2749 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// DisassembleN3RegVecShFrm - Register-shift form; the VectorShift flag
// swaps the m/n operand order in the worker.
2752 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
2753 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2755 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2756 N3V_VectorShift, B);
// DisassembleNVecExtractFrm - VEXT form; the VectorExtract flag makes
// the worker append the imm4 byte offset.
2758 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2759 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2761 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2762 N3V_VectorExtract, B);
// DisassembleNVecMulScalarFrm - By-scalar multiply form; the flag makes
// the worker restrict Dm and append the scalar index.
2764 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
2765 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2767 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2768 N3V_Multiply_By_Scalar, B);
// DisassembleNVTBLFrm - VTBL/VTBX: destination, optional tied-to
// destination (VTBX), the 1-4 consecutive table registers Dn..Dn+len-1,
// then the index vector Dm.  Operand-index increments and the trailing
// return are elided from this listing.
2771 // Vector Table Lookup
2773 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2774 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2775 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2776 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
2777 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2778 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2780 const TargetInstrDesc &TID = ARMInsts[Opcode];
2781 const TargetOperandInfo *OpInfo = TID.OpInfo;
2782 if (!OpInfo) return false;
2784 assert(NumOps >= 3 &&
2785 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2786 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2787 OpInfo[2].RegClass == ARM::DPRRegClassID &&
2788 "Expect >= 3 operands and first 3 as reg operands");
2790 unsigned &OpIdx = NumOpsAdded;
2794 unsigned Rn = decodeNEONRn(insn);
2796 // {Dn} encoded as len = 0b00
2797 // {Dn Dn+1} encoded as len = 0b01
2798 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2799 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
2800 unsigned Len = slice(insn, 9, 8) + 1;
2802 // Dd (the destination vector)
2803 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2804 decodeNEONRd(insn))));
2807 // Process tied_to operand constraint.
2809 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2810 MI.addOperand(MI.getOperand(Idx));
2814 // Do the <list> now.
2815 for (unsigned i = 0; i < Len; ++i) {
2816 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2817 "Reg operand expected");
2818 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2823 // Dm (the index vector)
2824 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2825 "Reg operand (index vector) expected");
2826 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2827 decodeNEONRm(insn))));
// DisassembleNGetLnFrm - VGETLN (scalar to ARM core register):
// Rt destination, Dn source, then the lane index whose width depends on
// the element size inferred from the opcode.
2833 // Vector Get Lane (move scalar to ARM core register) Instructions.
2834 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
2835 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2836 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2838 const TargetInstrDesc &TID = ARMInsts[Opcode];
2839 const TargetOperandInfo *OpInfo = TID.OpInfo;
2840 if (!OpInfo) return false;
2842 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2843 OpInfo[0].RegClass == ARM::GPRRegClassID &&
2844 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2845 OpInfo[2].RegClass < 0 &&
2846 "Expect >= 3 operands with one dst operand");
// Conditional chain mapping opcode -> element size (8-bit fallback elided).
2849 Opcode == ARM::VGETLNi32 ? ESize32
2850 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
2853 // Rt = Inst{15-12} => ARM Rd
2854 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2857 // Dn = Inst{7:19-16} => NEON Rn
2858 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2859 decodeNEONRn(insn))));
2861 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// DisassembleNSetLnFrm - VSETLN (ARM core register to scalar):
// Dd destination, tied-to Dd placeholder, Rt source, lane index.
// NOTE(review): the assert checks OpInfo[3] while only claiming
// NumOps >= 3 — presumably these opcodes always have >= 4 operands,
// but the guard understates it; verify against the .td definitions.
2867 // Vector Set Lane (move ARM core register to scalar) Instructions.
2868 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
2869 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2870 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2872 const TargetInstrDesc &TID = ARMInsts[Opcode];
2873 const TargetOperandInfo *OpInfo = TID.OpInfo;
2874 if (!OpInfo) return false;
2876 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2877 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2878 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2879 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
2880 OpInfo[2].RegClass == ARM::GPRRegClassID &&
2881 OpInfo[3].RegClass < 0 &&
2882 "Expect >= 3 operands with one dst operand");
// Conditional chain mapping opcode -> element size (32-bit fallback elided).
2885 Opcode == ARM::VSETLNi8 ? ESize8
2886 : (Opcode == ARM::VSETLNi16 ? ESize16
2889 // Dd = Inst{7:19-16} => NEON Rn
2890 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2891 decodeNEONRn(insn))));
// Tied-to destination copy: 0 register placeholder.
2894 MI.addOperand(MCOperand::CreateReg(0));
2896 // Rt = Inst{15-12} => ARM Rd
2897 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2900 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// DisassembleNDupFrm - VDUP (from ARM core register): D/Q destination
// from Inst{7:19-16}, then the Rt source register.
2906 // Vector Duplicate Instructions (from ARM core register to all elements).
2907 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
2908 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2909 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2911 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2913 assert(NumOps >= 2 &&
2914 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2915 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2916 OpInfo[1].RegClass == ARM::GPRRegClassID &&
2917 "Expect >= 2 operands and first 2 as reg operand");
2919 unsigned RegClass = OpInfo[0].RegClass;
2921 // Qd/Dd = Inst{7:19-16} => NEON Rn
2922 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
2923 decodeNEONRn(insn))));
2925 // Rt = Inst{15-12} => ARM Rd
2926 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// MemBarrierInstr - Recognize DMB/DSB/ISB encodings: fixed top bits
// 0xf57ff0 with op7_4 in [4,6].  The return statements are elided from
// this listing.
2936 static inline bool MemBarrierInstr(uint32_t insn) {
2937 unsigned op7_4 = slice(insn, 7, 4);
2938 if (slice(insn, 31, 8) == 0xf57ff0 && (op7_4 >= 4 && op7_4 <= 6))
// PreLoadOpcode - Whether Opcode is one of the PLD/PLDW/PLI preload
// variants handled by DisassemblePreLoadFrm.  The switch header and
// return statements are elided from this listing.
2944 static inline bool PreLoadOpcode(unsigned Opcode) {
2946 case ARM::PLDi: case ARM::PLDr:
2947 case ARM::PLDWi: case ARM::PLDWr:
2948 case ARM::PLIi: case ARM::PLIr:
// DisassemblePreLoadFrm - Build operands for PLD/PLDW/PLI: base Rn,
// then either a signed imm12 offset (immediate forms) or Rm plus an
// addrmode2 shift-opcode immediate (register forms).
2955 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2956 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2958 // Preload Data/Instruction requires either 2 or 4 operands.
2959 // PLDi, PLDWi, PLIi: Rn [+/-]imm12 add = (U == '1')
2960 // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
2962 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2965 if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
2966 unsigned Imm12 = slice(insn, 11, 0);
2967 bool Negative = getUBit(insn) == 0;
// NOTE(review): `-1 - Imm12` yields -(Imm12 + 1), not -Imm12 — this
// looks like an off-by-one for the subtracted-offset case (U == 0).
// Surrounding lines are elided here; confirm against the original
// source and the A8.6.117/A8.6.120 offset semantics before changing.
2968 int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
2969 MI.addOperand(MCOperand::CreateImm(Offset));
2972 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2975 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2977 // Inst{6-5} encodes the shift opcode.
2978 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
2979 // Inst{11-7} encodes the imm5 shift amount.
2980 unsigned ShImm = slice(insn, 11, 7);
2982 // A8.4.1. Possible rrx or shift amount of 32...
2983 getImmShiftSE(ShOp, ShImm);
2984 MI.addOperand(MCOperand::CreateImm(
2985 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleMiscFrm - Catch-all for miscellaneous instructions:
// memory barriers, CPS, DBG, BKPT, and the preload family.  Each branch
// adds its operands and (in elided lines) returns; the common fallthrough
// is the unreachable assert at the end.
2992 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2993 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2995 if (MemBarrierInstr(insn)) {
2996 // DMBsy, DSBsy, and ISBsy instructions have zero operand and are taken care
2997 // of within the generic ARMBasicMCBuilder::BuildIt() method.
2999 // Inst{3-0} encodes the memory barrier option for the variants.
3000 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3020 // CPS has a singleton $opt operand that contains the following information:
3021 // opt{4-0} = mode from Inst{4-0}
3022 // opt{5} = changemode from Inst{17}
3023 // opt{8-6} = AIF from Inst{8-6}
3024 // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
3025 if (Opcode == ARM::CPS) {
3026 unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
3027 slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
3028 MI.addOperand(MCOperand::CreateImm(Option));
3033 // DBG has its option specified in Inst{3-0}.
3034 if (Opcode == ARM::DBG) {
3035 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3040 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3041 if (Opcode == ARM::BKPT) {
3042 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3043 slice(insn, 3, 0)));
3048 if (PreLoadOpcode(Opcode))
3049 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3051 assert(0 && "Unexpected misc instruction!");
// FuncPtrs is indexed by the ARMFormat enumerator (see the ctor below,
// which asserts the index is in range); entry order must therefore match
// the ARMFormat definition exactly.  Several entries are elided from
// this listing.
3055 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3056 /// We divide the disassembly task into different categories, with each one
3057 /// corresponding to a specific instruction encoding format. There could be
3058 /// exceptions when handling a specific format, and that is why the Opcode is
3059 /// also present in the function prototype.
3060 static const DisassembleFP FuncPtrs[] = {
3064 &DisassembleBrMiscFrm,
3066 &DisassembleDPSoRegFrm,
3069 &DisassembleLdMiscFrm,
3070 &DisassembleStMiscFrm,
3071 &DisassembleLdStMulFrm,
3072 &DisassembleLdStExFrm,
3073 &DisassembleArithMiscFrm,
3076 &DisassembleVFPUnaryFrm,
3077 &DisassembleVFPBinaryFrm,
3078 &DisassembleVFPConv1Frm,
3079 &DisassembleVFPConv2Frm,
3080 &DisassembleVFPConv3Frm,
3081 &DisassembleVFPConv4Frm,
3082 &DisassembleVFPConv5Frm,
3083 &DisassembleVFPLdStFrm,
3084 &DisassembleVFPLdStMulFrm,
3085 &DisassembleVFPMiscFrm,
3086 &DisassembleThumbFrm,
3087 &DisassembleMiscFrm,
3088 &DisassembleNGetLnFrm,
3089 &DisassembleNSetLnFrm,
3090 &DisassembleNDupFrm,
3092 // VLD and VST (including one lane) Instructions.
3095 // A7.4.6 One register and a modified immediate value
3096 // 1-Register Instructions with imm.
3097 // LLVM only defines VMOVv instructions.
3098 &DisassembleN1RegModImmFrm,
3100 // 2-Register Instructions with no imm.
3101 &DisassembleN2RegFrm,
3103 // 2-Register Instructions with imm (vector convert float/fixed point).
3104 &DisassembleNVCVTFrm,
3106 // 2-Register Instructions with imm (vector dup lane).
3107 &DisassembleNVecDupLnFrm,
3109 // Vector Shift Left Instructions.
3110 &DisassembleN2RegVecShLFrm,
3112 // Vector Shift Right Instructions, which have different interpretation of the
3113 // shift amount from the imm6 field.
3114 &DisassembleN2RegVecShRFrm,
3116 // 3-Register Data-Processing Instructions.
3117 &DisassembleN3RegFrm,
3119 // Vector Shift (Register) Instructions.
3120 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3121 &DisassembleN3RegVecShFrm,
3123 // Vector Extract Instructions.
3124 &DisassembleNVecExtractFrm,
3126 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3127 // By Scalar Instructions.
3128 &DisassembleNVecMulScalarFrm,
3130 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3131 // values in a table and generate a new vector.
3132 &DisassembleNVTBLFrm,
3137 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3138 /// The general idea is to set the Opcode for the MCInst, followed by adding
3139 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3140 /// to the Format-specific disassemble function for disassembly, followed by
3141 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3142 /// which follow the Dst/Src Operands.
3143 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3144 // Stage 1 sets the Opcode.
3145 MI.setOpcode(Opcode);
3146 // If the number of operands is zero, we're done!
// (The early-return for NumOps == 0 is elided from this listing.)
3150 // Stage 2 calls the format-specific disassemble function to build the operand
3154 unsigned NumOpsAdded = 0;
3155 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
3157 if (!OK || this->Err != 0) return false;
3158 if (NumOpsAdded >= NumOps)
3161 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3162 // FIXME: Should this be done selectively?
3163 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
// CondCode - Normalize the raw 4-bit condition field; 0xF ('1111',
// always-executed space) is folded into AL.  The return statements are
// elided from this listing.
3166 // A8.3 Conditional execution
3167 // A8.3.1 Pseudocode details of conditional execution
3168 // Condition bits '111x' indicate the instruction is always executed.
3169 static uint32_t CondCode(uint32_t CondField) {
3170 if (CondField == 0xF)
3175 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3176 /// of some Thumb instructions which come before the reglist operands. It
3177 /// returns true if the two predicate operands have been processed.
3178 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3179 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3181 assert(NumOpsRemaining > 0 && "Invalid argument");
3183 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Next operand slot to inspect = operands already added to MI.
3184 unsigned Idx = MI.getNumOperands();
3186 // First, we check whether this instr specifies the PredicateOperand through
3187 // a pair of TargetOperandInfos with isPredicate() property.
3188 if (NumOpsRemaining >= 2 &&
3189 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3190 OpInfo[Idx].RegClass < 0 &&
3191 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3193 // If we are inside an IT block, get the IT condition bits maintained via
3194 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3197 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Outside an IT block: unconditional (AL) with CPSR as the CC register.
3199 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3200 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3207 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3208 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
3210 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3211 uint32_t insn, unsigned short NumOpsRemaining) {
3213 assert(NumOpsRemaining > 0 && "Invalid argument");
3215 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3216 const std::string &Name = ARMInsts[Opcode].Name;
// Next operand slot to inspect = operands already added to MI.
3217 unsigned Idx = MI.getNumOperands();
3219 // First, we check whether this instr specifies the PredicateOperand through
3220 // a pair of TargetOperandInfos with isPredicate() property.
3221 if (NumOpsRemaining >= 2 &&
3222 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3223 OpInfo[Idx].RegClass < 0 &&
3224 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3226 // If we are inside an IT block, get the IT condition bits maintained via
3227 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3230 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Thumb opcodes are prefixed with 't'; their condition field location
// differs per instruction.
3232 if (Name.length() > 1 && Name[0] == 't') {
3233 // Thumb conditional branch instructions have their cond field embedded,
3237 if (Name == "t2Bcc")
3238 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
3239 else if (Name == "tBcc")
3240 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
3242 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3244 // ARM instructions get their condition field from Inst{31-28}.
3245 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
3248 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3250 NumOpsRemaining -= 2;
3253 if (NumOpsRemaining == 0)
3256 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3257 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3258 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3262 if (NumOpsRemaining == 0)
3268 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3269 /// after BuildIt is finished.
3270 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// SP is the (optional) session/state plugin; without it there is no
// IT-block bookkeeping to do.
3273 if (!SP) return Status;
3275 if (Opcode == ARM::t2IT)
3276 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3277 else if (InITBlock())
3283 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
3284 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3286 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
// The last FuncPtrs slot is reserved (hence "- 1"); format must map to
// a real dispatch entry.
3287 unsigned Idx = (unsigned)format;
3288 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3289 Disasm = FuncPtrs[Idx];
3292 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3293 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3294 /// Return NULL if it fails to create/return a proper builder. API clients
3295 /// are responsible for freeing up of the allocated memory. Cacheing can be
3296 /// performed by the API clients to improve performance.
3297 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3298 // For "Unknown format", fail by returning a NULL pointer.
// Mirrors the ctor's range assert, but fails gracefully for callers.
3299 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3300 DEBUG(errs() << "Unknown format\n");
3304 return new ARMBasicMCBuilder(Opcode, Format,
3305 ARMInsts[Opcode].getNumOperands());