//===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is part of the ARM Disassembler.
// It contains code to represent the core concepts of Builder and DisassembleFP
// to solve the problem of disassembling an ARM instr.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-disassembler"

#include "ARMDisassemblerCore.h"
#include "ARMAddressingModes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
/// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
/// describing the operand info for each ARMInsts[i].
///
/// Together with an instruction's encoding format, we can take advantage of the
/// NumOperands and the OpInfo fields of the target instruction description in
/// the quest to build out the MCOperand list for an MCInst.
///
/// The general guideline is that with a known format, the number of dst and src
/// operands are well-known. The dst is built first, followed by the src
/// operand(s). The operands not yet used at this point are for the Implicit
/// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
/// defined with two components:
///
/// def pred { // Operand PredicateOperand
///   ValueType Type = OtherVT;
///   string PrintMethod = "printPredicateOperand";
///   string AsmOperandLowerMethod = ?;
///   dag MIOperandInfo = (ops i32imm, CCR);
///   AsmOperandClass ParserMatchClass = ImmAsmOperand;
///   dag DefaultOps = (ops (i32 14), (i32 zero_reg));
/// }
///
/// which is manifested by the TargetOperandInfo[] of:
///
/// { 0, 0|(1<<TOI::Predicate), 0 },
/// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
///
/// So the first predicate MCOperand corresponds to the immediate part of the
/// ARM condition field (Inst{31-28}), and the second predicate MCOperand
/// corresponds to a register kind of ARM::CPSR.
///
/// For the Defs part, in the simple case of only cc_out:$s, we have:
///
/// def cc_out { // Operand OptionalDefOperand
///   ValueType Type = OtherVT;
///   string PrintMethod = "printSBitModifierOperand";
///   string AsmOperandLowerMethod = ?;
///   dag MIOperandInfo = (ops CCR);
///   AsmOperandClass ParserMatchClass = ImmAsmOperand;
///   dag DefaultOps = (ops (i32 zero_reg));
/// }
///
/// which is manifested by the one TargetOperandInfo of:
///
/// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
///
/// And this maps to one MCOperand with the register kind of ARM::CPSR.
#include "ARMGenInstrInfo.inc"

using namespace llvm;
75 const char *ARMUtils::OpcodeName(unsigned Opcode) {
76 return ARMInsts[Opcode].Name;
79 // Return the register enum Based on RegClass and the raw register number.
80 // For DRegPair, see comments below.
82 static unsigned getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister,
83 bool DRegPair = false) {
85 if (DRegPair && RegClassID == ARM::QPRRegClassID) {
86 // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
87 // in the ARM Architecture Manual as far as I understand it (A8.6.307).
88 // Therefore, we morph the RegClassID to be the sub register class and don't
89 // subsequently transform the RawRegister encoding when calculating RegNum.
91 // See also ARMinstPrinter::printOperand() wrt "dregpair" modifier part
92 // where this workaround is meant for.
93 RegClassID = ARM::DPRRegClassID;
96 // For this purpose, we can treat rGPR as if it were GPR.
97 if (RegClassID == ARM::rGPRRegClassID) RegClassID = ARM::GPRRegClassID;
99 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
101 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
107 switch (RegClassID) {
108 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
109 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
110 case ARM::DPR_VFP2RegClassID:
112 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
113 case ARM::QPR_VFP2RegClassID:
115 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
119 switch (RegClassID) {
120 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
121 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
122 case ARM::DPR_VFP2RegClassID:
124 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
125 case ARM::QPR_VFP2RegClassID:
127 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
131 switch (RegClassID) {
132 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
133 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
134 case ARM::DPR_VFP2RegClassID:
136 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
137 case ARM::QPR_VFP2RegClassID:
139 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
143 switch (RegClassID) {
144 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
145 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
146 case ARM::DPR_VFP2RegClassID:
148 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
149 case ARM::QPR_VFP2RegClassID:
151 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
155 switch (RegClassID) {
156 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
157 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
158 case ARM::DPR_VFP2RegClassID:
160 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
161 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
165 switch (RegClassID) {
166 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
167 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
168 case ARM::DPR_VFP2RegClassID:
170 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
171 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
175 switch (RegClassID) {
176 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
177 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
178 case ARM::DPR_VFP2RegClassID:
180 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
181 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
185 switch (RegClassID) {
186 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
187 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
188 case ARM::DPR_VFP2RegClassID:
190 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
191 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
195 switch (RegClassID) {
196 case ARM::GPRRegClassID: return ARM::R8;
197 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
198 case ARM::QPRRegClassID: return ARM::Q8;
199 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
203 switch (RegClassID) {
204 case ARM::GPRRegClassID: return ARM::R9;
205 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
206 case ARM::QPRRegClassID: return ARM::Q9;
207 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
211 switch (RegClassID) {
212 case ARM::GPRRegClassID: return ARM::R10;
213 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
214 case ARM::QPRRegClassID: return ARM::Q10;
215 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
219 switch (RegClassID) {
220 case ARM::GPRRegClassID: return ARM::R11;
221 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
222 case ARM::QPRRegClassID: return ARM::Q11;
223 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
227 switch (RegClassID) {
228 case ARM::GPRRegClassID: return ARM::R12;
229 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
230 case ARM::QPRRegClassID: return ARM::Q12;
231 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
235 switch (RegClassID) {
236 case ARM::GPRRegClassID: return ARM::SP;
237 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
238 case ARM::QPRRegClassID: return ARM::Q13;
239 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
243 switch (RegClassID) {
244 case ARM::GPRRegClassID: return ARM::LR;
245 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
246 case ARM::QPRRegClassID: return ARM::Q14;
247 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
251 switch (RegClassID) {
252 case ARM::GPRRegClassID: return ARM::PC;
253 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
254 case ARM::QPRRegClassID: return ARM::Q15;
255 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
259 switch (RegClassID) {
260 case ARM::DPRRegClassID: return ARM::D16;
261 case ARM::SPRRegClassID: return ARM::S16;
265 switch (RegClassID) {
266 case ARM::DPRRegClassID: return ARM::D17;
267 case ARM::SPRRegClassID: return ARM::S17;
271 switch (RegClassID) {
272 case ARM::DPRRegClassID: return ARM::D18;
273 case ARM::SPRRegClassID: return ARM::S18;
277 switch (RegClassID) {
278 case ARM::DPRRegClassID: return ARM::D19;
279 case ARM::SPRRegClassID: return ARM::S19;
283 switch (RegClassID) {
284 case ARM::DPRRegClassID: return ARM::D20;
285 case ARM::SPRRegClassID: return ARM::S20;
289 switch (RegClassID) {
290 case ARM::DPRRegClassID: return ARM::D21;
291 case ARM::SPRRegClassID: return ARM::S21;
295 switch (RegClassID) {
296 case ARM::DPRRegClassID: return ARM::D22;
297 case ARM::SPRRegClassID: return ARM::S22;
301 switch (RegClassID) {
302 case ARM::DPRRegClassID: return ARM::D23;
303 case ARM::SPRRegClassID: return ARM::S23;
307 switch (RegClassID) {
308 case ARM::DPRRegClassID: return ARM::D24;
309 case ARM::SPRRegClassID: return ARM::S24;
313 switch (RegClassID) {
314 case ARM::DPRRegClassID: return ARM::D25;
315 case ARM::SPRRegClassID: return ARM::S25;
319 switch (RegClassID) {
320 case ARM::DPRRegClassID: return ARM::D26;
321 case ARM::SPRRegClassID: return ARM::S26;
325 switch (RegClassID) {
326 case ARM::DPRRegClassID: return ARM::D27;
327 case ARM::SPRRegClassID: return ARM::S27;
331 switch (RegClassID) {
332 case ARM::DPRRegClassID: return ARM::D28;
333 case ARM::SPRRegClassID: return ARM::S28;
337 switch (RegClassID) {
338 case ARM::DPRRegClassID: return ARM::D29;
339 case ARM::SPRRegClassID: return ARM::S29;
343 switch (RegClassID) {
344 case ARM::DPRRegClassID: return ARM::D30;
345 case ARM::SPRRegClassID: return ARM::S30;
349 switch (RegClassID) {
350 case ARM::DPRRegClassID: return ARM::D31;
351 case ARM::SPRRegClassID: return ARM::S31;
355 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
356 // Encoding error. Mark the builder with error code != 0.
///////////////////////////////
//                           //
//     Utility Functions     //
//                           //
///////////////////////////////
367 // Extract/Decode Rd: Inst{15-12}.
368 static inline unsigned decodeRd(uint32_t insn) {
369 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
372 // Extract/Decode Rn: Inst{19-16}.
373 static inline unsigned decodeRn(uint32_t insn) {
374 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
377 // Extract/Decode Rm: Inst{3-0}.
378 static inline unsigned decodeRm(uint32_t insn) {
379 return (insn & ARMII::GPRRegMask);
382 // Extract/Decode Rs: Inst{11-8}.
383 static inline unsigned decodeRs(uint32_t insn) {
384 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
387 static inline unsigned getCondField(uint32_t insn) {
388 return (insn >> ARMII::CondShift);
391 static inline unsigned getIBit(uint32_t insn) {
392 return (insn >> ARMII::I_BitShift) & 1;
395 static inline unsigned getAM3IBit(uint32_t insn) {
396 return (insn >> ARMII::AM3_I_BitShift) & 1;
399 static inline unsigned getPBit(uint32_t insn) {
400 return (insn >> ARMII::P_BitShift) & 1;
403 static inline unsigned getUBit(uint32_t insn) {
404 return (insn >> ARMII::U_BitShift) & 1;
407 static inline unsigned getPUBits(uint32_t insn) {
408 return (insn >> ARMII::U_BitShift) & 3;
411 static inline unsigned getSBit(uint32_t insn) {
412 return (insn >> ARMII::S_BitShift) & 1;
415 static inline unsigned getWBit(uint32_t insn) {
416 return (insn >> ARMII::W_BitShift) & 1;
419 static inline unsigned getDBit(uint32_t insn) {
420 return (insn >> ARMII::D_BitShift) & 1;
423 static inline unsigned getNBit(uint32_t insn) {
424 return (insn >> ARMII::N_BitShift) & 1;
427 static inline unsigned getMBit(uint32_t insn) {
428 return (insn >> ARMII::M_BitShift) & 1;
431 // See A8.4 Shifts applied to a register.
432 // A8.4.2 Register controlled shifts.
434 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
435 // into llvm enums for shift opcode. The API clients should pass in the value
436 // encoded with two bits, so the assert stays to signal a wrong API usage.
438 // A8-12: DecodeRegShift()
439 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
441 default: assert(0 && "No such value"); return ARM_AM::no_shift;
442 case 0: return ARM_AM::lsl;
443 case 1: return ARM_AM::lsr;
444 case 2: return ARM_AM::asr;
445 case 3: return ARM_AM::ror;
449 // See A8.4 Shifts applied to a register.
450 // A8.4.1 Constant shifts.
452 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
453 // encodings into the intended ShiftOpc and shift amount.
455 // A8-11: DecodeImmShift()
456 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
457 // If type == 0b11 and imm5 == 0, we have an rrx, instead.
458 if (ShOp == ARM_AM::ror && ShImm == 0)
460 // If (lsr or asr) and imm5 == 0, shift amount is 32.
461 if ((ShOp == ARM_AM::lsr || ShOp == ARM_AM::asr) && ShImm == 0)
465 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
466 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
467 // clients should pass in the value encoded with two bits, so the assert stays
468 // to signal a wrong API usage.
469 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
471 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
472 case 1: return ARM_AM::ia; // P=0 U=1
473 case 3: return ARM_AM::ib; // P=1 U=1
474 case 0: return ARM_AM::da; // P=0 U=0
475 case 2: return ARM_AM::db; // P=1 U=0
////////////////////////////////////////////
//                                        //
//    Disassemble function definitions    //
//                                        //
////////////////////////////////////////////

/// There is a separate Disassemble*Frm function entry for disassembly of an ARM
/// instr into a list of MCOperands in the appropriate order, with possible dst,
/// followed by possible src(s).
///
/// The processing of the predicate, and the 'S' modifier bit, if MI modifies
/// the CPSR, is factored into ARMBasicMCBuilder's method named
/// TryPredicateAndSBitModifier.
493 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
494 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
496 if (Opcode == ARM::DMBsy || Opcode == ARM::DSBsy)
499 assert(0 && "Unexpected pseudo instruction!");
503 // Multiply Instructions.
504 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
505 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
507 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
508 // Rd{19-16} Rn{3-0} Rm{11-8}
510 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
511 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
513 // The mapping of the multiply registers to the "regular" ARM registers, where
514 // there are convenience decoder functions, is:
520 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
521 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
523 const TargetInstrDesc &TID = ARMInsts[Opcode];
524 unsigned short NumDefs = TID.getNumDefs();
525 const TargetOperandInfo *OpInfo = TID.OpInfo;
526 unsigned &OpIdx = NumOpsAdded;
530 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
532 && OpInfo[0].RegClass == ARM::GPRRegClassID
533 && OpInfo[1].RegClass == ARM::GPRRegClassID
534 && OpInfo[2].RegClass == ARM::GPRRegClassID
535 && "Expect three register operands");
537 // Instructions with two destination registers have RdLo{15-12} first.
539 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
540 "Expect 4th register operand");
541 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
546 // The destination register: RdHi{19-16} or Rd{19-16}.
547 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
550 // The two src regsiters: Rn{3-0}, then Rm{11-8}.
551 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
553 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
557 // Many multiply instructions (e.g., MLA) have three src registers.
558 // The third register operand is Ra{15-12}.
559 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
560 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
568 // Helper routines for disassembly of coprocessor instructions.
570 static bool LdStCopOpcode(unsigned Opcode) {
571 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
572 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
576 static bool CoprocessorOpcode(unsigned Opcode) {
577 if (LdStCopOpcode(Opcode))
583 case ARM::CDP: case ARM::CDP2:
584 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
585 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
589 static inline unsigned GetCoprocessor(uint32_t insn) {
590 return slice(insn, 11, 8);
592 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
593 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
595 static inline unsigned GetCopOpc2(uint32_t insn) {
596 return slice(insn, 7, 5);
598 static inline unsigned GetCopOpc(uint32_t insn) {
599 return slice(insn, 7, 4);
601 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
604 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
606 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
608 // MCRR, MCRR2, MRRC, MRRc2: cop opc Rd Rn CRm
610 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
612 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
616 // LDC_OPTION: cop CRd Rn imm8
618 // STC_OPTION: cop CRd Rn imm8
621 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
622 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
624 assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");
626 unsigned &OpIdx = NumOpsAdded;
627 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
628 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
629 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
630 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
631 bool LdStCop = LdStCopOpcode(Opcode);
635 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
638 // Unindex if P:W = 0b00 --> _OPTION variant
639 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
641 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
643 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
647 MI.addOperand(MCOperand::CreateReg(0));
648 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
649 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
651 MI.addOperand(MCOperand::CreateImm(Offset));
654 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
658 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
659 : GetCopOpc1(insn, NoGPR)));
661 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
662 : MCOperand::CreateReg(
663 getRegisterEnum(B, ARM::GPRRegClassID,
666 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
667 getRegisterEnum(B, ARM::GPRRegClassID,
669 : MCOperand::CreateImm(decodeRn(insn)));
671 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
676 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
684 // Branch Instructions.
685 // BLr9: SignExtend(Imm24:'00', 32)
686 // Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
687 // SMC: ZeroExtend(imm4, 32)
688 // SVC: ZeroExtend(Imm24, 32)
690 // Various coprocessor instructions are assigned BrFrm arbitrarily.
691 // Delegates to DisassembleCoprocessor() helper function.
694 // MSR/MSRsys: Rm mask=Inst{19-16}
696 // MSRi/MSRsysi: so_imm
697 // SRSW/SRS: addrmode4:$addr mode_imm
698 // RFEW/RFE: addrmode4:$addr Rn
699 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
700 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
702 if (CoprocessorOpcode(Opcode))
703 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
705 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
706 if (!OpInfo) return false;
708 // MRS and MRSsys take one GPR reg Rd.
709 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
710 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
711 "Reg operand expected");
712 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
717 // BXJ takes one GPR reg Rm.
718 if (Opcode == ARM::BXJ) {
719 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
720 "Reg operand expected");
721 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
726 // MSR and MSRsys take one GPR reg Rm, followed by the mask.
727 if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
728 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
729 "Reg operand expected");
730 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
732 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
736 // MSRi and MSRsysi take one so_imm operand, followed by the mask.
737 if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
738 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
739 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
740 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
741 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
742 unsigned Imm = insn & 0xFF;
743 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
744 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
748 // SRSW and SRS requires addrmode4:$addr for ${addr:submode}, followed by the
749 // mode immediate (Inst{4-0}).
750 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
751 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
752 // ARMInstPrinter::printAddrMode4Operand() prints special mode string
753 // if the base register is SP; so don't set ARM::SP.
754 MI.addOperand(MCOperand::CreateReg(0));
755 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
756 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
758 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
759 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
761 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
767 assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
768 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
769 "Unexpected Opcode");
771 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Reg operand expected");
774 if (Opcode == ARM::SMC) {
775 // ZeroExtend(imm4, 32) where imm24 = Inst{3-0}.
776 Imm32 = slice(insn, 3, 0);
777 } else if (Opcode == ARM::SVC) {
778 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
779 Imm32 = slice(insn, 23, 0);
781 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
782 unsigned Imm26 = slice(insn, 23, 0) << 2;
783 //Imm32 = signextend<signed int, 26>(Imm26);
784 Imm32 = SignExtend32<26>(Imm26);
786 // When executing an ARM instruction, PC reads as the address of the current
787 // instruction plus 8. The assembler subtracts 8 from the difference
788 // between the branch instruction and the target address, disassembler has
789 // to add 8 to compensate.
793 MI.addOperand(MCOperand::CreateImm(Imm32));
799 // Misc. Branch Instructions.
800 // BR_JTadd, BR_JTr, BR_JTm
803 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
804 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
806 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
807 if (!OpInfo) return false;
809 unsigned &OpIdx = NumOpsAdded;
813 // BX_RET has only two predicate operands, do an early return.
814 if (Opcode == ARM::BX_RET)
817 // BLXr9 and BRIND take one GPR reg.
818 if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
819 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
820 "Reg operand expected");
821 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
827 // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
828 if (Opcode == ARM::BR_JTadd) {
829 // InOperandList with GPR:$target and GPR:$idx regs.
831 assert(NumOps == 4 && "Expect 4 operands");
832 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
834 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
837 // Fill in the two remaining imm operands to signify build completion.
838 MI.addOperand(MCOperand::CreateImm(0));
839 MI.addOperand(MCOperand::CreateImm(0));
845 // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
846 if (Opcode == ARM::BR_JTr) {
847 // InOperandList with GPR::$target reg.
849 assert(NumOps == 3 && "Expect 3 operands");
850 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
853 // Fill in the two remaining imm operands to signify build completion.
854 MI.addOperand(MCOperand::CreateImm(0));
855 MI.addOperand(MCOperand::CreateImm(0));
861 // BR_JTm is an LDR with Rt = PC.
862 if (Opcode == ARM::BR_JTm) {
863 // This is the reg/reg form, with base reg followed by +/- reg shop imm.
864 // See also ARMAddressingModes.h (Addressing Mode #2).
866 assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
867 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
870 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
872 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
873 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
875 // Inst{6-5} encodes the shift opcode.
876 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
877 // Inst{11-7} encodes the imm5 shift amount.
878 unsigned ShImm = slice(insn, 11, 7);
880 // A8.4.1. Possible rrx or shift amount of 32...
881 getImmShiftSE(ShOp, ShImm);
882 MI.addOperand(MCOperand::CreateImm(
883 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
885 // Fill in the two remaining imm operands to signify build completion.
886 MI.addOperand(MCOperand::CreateImm(0));
887 MI.addOperand(MCOperand::CreateImm(0));
896 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
897 uint32_t lsb = slice(insn, 11, 7);
898 uint32_t msb = slice(insn, 20, 16);
901 DEBUG(errs() << "Encoding error: msb < lsb\n");
905 for (uint32_t i = lsb; i <= msb; ++i)
911 // A major complication is the fact that some of the saturating add/subtract
912 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
913 // They are QADD, QDADD, QDSUB, and QSUB.
914 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
915 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
917 const TargetInstrDesc &TID = ARMInsts[Opcode];
918 unsigned short NumDefs = TID.getNumDefs();
919 bool isUnary = isUnaryDP(TID.TSFlags);
920 const TargetOperandInfo *OpInfo = TID.OpInfo;
921 unsigned &OpIdx = NumOpsAdded;
925 // Disassemble register def if there is one.
926 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
927 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
932 // Now disassemble the src operands.
936 // Special-case handling of BFC/BFI/SBFX/UBFX.
937 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
938 MI.addOperand(MCOperand::CreateReg(0));
939 if (Opcode == ARM::BFI) {
940 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
945 if (!getBFCInvMask(insn, mask))
948 MI.addOperand(MCOperand::CreateImm(mask));
952 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
953 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
955 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
956 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
961 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
962 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
964 // BinaryDP has an Rn operand.
966 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
967 "Reg operand expected");
968 MI.addOperand(MCOperand::CreateReg(
969 getRegisterEnum(B, ARM::GPRRegClassID,
970 RmRn ? decodeRm(insn) : decodeRn(insn))));
974 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
975 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
976 MI.addOperand(MCOperand::CreateReg(0));
980 // Now disassemble operand 2.
984 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
985 // We have a reg/reg form.
986 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
987 // routed here as well.
988 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
989 MI.addOperand(MCOperand::CreateReg(
990 getRegisterEnum(B, ARM::GPRRegClassID,
991 RmRn? decodeRn(insn) : decodeRm(insn))));
993 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
994 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
995 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
996 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
997 MI.addOperand(MCOperand::CreateImm(Imm16));
1000 // We have a reg/imm form.
1001 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1002 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1003 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1004 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1005 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1006 unsigned Imm = insn & 0xFF;
1007 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// DisassembleDPSoRegFrm - Build the MCOperand list for an ARM data-processing
// instruction whose second operand is a shifted register (so_reg): an optional
// Rd def, an optional Rn for binary forms, then the three-part shifter operand
// [Rm, Rs-or-reg0, shift-opc+amount].
// NOTE(review): the embedded original line numbers jump (e.g. 1021 -> 1025),
// so statements from the full source are elided here; excerpt only.
1014 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1015 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1017 const TargetInstrDesc &TID = ARMInsts[Opcode];
1018 unsigned short NumDefs = TID.getNumDefs();
1019 bool isUnary = isUnaryDP(TID.TSFlags);
1020 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NumOpsAdded is aliased as OpIdx so the caller sees the running operand count.
1021 unsigned &OpIdx = NumOpsAdded;
1025 // Disassemble register def if there is one.
1026 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1027 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1032 // Disassemble the src operands.
1033 if (OpIdx >= NumOps)
1036 // BinaryDP has an Rn operand.
1038 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1039 "Reg operand expected");
1040 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1045 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1046 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1047 MI.addOperand(MCOperand::CreateReg(0));
1051 // Disassemble operand 2, which consists of three components.
1052 if (OpIdx + 2 >= NumOps)
1055 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1056 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1057 (OpInfo[OpIdx+2].RegClass < 0) &&
1058 "Expect 3 reg operands");
1060 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1061 unsigned Rs = slice(insn, 4, 4);
1063 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1066 // Register-controlled shifts: [Rm, Rs, shift].
1067 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1069 // Inst{6-5} encodes the shift opcode.
1070 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
// Shift amount 0 here: the amount comes from register Rs at run time.
1071 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1073 // Constant shifts: [Rm, reg0, shift_imm].
1074 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1075 // Inst{6-5} encodes the shift opcode.
1076 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1077 // Inst{11-7} encodes the imm5 shift amount.
1078 unsigned ShImm = slice(insn, 11, 7);
1080 // A8.4.1. Possible rrx or shift amount of 32...
// getImmShiftSE canonicalizes the (ShOp, ShImm) pair in place.
1081 getImmShiftSE(ShOp, ShImm);
1082 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// DisassembleLdStFrm - Shared worker for ARM load/store (word/byte) forms,
// addressing mode 2: optional writeback reg, dst/src reg, base reg, then
// either a +/- imm12 offset or a +/- shifted-register offset.
// NOTE(review): embedded original line numbers jump throughout; statements
// (returns, OpIdx increments, closing braces) are elided in this excerpt.
1089 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1090 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1092 const TargetInstrDesc &TID = ARMInsts[Opcode];
1093 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1094 const TargetOperandInfo *OpInfo = TID.OpInfo;
1095 if (!OpInfo) return false;
1097 unsigned &OpIdx = NumOpsAdded;
// Loads must define a register; stores have no defs unless pre/post-indexed
// (writeback).
1101 assert(((!isStore && TID.getNumDefs() > 0) ||
1102 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1103 && "Invalid arguments");
1105 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1106 if (isPrePost && isStore) {
1107 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1108 "Reg operand expected");
1109 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1114 // Disassemble the dst/src operand.
1115 if (OpIdx >= NumOps)
1118 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1119 "Reg operand expected");
1120 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1124 // After dst of a pre- and post-indexed load is the address base writeback.
1125 if (isPrePost && !isStore) {
1126 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1127 "Reg operand expected");
1128 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1133 // Disassemble the base operand.
1134 if (OpIdx >= NumOps)
1137 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1138 "Reg operand expected");
1139 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1140 && "Index mode or tied_to operand expected");
1141 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1145 // For reg/reg form, base reg is followed by +/- reg shop imm.
1146 // For immediate form, it is followed by +/- imm12.
1147 // See also ARMAddressingModes.h (Addressing Mode #2).
1148 if (OpIdx + 1 >= NumOps)
1151 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1152 (OpInfo[OpIdx+1].RegClass < 0) &&
1153 "Expect 1 reg operand followed by 1 imm operand");
// U bit (add/sub) selects the sign of the offset.
1155 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
// I bit == 0 selects the immediate-offset form; reg0 fills the offset-reg slot.
1156 if (getIBit(insn) == 0) {
1157 MI.addOperand(MCOperand::CreateReg(0));
1159 // Disassemble the 12-bit immediate offset.
1160 unsigned Imm12 = slice(insn, 11, 0);
1161 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
1162 MI.addOperand(MCOperand::CreateImm(Offset));
1164 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1165 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1167 // Inst{6-5} encodes the shift opcode.
1168 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1169 // Inst{11-7} encodes the imm5 shift amount.
1170 unsigned ShImm = slice(insn, 11, 7);
1172 // A8.4.1. Possible rrx or shift amount of 32...
1173 getImmShiftSE(ShOp, ShImm);
1174 MI.addOperand(MCOperand::CreateImm(
1175 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleLdFrm - Thin wrapper: delegate to DisassembleLdStFrm with
// isStore = false (load).
1182 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1183 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1184 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
// DisassembleStFrm - Thin wrapper: delegate to DisassembleLdStFrm with
// isStore = true (store).
1187 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1188 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1189 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
// HasDualReg - Predicate for the dual-register load/store opcodes (LDRD/STRD
// and their pre-/post-indexed variants), which move a Rd/Rd+1 register pair.
// NOTE(review): the switch header and return statements (original lines
// 1193-1195, 1198+) are elided in this excerpt.
1192 static bool HasDualReg(unsigned Opcode) {
1196 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1197 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// DisassembleLdStMiscFrm - Shared worker for the "miscellaneous" loads/stores
// (halfword, signed byte, dual), addressing mode 3: like DisassembleLdStFrm
// but with an imm8 (split Imm4H:Imm4L) or plain-register offset, plus a second
// dst/src register for the LDRD/STRD dual-register opcodes.
// NOTE(review): embedded original line numbers jump throughout; statements
// are elided in this excerpt.
1202 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1203 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1205 const TargetInstrDesc &TID = ARMInsts[Opcode];
1206 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1207 const TargetOperandInfo *OpInfo = TID.OpInfo;
1208 if (!OpInfo) return false;
1210 unsigned &OpIdx = NumOpsAdded;
1214 assert(((!isStore && TID.getNumDefs() > 0) ||
1215 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1216 && "Invalid arguments");
1218 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1219 if (isPrePost && isStore) {
1220 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1221 "Reg operand expected");
1222 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1227 bool DualReg = HasDualReg(Opcode);
1229 // Disassemble the dst/src operand.
1230 if (OpIdx >= NumOps)
1233 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1234 "Reg operand expected");
1235 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1239 // Fill in LDRD and STRD's second operand.
// Dual-register forms use the consecutive register Rd+1.
1241 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1242 decodeRd(insn) + 1)));
1246 // After dst of a pre- and post-indexed load is the address base writeback.
1247 if (isPrePost && !isStore) {
1248 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1249 "Reg operand expected");
1250 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1255 // Disassemble the base operand.
1256 if (OpIdx >= NumOps)
1259 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1260 "Reg operand expected");
1261 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1262 && "Index mode or tied_to operand expected");
1263 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1267 // For reg/reg form, base reg is followed by +/- reg.
1268 // For immediate form, it is followed by +/- imm8.
1269 // See also ARMAddressingModes.h (Addressing Mode #3).
1270 if (OpIdx + 1 >= NumOps)
1273 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1274 (OpInfo[OpIdx+1].RegClass < 0) &&
1275 "Expect 1 reg operand followed by 1 imm operand");
1277 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
// AM3 "I" bit == 1 selects the immediate-offset form (opposite sense of AM2).
1278 if (getAM3IBit(insn) == 1) {
1279 MI.addOperand(MCOperand::CreateReg(0));
1281 // Disassemble the 8-bit immediate offset.
1282 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1283 unsigned Imm4L = insn & 0xF;
1284 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
1285 MI.addOperand(MCOperand::CreateImm(Offset));
1287 // Disassemble the offset reg (Rm).
1288 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Register-offset form still carries an AM3 opc immediate with offset 0.
1290 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
1291 MI.addOperand(MCOperand::CreateImm(Offset));
// DisassembleLdMiscFrm - Thin wrapper: misc load, delegates with
// isStore = false.
// NOTE(review): the continuation line carrying the trailing "B);" argument is
// elided in this excerpt.
1298 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1299 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1300 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
// DisassembleStMiscFrm - Thin wrapper: misc store, delegates with
// isStore = true.
1304 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1305 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1306 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1309 // The algorithm for disassembly of LdStMulFrm is different from others because
1310 // it explicitly populates the two predicate operands after operand 0 (the base)
1311 // and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
1312 // reglist with each affected register encoded as an MCOperand.
// NOTE(review): embedded original line numbers jump here; some statements
// (returns, OpIdx updates, closing braces) are elided in this excerpt.
1313 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1314 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1316 assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");
1318 unsigned &OpIdx = NumOpsAdded;
1322 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1324 // Writeback to base, if necessary.
// The _UPD opcodes add the base a second time as the writeback def.
1325 if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
1326 MI.addOperand(MCOperand::CreateReg(Base));
1330 MI.addOperand(MCOperand::CreateReg(Base));
// AM4 submode (ia/ib/da/db) comes from the P and U bits.
1332 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1333 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
1335 // Handling the two predicate operands before the reglist.
// Condition 0xF (unconditional space) is mapped to AL (0xE) for the
// predicate operand.
1336 int64_t CondVal = insn >> ARMII::CondShift;
1337 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1338 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1342 // Fill the variadic part of reglist.
// Inst{15-0} is a bitmask: bit i set => Ri is in the register list.
1343 unsigned RegListBits = insn & ((1 << 16) - 1);
1344 for (unsigned i = 0; i < 16; ++i) {
1345 if ((RegListBits >> i) & 1) {
1346 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// DisassembleLdStExFrm - Load/store exclusive (and SWP/SWPB) operand layouts:
1355 // LDREX, LDREXB, LDREXH: Rd Rn
1356 // LDREXD: Rd Rd+1 Rn
1357 // STREX, STREXB, STREXH: Rd Rm Rn
1358 // STREXD: Rd Rm Rm+1 Rn
1360 // SWP, SWPB: Rd Rm Rn
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1361 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1362 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1364 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1365 if (!OpInfo) return false;
1367 unsigned &OpIdx = NumOpsAdded;
1372 && OpInfo[0].RegClass == ARM::GPRRegClassID
1373 && OpInfo[1].RegClass == ARM::GPRRegClassID
1374 && "Expect 2 reg operands");
// Inst{20} (L bit) == 0 means a store form.
1376 bool isStore = slice(insn, 20, 20) == 0;
// isDW: doubleword (dual-register) exclusive forms.
1377 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1379 // Add the destination operand.
1380 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1384 // Store register Exclusive needs a source operand.
1386 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Doubleword store source pair: Rm and Rm+1.
1391 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1392 decodeRm(insn)+1)));
// Doubleword load destination pair: Rd and Rd+1.
1396 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1397 decodeRd(insn)+1)));
1401 // Finally add the pointer operand.
1402 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1409 // Misc. Arithmetic Instructions.
1411 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1412 // RBIT, REV, REV16, REVSH: Rd Rm
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1413 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1414 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1416 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1417 unsigned &OpIdx = NumOpsAdded;
1422 && OpInfo[0].RegClass == ARM::GPRRegClassID
1423 && OpInfo[1].RegClass == ARM::GPRRegClassID
1424 && "Expect 2 reg operands");
// ThreeReg distinguishes the PKH* (Rd Rn Rm) forms from the 2-reg forms.
1426 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1428 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1433 assert(NumOps >= 4 && "Expect >= 4 operands");
1434 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1439 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1443 // If there is still an operand info left which is an immediate operand, add
1444 // an additional imm5 LSL/ASR operand.
1445 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1446 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1447 // Extract the 5-bit immediate field Inst{11-7}.
1448 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1449 MI.addOperand(MCOperand::CreateImm(ShiftAmt));
1456 /// DisassembleSatFrm - Disassemble saturate instructions:
1457 /// SSAT, SSAT16, USAT, and USAT16.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt (e.g. the increment of Pos for the signed forms
// between lines 1469 and 1471).
1458 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1459 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1461 const TargetInstrDesc &TID = ARMInsts[Opcode];
// The trailing two operands (pred:$p) are not populated here.
1462 NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1464 // Disassemble register def.
1465 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Inst{20-16} is the saturate-to bit position.
1468 unsigned Pos = slice(insn, 20, 16);
1469 if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1471 MI.addOperand(MCOperand::CreateImm(Pos));
1473 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// 4-operand forms (SSAT/USAT) also carry a shift operand.
1476 if (NumOpsAdded == 4) {
// Inst{6} selects ASR vs LSL.
1477 ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1478 // Inst{11-7} encodes the imm5 shift amount.
1479 unsigned ShAmt = slice(insn, 11, 7);
1481 // A8.6.183. Possible ASR shift amount of 32...
1482 if (Opc == ARM_AM::asr)
1485 Opc = ARM_AM::no_shift;
1487 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1492 // Extend instructions.
1493 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1494 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1495 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1496 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1497 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1499 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1500 unsigned &OpIdx = NumOpsAdded;
1505 && OpInfo[0].RegClass == ARM::GPRRegClassID
1506 && OpInfo[1].RegClass == ARM::GPRRegClassID
1507 && "Expect 2 reg operands");
// ThreeReg: SXTA*/UXTA* accumulate forms with an extra Rn operand.
1509 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1511 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1516 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1521 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1525 // If there is still an operand info left which is an immediate operand, add
1526 // an additional rotate immediate operand.
1527 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1528 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1529 // Extract the 2-bit rotate field Inst{11-10}.
1530 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1531 // Rotation by 8, 16, or 24 bits.
1532 MI.addOperand(MCOperand::CreateImm(rot << 3));
1539 /////////////////////////////////////
1541 // Utility Functions For VFP //
1543 /////////////////////////////////////
1545 // Extract/Decode Dd/Sd:
1547 // SP => d = UInt(Vd:D)
1548 // DP => d = UInt(D:Vd)
// Single precision: Vd (Inst{15-12}) is the high bits, D bit is the LSB;
// double precision: D bit is bit 4 above Vd.
1549 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1550 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1551 : (decodeRd(insn) | getDBit(insn) << 4);
1554 // Extract/Decode Dn/Sn:
1556 // SP => n = UInt(Vn:N)
1557 // DP => n = UInt(N:Vn)
// Same scheme as decodeVFPRd, using the Vn field and the N bit.
1558 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1559 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1560 : (decodeRn(insn) | getNBit(insn) << 4);
1563 // Extract/Decode Dm/Sm:
1565 // SP => m = UInt(Vm:M)
1566 // DP => m = UInt(M:Vm)
// Same scheme as decodeVFPRd, using the Vm field and the M bit.
1567 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1568 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1569 : (decodeRm(insn) | getMBit(insn) << 4);
// VFPExpandImm - Expand the 8-bit VFP modified immediate 'byte' into the bit
// pattern of an N-bit (32 or 64) floating-point constant: sign from bit 7,
// exponent derived from bit 6, mantissa from bits 5-0.
// NOTE(review): the declaration of Result, the N==32/N==64 branch structure,
// the bit6 conditionals, and the final return are elided in this excerpt
// (see the line-number gaps).
1574 static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
1575 assert(N == 32 || N == 64);
1578 unsigned bit6 = slice(byte, 6, 6);
1580 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1582 Result |= 0x1f << 25;
1584 Result |= 0x1 << 30;
1586 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1587 (uint64_t)slice(byte, 5, 0) << 48;
1589 Result |= 0xffL << 54;
1591 Result |= 0x1L << 62;
1597 // VFP Unary Format Instructions:
1599 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1600 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1601 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1602 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1603 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1605 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1607 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1608 unsigned &OpIdx = NumOpsAdded;
// The register class (SPR vs DPR) of each operand drives the SP/DP decode.
1612 unsigned RegClass = OpInfo[OpIdx].RegClass;
1613 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1614 "Reg operand expected");
1615 bool isSP = (RegClass == ARM::SPRRegClassID);
1617 MI.addOperand(MCOperand::CreateReg(
1618 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1621 // Early return for compare with zero instructions.
1622 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1623 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
// Source operand may be a different precision (VCVTDS/VCVTSD), so re-read
// the register class.
1626 RegClass = OpInfo[OpIdx].RegClass;
1627 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1628 "Reg operand expected");
1629 isSP = (RegClass == ARM::SPRRegClassID);
1631 MI.addOperand(MCOperand::CreateReg(
1632 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1638 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1639 // Some of them have operand constraints which tie the first operand in the
1640 // InOperandList to that of the dst. As far as asm printing is concerned, this
1641 // tied_to operand is simply skipped.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1642 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1643 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1645 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1647 const TargetInstrDesc &TID = ARMInsts[Opcode];
1648 const TargetOperandInfo *OpInfo = TID.OpInfo;
1649 unsigned &OpIdx = NumOpsAdded;
1653 unsigned RegClass = OpInfo[OpIdx].RegClass;
1654 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1655 "Reg operand expected");
1656 bool isSP = (RegClass == ARM::SPRRegClassID);
1658 MI.addOperand(MCOperand::CreateReg(
1659 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1662 // Skip tied_to operand constraint.
// A reg0 placeholder fills the tied slot so operand indices stay aligned.
1663 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1664 assert(NumOps >= 4 && "Expect >=4 operands");
1665 MI.addOperand(MCOperand::CreateReg(0));
1669 MI.addOperand(MCOperand::CreateReg(
1670 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1673 MI.addOperand(MCOperand::CreateReg(
1674 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1680 // A8.6.295 vcvt (floating-point <-> integer)
1681 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1682 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1684 // A8.6.297 vcvt (floating-point and fixed-point)
1685 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1686 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1687 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1689 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1691 const TargetInstrDesc &TID = ARMInsts[Opcode];
1692 const TargetOperandInfo *OpInfo = TID.OpInfo;
1693 if (!OpInfo) return false;
1695 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1696 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1697 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
// Fixed-point variant: Dd|Sd, tied copy of it, then the #fbits immediate.
1701 assert(NumOps >= 3 && "Expect >= 3 operands");
1702 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
// fbits = size - UInt(imm4:i), with imm4 = Inst{3-0} and i = Inst{5}.
1703 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1704 MI.addOperand(MCOperand::CreateReg(
1705 getRegisterEnum(B, RegClassID,
1706 decodeVFPRd(insn, SP))));
1708 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1709 "Tied to operand expected");
// Operand 1 is tied to operand 0: duplicate the dst operand.
1710 MI.addOperand(MI.getOperand(0));
1712 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1713 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1714 MI.addOperand(MCOperand::CreateImm(fbits));
1719 // The Rd (destination) and Rm (source) bits have different interpretations
1720 // depending on their single-precisionness.
1722 if (slice(insn, 18, 18) == 1) { // to_integer operation
// FP->int: the integer side is always a single-precision (SPR) register.
1723 d = decodeVFPRd(insn, true /* Is Single Precision */);
1724 MI.addOperand(MCOperand::CreateReg(
1725 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1726 m = decodeVFPRm(insn, SP);
1727 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
// Int->FP: source is the single-precision register, dst follows SP/DP.
1729 d = decodeVFPRd(insn, SP);
1730 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1731 m = decodeVFPRm(insn, true /* Is Single Precision */);
1732 MI.addOperand(MCOperand::CreateReg(
1733 getRegisterEnum(B, ARM::SPRRegClassID, m)));
1741 // VMOVRS - A8.6.330
1742 // Rt => Rd; Sn => UInt(Vn:N)
// Moves one SPR to a GPR: operands are [Rd(GPR), Sn(SPR)].
// NOTE(review): trailing statements (return/brace) are elided in this excerpt.
1743 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1744 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1746 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1748 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1750 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1751 decodeVFPRn(insn, true))));
1756 // VMOVRRD - A8.6.332
1757 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1759 // VMOVRRS - A8.6.331
1760 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Two GPR dsts first, then either an Sm/Sm+1 pair or a single Dm source.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1761 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1762 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1764 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1766 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1767 unsigned &OpIdx = NumOpsAdded;
1769 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1771 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1775 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
// SPR variant uses the consecutive pair Sm, Sm+1.
1776 unsigned Sm = decodeVFPRm(insn, true);
1777 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1779 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1783 MI.addOperand(MCOperand::CreateReg(
1784 getRegisterEnum(B, ARM::DPRRegClassID,
1785 decodeVFPRm(insn, false))));
1791 // VMOVSR - A8.6.330
1792 // Rt => Rd; Sn => UInt(Vn:N)
// Reverse direction of VFPConv2Frm: [Sn(SPR), Rd(GPR)] (GPR to SPR move).
// NOTE(review): trailing statements (return/brace) are elided in this excerpt.
1793 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1794 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1796 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1798 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1799 decodeVFPRn(insn, true))));
1800 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1806 // VMOVDRR - A8.6.332
1807 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1809 // VMOVRRS - A8.6.331
1810 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Reverse operand order of VFPConv3Frm: VFP dst reg(s) first, then the two
// GPR sources.
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1811 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1812 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1814 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1816 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1817 unsigned &OpIdx = NumOpsAdded;
1821 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
// SPR variant writes the consecutive pair Sm, Sm+1.
1822 unsigned Sm = decodeVFPRm(insn, true);
1823 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1825 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1829 MI.addOperand(MCOperand::CreateReg(
1830 getRegisterEnum(B, ARM::DPRRegClassID,
1831 decodeVFPRm(insn, false))));
1835 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1837 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1843 // VFP Load/Store Instructions.
1844 // VLDRD, VLDRS, VSTRD, VSTRS
// Operands: Dd/Sd, base GPR Rn, then an AM5 opc immediate (+/- imm8).
// NOTE(review): some statements (returns/braces) are elided in this excerpt.
1845 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1846 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1848 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
// Single-precision opcodes use SPR and the SP register decoding.
1850 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS) ? true : false;
1851 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1853 // Extract Dd/Sd for operand 0.
1854 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1856 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
1858 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1859 MI.addOperand(MCOperand::CreateReg(Base));
1861 // Next comes the AM5 Opcode.
1862 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1863 unsigned char Imm8 = insn & 0xFF;
1864 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1871 // VFP Load/Store Multiple Instructions.
1872 // This is similar to the algorithm for LDM/STM in that operand 0 (the base) and
1873 // operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
1874 // followed by a reglist of either DPR(s) or SPR(s).
1876 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1877 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1878 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1880 assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");
1882 unsigned &OpIdx = NumOpsAdded;
1886 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1888 // Writeback to base, if necessary.
// _UPD opcodes add the base a second time as the writeback def.
1889 if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
1890 Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
1891 MI.addOperand(MCOperand::CreateReg(Base));
1895 MI.addOperand(MCOperand::CreateReg(Base));
1897 // Next comes the AM5 Opcode.
1898 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1899 // Must be either "ia" or "db" submode.
1900 if (SubMode != ARM_AM::ia && SubMode != ARM_AM::db) {
1901 DEBUG(errs() << "Illegal addressing mode 5 sub-mode!\n");
1905 unsigned char Imm8 = insn & 0xFF;
1906 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, Imm8)));
1908 // Handling the two predicate operands before the reglist.
// Condition 0xF (unconditional space) is mapped to AL (0xE).
1909 int64_t CondVal = insn >> ARMII::CondShift;
1910 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1911 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1915 bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
1916 Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD) ? true : false;
1917 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1920 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1922 // Fill the variadic part of reglist.
// Imm8 counts SPR words; a DPR register covers two words, hence Imm8/2.
1923 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1924 for (unsigned i = 0; i < Regs; ++i) {
1925 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
1933 // Misc. VFP Instructions.
1934 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1935 // FCONSTD (DPR and a VFPf64Imm operand)
1936 // FCONSTS (SPR and a VFPf32Imm operand)
1937 // VMRS/VMSR (GPR operand)
// NOTE(review): embedded original line numbers jump here; statements are
// elided in this excerpt.
1938 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1939 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1941 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1942 unsigned &OpIdx = NumOpsAdded;
// FMSTAT has no explicit register operand; nothing to build.
1946 if (Opcode == ARM::FMSTAT)
1949 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
// Operand 0's register class selects the decoding of the Rd field.
1951 unsigned RegEnum = 0;
1952 switch (OpInfo[0].RegClass) {
1953 case ARM::DPRRegClassID:
1954 RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
1956 case ARM::SPRRegClassID:
1957 RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
1959 case ARM::GPRRegClassID:
1960 RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
1963 assert(0 && "Invalid reg class id");
1967 MI.addOperand(MCOperand::CreateReg(RegEnum));
1970 // Extract/decode the f64/f32 immediate.
1971 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1972 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1973 // The asm syntax specifies the before-expanded <imm>.
1974 // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
1975 // Opcode == ARM::FCONSTD ? 64 : 32)
1976 MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
1983 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
1984 #include "ThumbDisassemblerCore.h"
1986 /////////////////////////////////////////////////////
1988 // Utility Functions For ARM Advanced SIMD //
1990 /////////////////////////////////////////////////////
1992 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1993 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1995 // A7.3 Register encoding
1997 // Extract/Decode NEON D/Vd:
1999 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2000 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2001 // handling it in the getRegisterEnum() utility function.
2002 // D = Inst{22}, Vd = Inst{15-12}
2003 static unsigned decodeNEONRd(uint32_t insn) {
2004 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2005 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2008 // Extract/Decode NEON N/Vn:
2010 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2011 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2012 // handling it in the getRegisterEnum() utility function.
2013 // N = Inst{7}, Vn = Inst{19-16}
2014 static unsigned decodeNEONRn(uint32_t insn) {
2015 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2016 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2019 // Extract/Decode NEON M/Vm:
2021 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2022 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2023 // handling it in the getRegisterEnum() utility function.
2024 // M = Inst{5}, Vm = Inst{3-0}
2025 static unsigned decodeNEONRm(uint32_t insn) {
2026 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2027 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2038 } // End of unnamed namespace
2040 // size field -> Inst{11-10}
2041 // index_align field -> Inst{7-4}
2043 // The Lane Index interpretation depends on the Data Size:
2044 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2045 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2046 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2048 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
2049 static unsigned decodeLaneIndex(uint32_t insn) {
2050 unsigned size = insn >> 10 & 3;
2051 assert((size == 0 || size == 1 || size == 2) &&
2052 "Encoding error: size should be either 0, 1, or 2");
2054 unsigned index_align = insn >> 4 & 0xF;
// One fixed >>1 plus a size-dependent shift implements the table above.
2055 return (index_align >> 1) >> size;
2058 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2059 // op = Inst{5}, cmode = Inst{11-8}
2060 // i = Inst{24} (ARM architecture)
2061 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2062 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
// Packs (op, cmode, Imm8) into a single value: op in bit 12, cmode in bits
// 11-8, Imm8 in the low byte.
// NOTE(review): the term contributing imm4 (Inst{3-0}) to Imm8 is on an
// elided line between original lines 2067 and 2069.
2063 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2064 unsigned char op = (insn >> 5) & 1;
2065 unsigned char cmode = (insn >> 8) & 0xF;
2066 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2067 ((insn >> 16) & 7) << 4 |
2069 return (op << 12) | (cmode << 8) | Imm8;
2072 // A8.6.339 VMUL, VMULL (by scalar)
2073 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2074 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// NOTE(review): the switch on esize and its return statements (original lines
// 2076-2081) are elided in this excerpt; only the unreachable-default assert
// is visible.
2075 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2082 assert(0 && "Unreachable code!");
2087 // A8.6.339 VMUL, VMULL (by scalar)
2088 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2089 // ESize32 => index = Inst{5} (M) D0-D15
// NOTE(review): the switch header / case labels are elided in this excerpt;
// the two returns correspond to the ESize16 and ESize32 cases above.
2090 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2093 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2095 return (insn >> 5) & 1;
2097 assert(0 && "Unreachable code!");
2102 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2103 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// Inverts the encoding: fbits = 64 - imm6.
2104 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2105 return 64 - ((insn >> 16) & 0x3F);
2108 // A8.6.302 VDUP (scalar)
2109 // ESize8 => index = Inst{19-17}
2110 // ESize16 => index = Inst{19-18}
2111 // ESize32 => index = Inst{19}
// NOTE(review): the switch header / case labels around the three returns are
// elided in this excerpt; each return matches the esize mapping above.
2112 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2115 return (insn >> 17) & 7;
2117 return (insn >> 18) & 3;
2119 return (insn >> 19) & 1;
2121 assert(0 && "Unspecified element size!");
2126 // A8.6.328 VMOV (ARM core register to scalar)
2127 // A8.6.329 VMOV (scalar to ARM core register)
2128 // ESize8 => index = Inst{21:6-5}
2129 // ESize16 => index = Inst{21:6}
2130 // ESize32 => index = Inst{21}
// NOTE(review): the switch header / case labels around the three returns are
// elided in this excerpt; each return matches the esize mapping above.
2131 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2134 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2136 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2138 return ((insn >> 21) & 1);
2140 assert(0 && "Unspecified element size!");
2145 // Imm6 = Inst{21-16}, L = Inst{7}
//
// Decodes the shift amount for NEON vector shift instructions from L:imm6.
// The element size is inferred from the position of the leading 1 bit of
// L:imm6 (see the tables below); left and right shifts interpret imm6
// differently.
2147 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2149 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2150 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2151 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2152 // '1xxxxxx' => esize = 64; shift_amount = imm6
2154 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2156 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2157 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2158 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2159 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
2161 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2162 ElemSize esize = ESizeNA;
2163 unsigned L = (insn >> 7) & 1;
2164 unsigned imm6 = (insn >> 16) & 0x3F;
// The branches assigning esize for each imm6 pattern are partially elided in
// this view; only two of the leading-bit tests remain visible.
2168 else if (imm6 >> 4 == 1)
2170 else if (imm6 >> 5 == 1)
2173 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// Apply the per-direction interpretation from the tables above.  Note that
// the ElemSize enumerators are used here as their numeric bit widths.
2178 return esize == ESize64 ? imm6 : (imm6 - esize);
2180 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2184 // Imm4 = Inst{11-8}
//
// Returns the 4-bit immediate used by vector-extract style instructions.
2185 static unsigned decodeN3VImm(uint32_t insn) {
2186 return (insn >> 8) & 0xF;
// Returns true for the VLD1/VST1 "q" writeback opcodes below, whose Q
// register is materialized as a pair of consecutive D registers (see the
// DRegPair argument threaded into getRegisterEnum by the NLdSt handlers).
// The switch header and the return statements are elided from this view.
2189 static bool UseDRegPair(unsigned Opcode) {
2193 case ARM::VLD1q8_UPD:
2194 case ARM::VLD1q16_UPD:
2195 case ARM::VLD1q32_UPD:
2196 case ARM::VLD1q64_UPD:
2197 case ARM::VST1q8_UPD:
2198 case ARM::VST1q16_UPD:
2199 case ARM::VST1q32_UPD:
2200 case ARM::VST1q64_UPD:
// Worker for NEON VLD*/VST* disassembly.  Builds the MCOperand list in the
// operand orders documented below (loads put the destination D regs first;
// stores put the address first), consuming writeback, addrmode6 (Rn +
// alignment imm), the optional increment register Rm, the homogeneous
// DPR/QPR register list, any TIED_TO duplicates, and a trailing lane index
// when present.  Several statement lines of the original body (OpIdx
// increments, branch headers) are elided from this view.
2206 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2208 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2210 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2212 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2214 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
2215 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2216 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2219 const TargetInstrDesc &TID = ARMInsts[Opcode];
2220 const TargetOperandInfo *OpInfo = TID.OpInfo;
2222 // At least one DPR register plus addressing mode #6.
2223 assert(NumOps >= 3 && "Expect >= 3 operands");
// OpIdx aliases the caller's NumOpsAdded so every operand added below is
// counted automatically.
2225 unsigned &OpIdx = NumOpsAdded;
2229 // We have homogeneous NEON registers for Load/Store.
2230 unsigned RegClass = 0;
2231 bool DRegPair = UseDRegPair(Opcode);
2233 // Double-spaced registers have increments of 2.
2234 unsigned Inc = (DblSpaced || DRegPair) ? 2 : 1;
2236 unsigned Rn = decodeRn(insn);
2237 unsigned Rm = decodeRm(insn);
2238 unsigned Rd = decodeNEONRd(insn);
2240 // A7.7.1 Advanced SIMD addressing mode.
2243 // LLVM Addressing Mode #6.
// RmEnum is resolved up front (guard line elided) so both the store and load
// paths below can reuse it.
2244 unsigned RmEnum = 0;
2246 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
// ---- Store path: address operands first, then the register list. ----
2249 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2250 // then possible lane index.
2251 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2252 "Reg operand expected");
2255 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2260 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2261 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2262 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2264 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2268 MI.addOperand(MCOperand::CreateReg(RmEnum));
2272 assert(OpIdx < NumOps &&
2273 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2274 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2275 "Reg operand expected");
// Emit the register list; Rd advances by Inc per element (increment line
// elided in this view).
2277 RegClass = OpInfo[OpIdx].RegClass;
2278 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2279 MI.addOperand(MCOperand::CreateReg(
2280 getRegisterEnum(B, RegClass, Rd, DRegPair)));
2285 // Handle possible lane index.
2286 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2287 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2288 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// ---- Load path: register list first, then the address operands. ----
2293 // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
2294 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2295 RegClass = OpInfo[0].RegClass;
2297 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2298 MI.addOperand(MCOperand::CreateReg(
2299 getRegisterEnum(B, RegClass, Rd, DRegPair)));
2305 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2310 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2311 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2312 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2314 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2318 MI.addOperand(MCOperand::CreateReg(RmEnum));
// TIED_TO source copies of the destination regs are represented by reg 0
// placeholders; the asm printer does not print them.
2322 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2323 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2324 "Tied to operand expected");
2325 MI.addOperand(MCOperand::CreateReg(0));
2329 // Handle possible lane index.
2330 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2331 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2332 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2337 // Accessing registers past the end of the NEON register file is not
// Front end for NEON load/store disassembly: determines whether the opcode
// uses double-spaced registers (from the instruction name plus encoding
// fields) and whether it is a store (L bit, Inst{21}), then delegates to
// DisassembleNLdSt0.
2346 // If L (Inst{21}) == 0, store instructions.
2347 // Find out about double-spaced-ness of the Opcode and pass it on to
2348 // DisassembleNLdSt0().
2349 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2350 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2352 const StringRef Name = ARMInsts[Opcode].Name;
2353 bool DblSpaced = false;
2355 if (Name.find("LN") != std::string::npos) {
2356 // To one lane instructions.
2357 // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
// Single-lane forms: spacing is signalled by a bit whose position depends on
// the element size encoded in the mnemonic suffix.
2359 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2360 if (Name.endswith("16") || Name.endswith("16_UPD"))
2361 DblSpaced = slice(insn, 5, 5) == 1;
2363 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2364 if (Name.endswith("32") || Name.endswith("32_UPD"))
2365 DblSpaced = slice(insn, 6, 6) == 1;
2368 // Multiple n-element structures with type encoded as Inst{11-8}.
2369 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2371 // n == 2 && type == 0b1001 -> DblSpaced = true
2372 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2373 DblSpaced = slice(insn, 11, 8) == 9;
2375 // n == 3 && type == 0b0101 -> DblSpaced = true
2376 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2377 DblSpaced = slice(insn, 11, 8) == 5;
2379 // n == 4 && type == 0b0001 -> DblSpaced = true
2380 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2381 DblSpaced = slice(insn, 11, 8) == 1;
// L == 0 (Inst{21}) means a store, per the comment at the top.
2384 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2385 slice(insn, 21, 21) == 0, DblSpaced, B);
// Disassembles the one-register-plus-modified-immediate NEON form
// (A7.4.6; VMOV/VMVN vector immediates): adds the Qd/Dd destination and the
// packed modified-immediate operand.  The element size is derived from the
// opcode via the (partially elided) switch below.
2390 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2391 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2393 const TargetInstrDesc &TID = ARMInsts[Opcode];
2394 const TargetOperandInfo *OpInfo = TID.OpInfo;
2396 assert(NumOps >= 2 &&
2397 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2398 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2399 (OpInfo[1].RegClass < 0) &&
2400 "Expect 1 reg operand followed by 1 imm operand");
2402 // Qd/Dd = Inst{22:15-12} => NEON Rd
2403 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2404 decodeNEONRd(insn))));
// Map opcode -> element size; the esize assignments between the case groups
// are on lines elided from this view.
2406 ElemSize esize = ESizeNA;
2409 case ARM::VMOVv16i8:
2412 case ARM::VMOVv4i16:
2413 case ARM::VMOVv8i16:
2414 case ARM::VMVNv4i16:
2415 case ARM::VMVNv8i16:
2418 case ARM::VMOVv2i32:
2419 case ARM::VMOVv4i32:
2420 case ARM::VMVNv2i32:
2421 case ARM::VMVNv4i32:
2424 case ARM::VMOVv1i64:
2425 case ARM::VMOVv2i64:
2429 assert(0 && "Unreachable code!");
2433 // One register and a modified immediate value.
2434 // Add the imm operand.
2435 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2445 N2V_VectorConvert_Between_Float_Fixed
2447 } // End of unnamed namespace
// Common worker for two-register NEON forms (Vd, Vm, optional immediate):
// vector convert float<->fixed (fbits imm), VDUP lane (lane-index imm),
// vector move long/narrow, etc.  The N2VFlag selects which immediate decoder
// runs at the end.  Several lines of the original body are elided from view.
2449 // Vector Convert [between floating-point and fixed-point]
2450 // Qd/Dd Qm/Dm [fbits]
2452 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2453 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2456 // Vector Move Long:
2459 // Vector Move Narrow:
2463 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2464 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2466 const TargetInstrDesc &TID = ARMInsts[Opc];
2467 const TargetOperandInfo *OpInfo = TID.OpInfo;
2469 assert(NumOps >= 2 &&
2470 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2471 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2472 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2473 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2474 "Expect >= 2 operands and first 2 as reg operands");
// OpIdx aliases NumOpsAdded so operand counting is automatic.
2476 unsigned &OpIdx = NumOpsAdded;
2480 ElemSize esize = ESizeNA;
2481 if (Flag == N2V_VectorDupLane) {
2482 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2483 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2484 "Unexpected Opcode");
2485 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2486 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2490 // Qd/Dd = Inst{22:15-12} => NEON Rd
2491 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2492 decodeNEONRd(insn))));
// A TIED_TO destination copy is represented by a reg 0 placeholder.
2496 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2498 MI.addOperand(MCOperand::CreateReg(0));
2502 // Dm = Inst{5:3-0} => NEON Rm
2503 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2504 decodeNEONRm(insn))));
2507 // VZIP and others have two TIED_TO reg operands.
2509 while (OpIdx < NumOps &&
2510 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2511 // Add TIED_TO operand.
2512 MI.addOperand(MI.getOperand(Idx));
2516 // Add the imm operand, if required.
2517 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2518 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// 0xFFFFFFFF sentinel: the assert below catches a Flag that reached here
// without selecting an immediate decoder.
2520 unsigned imm = 0xFFFFFFFF;
2522 if (Flag == N2V_VectorDupLane)
2523 imm = decodeNVLaneDupIndex(insn, esize);
2524 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2525 imm = decodeVCVTFractionBits(insn);
2527 assert(imm != 0xFFFFFFFF && "Internal error");
2528 MI.addOperand(MCOperand::CreateImm(imm));
// Two-register NEON form with no immediate: thin wrapper over
// DisassembleNVdVmOptImm (the flag argument line is elided in this view).
2535 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2536 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2538 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
// VCVT float<->fixed form: delegates with the convert flag so the fbits
// immediate is decoded via decodeVCVTFractionBits.
2541 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2542 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2544 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2545 N2V_VectorConvert_Between_Float_Fixed, B);
// VDUP (lane) form: delegates with the dup-lane flag so the lane index is
// decoded via decodeNVLaneDupIndex.
2547 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2548 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2550 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2551 N2V_VectorDupLane, B);
// Worker for NEON vector shift-by-immediate forms: adds Qd/Dd, an optional
// TIED_TO accumulator placeholder, Qm/Dm, and the shift-amount immediate
// (decodeNVSAmt, direction-dependent).  VSHLL's special max-shift immediate
// handling is on lines elided from this view.
2554 // Vector Shift [Accumulate] Instructions.
2555 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2557 // Vector Shift Left Long (with maximum shift count) Instructions.
2558 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
2560 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2561 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2563 const TargetInstrDesc &TID = ARMInsts[Opcode];
2564 const TargetOperandInfo *OpInfo = TID.OpInfo;
2566 assert(NumOps >= 3 &&
2567 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2568 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2569 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2570 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2571 "Expect >= 3 operands and first 2 as reg operands");
// OpIdx aliases NumOpsAdded so operand counting is automatic.
2573 unsigned &OpIdx = NumOpsAdded;
2577 // Qd/Dd = Inst{22:15-12} => NEON Rd
2578 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2579 decodeNEONRd(insn))));
// Shift-accumulate forms tie the destination; placeholder reg 0 stands in.
2582 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2584 MI.addOperand(MCOperand::CreateReg(0));
2588 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2589 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2590 "Reg operand expected");
2592 // Qm/Dm = Inst{5:3-0} => NEON Rm
2593 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2594 decodeNEONRm(insn))));
2597 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2599 // Add the imm operand.
2601 // VSHLL has maximum shift count as the imm, inferred from its size.
2605 Imm = decodeNVSAmt(insn, LeftShift);
2617 MI.addOperand(MCOperand::CreateImm(Imm));
2623 // Left shift instructions.
// Delegates to DisassembleNVectorShift with LeftShift == true (the trailing
// `B);` argument line is elided in this view).
2624 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2625 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2627 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
2630 // Right shift instructions have different shift amount interpretation.
// Delegates to DisassembleNVectorShift with LeftShift == false (the trailing
// `B);` argument line is elided in this view).
2631 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2632 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2634 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
2643 N3V_Multiply_By_Scalar
2645 } // End of unnamed namespace
2647 // NEON Three Register Instructions with Optional Immediate Operand
//
// Common worker for three-register NEON forms.  The N3VFlag controls:
// operand order (vector shift swaps Vn/Vm), whether the trailing immediate
// is a 4-bit extract imm or a restricted scalar-lane index, and whether Dm
// is restricted to a subset of the D file (by-scalar multiplies).
2649 // Vector Extract Instructions.
2650 // Qd/Dd Qn/Dn Qm/Dm imm4
2652 // Vector Shift (Register) Instructions.
2653 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2655 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2656 // Qd/Dd Qn/Dn RestrictedDm index
2659 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2660 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
2662 const TargetInstrDesc &TID = ARMInsts[Opcode];
2663 const TargetOperandInfo *OpInfo = TID.OpInfo;
2665 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
2666 assert(NumOps >= 3 &&
2667 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2668 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2669 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2670 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2671 "Expect >= 3 operands and first 2 as reg operands");
// OpIdx aliases NumOpsAdded so operand counting is automatic.
2673 unsigned &OpIdx = NumOpsAdded;
2677 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2678 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2679 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2680 ElemSize esize = ESizeNA;
2681 if (Flag == N3V_Multiply_By_Scalar) {
// size field Inst{21-20}: 1 => 16-bit, 2 => 32-bit elements.
2682 unsigned size = (insn >> 20) & 3;
2683 if (size == 1) esize = ESize16;
2684 if (size == 2) esize = ESize32;
2685 assert (esize == ESize16 || esize == ESize32);
2688 // Qd/Dd = Inst{22:15-12} => NEON Rd
2689 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2690 decodeNEONRd(insn))));
2693 // VABA, VABAL, VBSLd, VBSLq, ...
// TIED_TO destination copy becomes a reg 0 placeholder.
2694 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2696 MI.addOperand(MCOperand::CreateReg(0));
// Second register operand: Rn normally, Rm for the vector-shift order.
2700 // Dn = Inst{7:19-16} => NEON Rn
2702 // Dm = Inst{5:3-0} => NEON Rm
2703 MI.addOperand(MCOperand::CreateReg(
2704 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2705 VdVnVm ? decodeNEONRn(insn)
2706 : decodeNEONRm(insn))));
2709 // Special case handling for VMOVDneon and VMOVQ because they are marked as
2711 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
// Third register operand, mirroring the order selection above.
2714 // Dm = Inst{5:3-0} => NEON Rm
2716 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2718 // Dn = Inst{7:19-16} => NEON Rn
2719 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2720 : decodeNEONRm(insn))
2721 : decodeNEONRn(insn);
2723 MI.addOperand(MCOperand::CreateReg(
2724 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
2727 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2728 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2729 // Add the imm operand.
2732 Imm = decodeN3VImm(insn);
2733 else if (IsDmRestricted)
2734 Imm = decodeRestrictedDmIndex(insn, esize);
2736 assert(0 && "Internal error: unreachable code!");
2740 MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register NEON data-processing form: wrapper over
// DisassembleNVdVnVmOptImm (the flag argument line is elided in this view).
2747 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2748 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2750 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// Vector shift (register) form: Vm precedes Vn, selected via the
// N3V_VectorShift flag.
2753 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
2754 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2756 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2757 N3V_VectorShift, B);
// VEXT form: trailing imm4 byte-offset, selected via the N3V_VectorExtract
// flag.
2759 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2760 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2762 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2763 N3V_VectorExtract, B);
// By-scalar multiply form: restricted Dm plus lane index, selected via the
// N3V_Multiply_By_Scalar flag.
2765 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
2766 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2768 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2769 N3V_Multiply_By_Scalar, B);
2772 // Vector Table Lookup
//
// Disassembles VTBL/VTBX: destination Dd, an optional TIED_TO copy for the
// VTBX variants, a list of 1-4 consecutive Dn table registers (length from
// Inst{9-8}), and the Dm index vector.  Some OpIdx-increment lines of the
// original body are elided from this view.
2774 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2775 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2776 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2777 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
2778 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2779 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2781 const TargetInstrDesc &TID = ARMInsts[Opcode];
2782 const TargetOperandInfo *OpInfo = TID.OpInfo;
2783 if (!OpInfo) return false;
2785 assert(NumOps >= 3 &&
2786 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2787 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2788 OpInfo[2].RegClass == ARM::DPRRegClassID &&
2789 "Expect >= 3 operands and first 3 as reg operands");
// OpIdx aliases NumOpsAdded so operand counting is automatic.
2791 unsigned &OpIdx = NumOpsAdded;
2795 unsigned Rn = decodeNEONRn(insn);
2797 // {Dn} encoded as len = 0b00
2798 // {Dn Dn+1} encoded as len = 0b01
2799 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2800 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
2801 unsigned Len = slice(insn, 9, 8) + 1;
2803 // Dd (the destination vector)
2804 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2805 decodeNEONRd(insn))));
2808 // Process tied_to operand constraint.
2810 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2811 MI.addOperand(MI.getOperand(Idx));
2815 // Do the <list> now.
2816 for (unsigned i = 0; i < Len; ++i) {
2817 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2818 "Reg operand expected");
// Consecutive table registers starting at Rn (the Rn+i argument line is
// elided in this view).
2819 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2824 // Dm (the index vector)
2825 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2826 "Reg operand (index vector) expected");
2827 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2828 decodeNEONRm(insn))));
2834 // Vector Get Lane (move scalar to ARM core register) Instructions.
2835 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
//
// Adds Rt (GPR destination), Dn (NEON source), and the lane index; the
// element size for the index decode is inferred from the opcode.
2836 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2837 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2839 const TargetInstrDesc &TID = ARMInsts[Opcode];
2840 const TargetOperandInfo *OpInfo = TID.OpInfo;
2841 if (!OpInfo) return false;
2843 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2844 OpInfo[0].RegClass == ARM::GPRRegClassID &&
2845 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2846 OpInfo[2].RegClass < 0 &&
2847 "Expect >= 3 operands with one dst operand");
// i32 => ESize32; s16/u16 => ESize16; the ESize8 fallback line is elided in
// this view.
2850 Opcode == ARM::VGETLNi32 ? ESize32
2851 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
2854 // Rt = Inst{15-12} => ARM Rd
2855 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2858 // Dn = Inst{7:19-16} => NEON Rn
2859 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2860 decodeNEONRn(insn))));
2862 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2868 // Vector Set Lane (move ARM core register to scalar) Instructions.
2869 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
//
// Adds Dd, a reg 0 placeholder for the TIED_TO source copy, Rt, and the lane
// index; element size inferred from the opcode.
2870 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2871 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2873 const TargetInstrDesc &TID = ARMInsts[Opcode];
2874 const TargetOperandInfo *OpInfo = TID.OpInfo;
2875 if (!OpInfo) return false;
// NOTE(review): this assert reads OpInfo[3] while only guarding
// NumOps >= 3 — confirm NumOps is actually >= 4 for all VSETLN opcodes.
2877 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2878 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2879 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2880 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
2881 OpInfo[2].RegClass == ARM::GPRRegClassID &&
2882 OpInfo[3].RegClass < 0 &&
2883 "Expect >= 3 operands with one dst operand");
// i8 => ESize8; i16 => ESize16; the ESize32 fallback line is elided in this
// view.
2886 Opcode == ARM::VSETLNi8 ? ESize8
2887 : (Opcode == ARM::VSETLNi16 ? ESize16
2890 // Dd = Inst{7:19-16} => NEON Rn
2891 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2892 decodeNEONRn(insn))));
// TIED_TO copy of Dd, represented by a reg 0 placeholder.
2895 MI.addOperand(MCOperand::CreateReg(0));
2897 // Rt = Inst{15-12} => ARM Rd
2898 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2901 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2907 // Vector Duplicate Instructions (from ARM core register to all elements).
2908 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
//
// Adds the Qd/Dd destination (class taken from the operand info) and the Rt
// source GPR.
2909 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2910 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2912 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2914 assert(NumOps >= 2 &&
2915 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2916 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2917 OpInfo[1].RegClass == ARM::GPRRegClassID &&
2918 "Expect >= 2 operands and first 2 as reg operand");
2920 unsigned RegClass = OpInfo[0].RegClass;
2922 // Qd/Dd = Inst{7:19-16} => NEON Rn
2923 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
2924 decodeNEONRn(insn))));
// The decodeRd(insn) argument line for Rt is elided in this view.
2926 // Rt = Inst{15-12} => ARM Rd
2927 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Returns true for the memory barrier encodings: Inst{31-20} == 0xf57 with
// op7_4 (Inst{7-4}) in [4, 6].  The return statements are elided in this
// view.
2937 static inline bool MemBarrierInstr(uint32_t insn) {
2938 unsigned op7_4 = slice(insn, 7, 4);
2939 if (slice(insn, 31, 20) == 0xf57 && (op7_4 >= 4 && op7_4 <= 6))
// Returns true for the preload-hint opcodes (PLD/PLDW/PLI, immediate and
// register forms).  The switch header and returns are elided in this view.
2945 static inline bool PreLoadOpcode(unsigned Opcode) {
2947 case ARM::PLDi: case ARM::PLDr:
2948 case ARM::PLDWi: case ARM::PLDWr:
2949 case ARM::PLIi: case ARM::PLIr:
// Disassembles PLD/PLDW/PLI.  Immediate forms get Rn plus a signed imm12
// offset; register forms get Rn, Rm, and an addrmode2 opcode immediate that
// packs the add/sub direction with the shift kind and amount.
2956 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2957 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2959 // Preload Data/Instruction requires either 2 or 4 operands.
2960 // PLDi, PLDWi, PLIi: Rn [+/-]imm12 add = (U == '1')
2961 // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
2963 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2966 if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
2967 unsigned Imm12 = slice(insn, 11, 0);
2968 bool Negative = getUBit(insn) == 0;
// NOTE(review): the negative offset is computed as -1 - Imm12, i.e.
// -(Imm12 + 1), not -Imm12 — confirm against the printer's expectation for
// negative AM2 offsets (possibly related to representing #-0).
2969 int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
2970 MI.addOperand(MCOperand::CreateImm(Offset));
2973 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2976 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2978 // Inst{6-5} encodes the shift opcode.
2979 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
2980 // Inst{11-7} encodes the imm5 shift amount.
2981 unsigned ShImm = slice(insn, 11, 7);
2983 // A8.4.1. Possible rrx or shift amount of 32...
// getImmShiftSE normalizes ShOp/ShImm in place for the rrx / shift-by-32
// special encodings.
2984 getImmShiftSE(ShOp, ShImm);
2985 MI.addOperand(MCOperand::CreateImm(
2986 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Dispatcher for the miscellaneous format: memory barriers, CPS, DBG, BKPT,
// and the preload hints.  Anything else hits the trailing assert.  Several
// return/add-operand lines of the original body are elided in this view.
2993 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2994 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2996 if (MemBarrierInstr(insn))
3014 // CPS has a singleton $opt operand that contains the following information:
3015 // opt{4-0} = mode from Inst{4-0}
3016 // opt{5} = changemode from Inst{17}
3017 // opt{8-6} = AIF from Inst{8-6}
3018 // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
3019 if (Opcode == ARM::CPS) {
3020 unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
3021 slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
3022 MI.addOperand(MCOperand::CreateImm(Option));
3027 // DBG has its option specified in Inst{3-0}.
3028 if (Opcode == ARM::DBG) {
3029 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3034 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3035 if (Opcode == ARM::BKPT) {
3036 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3037 slice(insn, 3, 0)));
3042 if (PreLoadOpcode(Opcode))
3043 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3045 assert(0 && "Unexpected misc instruction!");
3049 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3050 /// We divide the disassembly task into different categories, with each one
3051 /// corresponding to a specific instruction encoding format. There could be
3052 /// exceptions when handling a specific format, and that is why the Opcode is
3053 /// also present in the function prototype.
///
/// The table is indexed by the numeric value of ARMFormat (see the
/// ARMBasicMCBuilder constructor, which does FuncPtrs[(unsigned)format]), so
/// entry order must stay in sync with that enum.  Several entries of the
/// original table are elided from this view.
3054 static const DisassembleFP FuncPtrs[] = {
3058 &DisassembleBrMiscFrm,
3060 &DisassembleDPSoRegFrm,
3063 &DisassembleLdMiscFrm,
3064 &DisassembleStMiscFrm,
3065 &DisassembleLdStMulFrm,
3066 &DisassembleLdStExFrm,
3067 &DisassembleArithMiscFrm,
3070 &DisassembleVFPUnaryFrm,
3071 &DisassembleVFPBinaryFrm,
3072 &DisassembleVFPConv1Frm,
3073 &DisassembleVFPConv2Frm,
3074 &DisassembleVFPConv3Frm,
3075 &DisassembleVFPConv4Frm,
3076 &DisassembleVFPConv5Frm,
3077 &DisassembleVFPLdStFrm,
3078 &DisassembleVFPLdStMulFrm,
3079 &DisassembleVFPMiscFrm,
3080 &DisassembleThumbFrm,
3081 &DisassembleMiscFrm,
3082 &DisassembleNGetLnFrm,
3083 &DisassembleNSetLnFrm,
3084 &DisassembleNDupFrm,
3086 // VLD and VST (including one lane) Instructions.
3089 // A7.4.6 One register and a modified immediate value
3090 // 1-Register Instructions with imm.
3091 // LLVM only defines VMOVv instructions.
3092 &DisassembleN1RegModImmFrm,
3094 // 2-Register Instructions with no imm.
3095 &DisassembleN2RegFrm,
3097 // 2-Register Instructions with imm (vector convert float/fixed point).
3098 &DisassembleNVCVTFrm,
3100 // 2-Register Instructions with imm (vector dup lane).
3101 &DisassembleNVecDupLnFrm,
3103 // Vector Shift Left Instructions.
3104 &DisassembleN2RegVecShLFrm,
3106 // Vector Shift Right Instructions, which has different interpretation of the
3107 // shift amount from the imm6 field.
3108 &DisassembleN2RegVecShRFrm,
3110 // 3-Register Data-Processing Instructions.
3111 &DisassembleN3RegFrm,
3113 // Vector Shift (Register) Instructions.
3114 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3115 &DisassembleN3RegVecShFrm,
3117 // Vector Extract Instructions.
3118 &DisassembleNVecExtractFrm,
3120 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3121 // By Scalar Instructions.
3122 &DisassembleNVecMulScalarFrm,
3124 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3125 // values in a table and generate a new vector.
3126 &DisassembleNVTBLFrm,
3131 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3132 /// The general idea is to set the Opcode for the MCInst, followed by adding
3133 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3134 /// to the Format-specific disassemble function for disassembly, followed by
3135 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3136 /// which follow the Dst/Src Operands.
///
/// Returns false on any disassembly failure (including a builder error
/// recorded in this->Err).
3137 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3138 // Stage 1 sets the Opcode.
3139 MI.setOpcode(Opcode);
3140 // If the number of operands is zero, we're done!
3144 // Stage 2 calls the format-specific disassemble function to build the operand
// Disasm was resolved from FuncPtrs[Format] in the constructor.
3148 unsigned NumOpsAdded = 0;
3149 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
3151 if (!OK || this->Err != 0) return false;
3152 if (NumOpsAdded >= NumOps)
3155 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3156 // FIXME: Should this be done selectively?
3157 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3160 // A8.3 Conditional execution
3161 // A8.3.1 Pseudocode details of conditional execution
3162 // Condition bits '111x' indicate the instruction is always executed.
//
// Maps the 4-bit condition field to the value emitted as the predicate
// operand; the return statements (0xF presumably normalized to always/AL)
// are on lines elided from this view.
3163 static uint32_t CondCode(uint32_t CondField) {
3164 if (CondField == 0xF)
3169 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3170 /// of some Thumb instructions which come before the reglist operands. It
3171 /// returns true if the two predicate operands have been processed.
3172 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3173 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3175 assert(NumOpsRemaining > 0 && "Invalid argument");
3177 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx is the position of the next not-yet-added operand in the descriptor.
3178 unsigned Idx = MI.getNumOperands();
3180 // First, we check whether this instr specifies the PredicateOperand through
3181 // a pair of TargetOperandInfos with isPredicate() property.
3182 if (NumOpsRemaining >= 2 &&
3183 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3184 OpInfo[Idx].RegClass < 0 &&
3185 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3187 // If we are inside an IT block, get the IT condition bits maintained via
3188 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
// Condition imm (IT cond inside an IT block, else AL) followed by the CPSR
// predicate register; the branch structure between these adds is partially
// elided in this view.
3191 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3193 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3194 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3201 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3202 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
///
/// Adds the predicate pair (cond imm + CCR reg) when the descriptor declares
/// one, then the optional-def CPSR operand driven by the S bit.  Thumb
/// opcodes (name starts with 't') take their condition from instruction-
/// specific fields; ARM opcodes take it from Inst{31-28}.
3204 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3205 uint32_t insn, unsigned short NumOpsRemaining) {
3207 assert(NumOpsRemaining > 0 && "Invalid argument");
3209 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3210 const std::string &Name = ARMInsts[Opcode].Name;
// Idx is the position of the next not-yet-added operand in the descriptor.
3211 unsigned Idx = MI.getNumOperands();
3213 // First, we check whether this instr specifies the PredicateOperand through
3214 // a pair of TargetOperandInfos with isPredicate() property.
3215 if (NumOpsRemaining >= 2 &&
3216 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3217 OpInfo[Idx].RegClass < 0 &&
3218 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3220 // If we are inside an IT block, get the IT condition bits maintained via
3221 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3224 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3226 if (Name.length() > 1 && Name[0] == 't') {
3227 // Thumb conditional branch instructions have their cond field embedded,
3231 if (Name == "t2Bcc")
3232 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
3233 else if (Name == "tBcc")
3234 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
3236 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3238 // ARM instructions get their condition field from Inst{31-28}.
3239 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
3242 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3244 NumOpsRemaining -= 2;
3247 if (NumOpsRemaining == 0)
3250 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
// Reg 0 means "no CPSR def" when the S bit is clear.
3251 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3252 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3256 if (NumOpsRemaining == 0)
3262 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3263 /// after BuildIt is finished.
///
/// Updates the session's IT-block state: t2IT initializes it from Inst{7-0};
/// instructions inside an IT block advance it (the InITBlock branch body is
/// elided in this view).  Returns the possibly-downgraded Status.
3264 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// SP is the session/state pointer; with no session there is no IT state to
// maintain.
3267 if (!SP) return Status;
3269 if (Opcode == ARM::t2IT)
3270 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3271 else if (InITBlock())
3277 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
///
/// Resolves the format-specific disassemble function by indexing FuncPtrs
/// with the numeric ARMFormat value; the last table slot is reserved for the
/// unknown format, hence the length-minus-one bound in the assert.
3278 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3280 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3281 unsigned Idx = (unsigned)format;
3282 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3283 Disasm = FuncPtrs[Idx];
3286 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3287 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3288 /// Return NULL if it fails to create/return a proper builder. API clients
3289 /// are responsible for freeing up of the allocated memory. Cacheing can be
3290 /// performed by the API clients to improve performance.
3291 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3292 // For "Unknown format", fail by returning a NULL pointer.
3293 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3294 DEBUG(errs() << "Unknown format\n");
3298 return new ARMBasicMCBuilder(Opcode, Format,
3299 ARMInsts[Opcode].getNumOperands());