1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARMBaseInfo.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMMCExpr.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCAsmInfo.h"
17 #include "llvm/MC/MCContext.h"
18 #include "llvm/MC/MCStreamer.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrDesc.h"
22 #include "llvm/MC/MCRegisterInfo.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCTargetAsmParser.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Support/SourceMgr.h"
27 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/ADT/BitVector.h"
30 #include "llvm/ADT/OwningPtr.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/StringSwitch.h"
34 #include "llvm/ADT/Twine.h"
// Kinds of lane suffix on a NEON vector-list operand: no lane suffix
// ("{d0}"), the all-lanes form ("{d0[]}"), or a specific indexed lane
// ("{d0[1]}").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
44 class ARMAsmParser : public MCTargetAsmParser {
49 ARMCC::CondCodes Cond; // Condition for IT block.
50 unsigned Mask:4; // Condition mask for instructions.
51 // Starting at first 1 (from lsb).
52 // '1' condition as indicated in IT.
53 // '0' inverse of condition (else).
54 // Count of instructions in IT block is
55 // 4 - trailingzeroes(mask)
57 bool FirstCond; // Explicit flag for when we're parsing the
58 // First instruction in the IT block. It's
59 // implied in the mask, so needs special
62 unsigned CurPosition; // Current position in parsing of IT
63 // block. In range [0,3]. Initialized
64 // according to count of instructions in block.
65 // ~0U if no active IT block.
67 bool inITBlock() { return ITState.CurPosition != ~0U;}
68 void forwardITPosition() {
69 if (!inITBlock()) return;
70 // Move to the next instruction in the IT block, if there is one. If not,
71 // mark the block as done.
72 unsigned TZ = CountTrailingZeros_32(ITState.Mask);
73 if (++ITState.CurPosition == 5 - TZ)
74 ITState.CurPosition = ~0U; // Done with the IT block after this.
  // Convenience accessors for the wrapped generic assembly parser and its
  // lexer.
  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
  // Forward diagnostics to the generic parser. Error returns true so
  // callers can write 'return Error(Loc, ...);'.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
84 int tryParseRegister();
85 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
86 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
87 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
88 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
89 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
90 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
91 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
92 unsigned &ShiftAmount);
93 bool parseDirectiveWord(unsigned Size, SMLoc L);
94 bool parseDirectiveThumb(SMLoc L);
95 bool parseDirectiveARM(SMLoc L);
96 bool parseDirectiveThumbFunc(SMLoc L);
97 bool parseDirectiveCode(SMLoc L);
98 bool parseDirectiveSyntax(SMLoc L);
100 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
101 bool &CarrySetting, unsigned &ProcessorIMod,
103 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
104 bool &CanAcceptPredicationCode);
106 bool isThumb() const {
107 // FIXME: Can tablegen auto-generate this?
108 return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
110 bool isThumbOne() const {
111 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
113 bool isThumbTwo() const {
114 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
116 bool hasV6Ops() const {
117 return STI.getFeatureBits() & ARM::HasV6Ops;
119 bool hasV7Ops() const {
120 return STI.getFeatureBits() & ARM::HasV7Ops;
123 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
124 setAvailableFeatures(FB);
126 bool isMClass() const {
127 return STI.getFeatureBits() & ARM::FeatureMClass;
130 /// @name Auto-generated Match Functions
133 #define GET_ASSEMBLER_HEADER
134 #include "ARMGenAsmMatcher.inc"
138 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
139 OperandMatchResultTy parseCoprocNumOperand(
140 SmallVectorImpl<MCParsedAsmOperand*>&);
141 OperandMatchResultTy parseCoprocRegOperand(
142 SmallVectorImpl<MCParsedAsmOperand*>&);
143 OperandMatchResultTy parseCoprocOptionOperand(
144 SmallVectorImpl<MCParsedAsmOperand*>&);
145 OperandMatchResultTy parseMemBarrierOptOperand(
146 SmallVectorImpl<MCParsedAsmOperand*>&);
147 OperandMatchResultTy parseProcIFlagsOperand(
148 SmallVectorImpl<MCParsedAsmOperand*>&);
149 OperandMatchResultTy parseMSRMaskOperand(
150 SmallVectorImpl<MCParsedAsmOperand*>&);
151 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
152 StringRef Op, int Low, int High);
153 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
154 return parsePKHImm(O, "lsl", 0, 31);
156 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
157 return parsePKHImm(O, "asr", 1, 32);
159 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
160 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
161 OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
162 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
163 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
164 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
165 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
166 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
167 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
169 // Asm Match Converter Methods
170 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
171 const SmallVectorImpl<MCParsedAsmOperand*> &);
172 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
173 const SmallVectorImpl<MCParsedAsmOperand*> &);
174 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
175 const SmallVectorImpl<MCParsedAsmOperand*> &);
176 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
177 const SmallVectorImpl<MCParsedAsmOperand*> &);
178 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
179 const SmallVectorImpl<MCParsedAsmOperand*> &);
180 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
181 const SmallVectorImpl<MCParsedAsmOperand*> &);
182 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
183 const SmallVectorImpl<MCParsedAsmOperand*> &);
184 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
185 const SmallVectorImpl<MCParsedAsmOperand*> &);
186 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
187 const SmallVectorImpl<MCParsedAsmOperand*> &);
188 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
189 const SmallVectorImpl<MCParsedAsmOperand*> &);
190 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
191 const SmallVectorImpl<MCParsedAsmOperand*> &);
192 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
193 const SmallVectorImpl<MCParsedAsmOperand*> &);
194 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
195 const SmallVectorImpl<MCParsedAsmOperand*> &);
196 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
197 const SmallVectorImpl<MCParsedAsmOperand*> &);
198 bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
199 const SmallVectorImpl<MCParsedAsmOperand*> &);
200 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
201 const SmallVectorImpl<MCParsedAsmOperand*> &);
202 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
203 const SmallVectorImpl<MCParsedAsmOperand*> &);
204 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
205 const SmallVectorImpl<MCParsedAsmOperand*> &);
206 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
207 const SmallVectorImpl<MCParsedAsmOperand*> &);
208 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
209 const SmallVectorImpl<MCParsedAsmOperand*> &);
210 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
211 const SmallVectorImpl<MCParsedAsmOperand*> &);
213 bool validateInstruction(MCInst &Inst,
214 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
215 bool processInstruction(MCInst &Inst,
216 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
217 bool shouldOmitCCOutOperand(StringRef Mnemonic,
218 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
221 enum ARMMatchResultTy {
222 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
223 Match_RequiresNotITBlock,
228 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
229 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
230 MCAsmParserExtension::Initialize(_Parser);
232 // Initialize the set of available features.
233 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
235 // Not in an ITBlock to start with.
236 ITState.CurPosition = ~0U;
239 // Implementation of the MCTargetAsmParser interface:
240 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
241 bool ParseInstruction(StringRef Name, SMLoc NameLoc,
242 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
243 bool ParseDirective(AsmToken DirectiveID);
245 unsigned checkTargetMatchPredicate(MCInst &Inst);
247 bool MatchAndEmitInstruction(SMLoc IDLoc,
248 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
251 } // end anonymous namespace
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction operand.
257 class ARMOperand : public MCParsedAsmOperand {
278 k_VectorListAllLanes,
284 k_BitfieldDescriptor,
288 SMLoc StartLoc, EndLoc;
289 SmallVector<unsigned, 8> Registers;
293 ARMCC::CondCodes Val;
313 ARM_PROC::IFlags Val;
329 // A vector register list is a sequential list of 1 to 4 registers.
345 unsigned Val; // encoded 8-bit representation
348 /// Combined record for all forms of ARM address expressions.
351 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
353 const MCConstantExpr *OffsetImm; // Offset immediate value
354 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
355 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
356 unsigned ShiftImm; // shift for OffsetReg.
357 unsigned Alignment; // 0 = no alignment specified
358 // n = alignment in bytes (8, 16, or 32)
359 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
365 ARM_AM::ShiftOpc ShiftTy;
374 ARM_AM::ShiftOpc ShiftTy;
380 ARM_AM::ShiftOpc ShiftTy;
393 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
395 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
397 StartLoc = o.StartLoc;
414 case k_DPRRegisterList:
415 case k_SPRRegisterList:
416 Registers = o.Registers;
419 case k_VectorListAllLanes:
420 case k_VectorListIndexed:
421 VectorList = o.VectorList;
428 CoprocOption = o.CoprocOption;
436 case k_MemBarrierOpt:
442 case k_PostIndexRegister:
443 PostIdxReg = o.PostIdxReg;
451 case k_ShifterImmediate:
452 ShifterImm = o.ShifterImm;
454 case k_ShiftedRegister:
455 RegShiftedReg = o.RegShiftedReg;
457 case k_ShiftedImmediate:
458 RegShiftedImm = o.RegShiftedImm;
460 case k_RotateImmediate:
463 case k_BitfieldDescriptor:
464 Bitfield = o.Bitfield;
467 VectorIndex = o.VectorIndex;
  // Source-range accessors, used when emitting diagnostics for this operand.
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }
477 ARMCC::CondCodes getCondCode() const {
478 assert(Kind == k_CondCode && "Invalid access!");
482 unsigned getCoproc() const {
483 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
487 StringRef getToken() const {
488 assert(Kind == k_Token && "Invalid access!");
489 return StringRef(Tok.Data, Tok.Length);
492 unsigned getReg() const {
493 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
497 const SmallVectorImpl<unsigned> &getRegList() const {
498 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499 Kind == k_SPRRegisterList) && "Invalid access!");
503 const MCExpr *getImm() const {
504 assert(Kind == k_Immediate && "Invalid access!");
508 unsigned getFPImm() const {
509 assert(Kind == k_FPImmediate && "Invalid access!");
513 unsigned getVectorIndex() const {
514 assert(Kind == k_VectorIndex && "Invalid access!");
515 return VectorIndex.Val;
518 ARM_MB::MemBOpt getMemBarrierOpt() const {
519 assert(Kind == k_MemBarrierOpt && "Invalid access!");
523 ARM_PROC::IFlags getProcIFlags() const {
524 assert(Kind == k_ProcIFlags && "Invalid access!");
528 unsigned getMSRMask() const {
529 assert(Kind == k_MSRMask && "Invalid access!");
  // Simple operand-kind predicates, queried by the auto-generated matcher
  // (ARMGenAsmMatcher.inc) to classify parsed operands.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is represented as an ordinary condition-code
  // operand; only the mask gets its own kind.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
542 bool isImm8s4() const {
543 if (Kind != k_Immediate)
545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546 if (!CE) return false;
547 int64_t Value = CE->getValue();
548 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
550 bool isImm0_1020s4() const {
551 if (Kind != k_Immediate)
553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
554 if (!CE) return false;
555 int64_t Value = CE->getValue();
556 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
558 bool isImm0_508s4() const {
559 if (Kind != k_Immediate)
561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
562 if (!CE) return false;
563 int64_t Value = CE->getValue();
564 return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
566 bool isImm0_255() const {
567 if (Kind != k_Immediate)
569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
570 if (!CE) return false;
571 int64_t Value = CE->getValue();
572 return Value >= 0 && Value < 256;
574 bool isImm0_1() const {
575 if (Kind != k_Immediate)
577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578 if (!CE) return false;
579 int64_t Value = CE->getValue();
580 return Value >= 0 && Value < 2;
582 bool isImm0_3() const {
583 if (Kind != k_Immediate)
585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586 if (!CE) return false;
587 int64_t Value = CE->getValue();
588 return Value >= 0 && Value < 4;
590 bool isImm0_7() const {
591 if (Kind != k_Immediate)
593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
594 if (!CE) return false;
595 int64_t Value = CE->getValue();
596 return Value >= 0 && Value < 8;
598 bool isImm0_15() const {
599 if (Kind != k_Immediate)
601 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602 if (!CE) return false;
603 int64_t Value = CE->getValue();
604 return Value >= 0 && Value < 16;
606 bool isImm0_31() const {
607 if (Kind != k_Immediate)
609 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610 if (!CE) return false;
611 int64_t Value = CE->getValue();
612 return Value >= 0 && Value < 32;
614 bool isImm0_63() const {
615 if (Kind != k_Immediate)
617 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
618 if (!CE) return false;
619 int64_t Value = CE->getValue();
620 return Value >= 0 && Value < 64;
622 bool isImm8() const {
623 if (Kind != k_Immediate)
625 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
626 if (!CE) return false;
627 int64_t Value = CE->getValue();
630 bool isImm16() const {
631 if (Kind != k_Immediate)
633 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634 if (!CE) return false;
635 int64_t Value = CE->getValue();
638 bool isImm32() const {
639 if (Kind != k_Immediate)
641 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642 if (!CE) return false;
643 int64_t Value = CE->getValue();
646 bool isImm1_7() const {
647 if (Kind != k_Immediate)
649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650 if (!CE) return false;
651 int64_t Value = CE->getValue();
652 return Value > 0 && Value < 8;
654 bool isImm1_15() const {
655 if (Kind != k_Immediate)
657 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658 if (!CE) return false;
659 int64_t Value = CE->getValue();
660 return Value > 0 && Value < 16;
662 bool isImm1_31() const {
663 if (Kind != k_Immediate)
665 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666 if (!CE) return false;
667 int64_t Value = CE->getValue();
668 return Value > 0 && Value < 32;
670 bool isImm1_16() const {
671 if (Kind != k_Immediate)
673 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674 if (!CE) return false;
675 int64_t Value = CE->getValue();
676 return Value > 0 && Value < 17;
678 bool isImm1_32() const {
679 if (Kind != k_Immediate)
681 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
682 if (!CE) return false;
683 int64_t Value = CE->getValue();
684 return Value > 0 && Value < 33;
686 bool isImm0_32() const {
687 if (Kind != k_Immediate)
689 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690 if (!CE) return false;
691 int64_t Value = CE->getValue();
692 return Value >= 0 && Value < 33;
694 bool isImm0_65535() const {
695 if (Kind != k_Immediate)
697 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698 if (!CE) return false;
699 int64_t Value = CE->getValue();
700 return Value >= 0 && Value < 65536;
702 bool isImm0_65535Expr() const {
703 if (Kind != k_Immediate)
705 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706 // If it's not a constant expression, it'll generate a fixup and be
708 if (!CE) return true;
709 int64_t Value = CE->getValue();
710 return Value >= 0 && Value < 65536;
712 bool isImm24bit() const {
713 if (Kind != k_Immediate)
715 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
716 if (!CE) return false;
717 int64_t Value = CE->getValue();
718 return Value >= 0 && Value <= 0xffffff;
720 bool isImmThumbSR() const {
721 if (Kind != k_Immediate)
723 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
724 if (!CE) return false;
725 int64_t Value = CE->getValue();
726 return Value > 0 && Value < 33;
728 bool isPKHLSLImm() const {
729 if (Kind != k_Immediate)
731 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732 if (!CE) return false;
733 int64_t Value = CE->getValue();
734 return Value >= 0 && Value < 32;
736 bool isPKHASRImm() const {
737 if (Kind != k_Immediate)
739 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740 if (!CE) return false;
741 int64_t Value = CE->getValue();
742 return Value > 0 && Value <= 32;
744 bool isARMSOImm() const {
745 if (Kind != k_Immediate)
747 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748 if (!CE) return false;
749 int64_t Value = CE->getValue();
750 return ARM_AM::getSOImmVal(Value) != -1;
752 bool isARMSOImmNot() const {
753 if (Kind != k_Immediate)
755 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
756 if (!CE) return false;
757 int64_t Value = CE->getValue();
758 return ARM_AM::getSOImmVal(~Value) != -1;
760 bool isARMSOImmNeg() const {
761 if (Kind != k_Immediate)
763 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764 if (!CE) return false;
765 int64_t Value = CE->getValue();
766 return ARM_AM::getSOImmVal(-Value) != -1;
768 bool isT2SOImm() const {
769 if (Kind != k_Immediate)
771 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772 if (!CE) return false;
773 int64_t Value = CE->getValue();
774 return ARM_AM::getT2SOImmVal(Value) != -1;
776 bool isT2SOImmNot() const {
777 if (Kind != k_Immediate)
779 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780 if (!CE) return false;
781 int64_t Value = CE->getValue();
782 return ARM_AM::getT2SOImmVal(~Value) != -1;
784 bool isT2SOImmNeg() const {
785 if (Kind != k_Immediate)
787 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788 if (!CE) return false;
789 int64_t Value = CE->getValue();
790 return ARM_AM::getT2SOImmVal(-Value) != -1;
792 bool isSetEndImm() const {
793 if (Kind != k_Immediate)
795 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796 if (!CE) return false;
797 int64_t Value = CE->getValue();
798 return Value == 1 || Value == 0;
  // Operand-kind predicates for register, token, memory, and shifted-operand
  // forms; used by the auto-generated matcher.
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register, possibly with a shift applied (contrast with
  // isPostIdxReg, which additionally requires no shift).
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
813 bool isPostIdxReg() const {
814 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
816 bool isMemNoOffset(bool alignOK = false) const {
819 // No offset of any kind.
820 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
821 (alignOK || Memory.Alignment == 0);
823 bool isAlignedMemory() const {
824 return isMemNoOffset(true);
826 bool isAddrMode2() const {
827 if (!isMemory() || Memory.Alignment != 0) return false;
828 // Check for register offset.
829 if (Memory.OffsetRegNum) return true;
830 // Immediate offset in range [-4095, 4095].
831 if (!Memory.OffsetImm) return true;
832 int64_t Val = Memory.OffsetImm->getValue();
833 return Val > -4096 && Val < 4096;
835 bool isAM2OffsetImm() const {
836 if (Kind != k_Immediate)
838 // Immediate offset in range [-4095, 4095].
839 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
840 if (!CE) return false;
841 int64_t Val = CE->getValue();
842 return Val > -4096 && Val < 4096;
844 bool isAddrMode3() const {
845 if (!isMemory() || Memory.Alignment != 0) return false;
846 // No shifts are legal for AM3.
847 if (Memory.ShiftType != ARM_AM::no_shift) return false;
848 // Check for register offset.
849 if (Memory.OffsetRegNum) return true;
850 // Immediate offset in range [-255, 255].
851 if (!Memory.OffsetImm) return true;
852 int64_t Val = Memory.OffsetImm->getValue();
853 return Val > -256 && Val < 256;
855 bool isAM3Offset() const {
856 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
858 if (Kind == k_PostIndexRegister)
859 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
860 // Immediate offset in range [-255, 255].
861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862 if (!CE) return false;
863 int64_t Val = CE->getValue();
864 // Special case, #-0 is INT32_MIN.
865 return (Val > -256 && Val < 256) || Val == INT32_MIN;
867 bool isAddrMode5() const {
868 // If we have an immediate that's not a constant, treat it as a label
869 // reference needing a fixup. If it is a constant, it's something else
871 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
873 if (!isMemory() || Memory.Alignment != 0) return false;
874 // Check for register offset.
875 if (Memory.OffsetRegNum) return false;
876 // Immediate offset in range [-1020, 1020] and a multiple of 4.
877 if (!Memory.OffsetImm) return true;
878 int64_t Val = Memory.OffsetImm->getValue();
879 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
882 bool isMemTBB() const {
883 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
884 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
888 bool isMemTBH() const {
889 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
890 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
891 Memory.Alignment != 0 )
895 bool isMemRegOffset() const {
896 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
900 bool isT2MemRegOffset() const {
901 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
902 Memory.Alignment != 0)
904 // Only lsl #{0, 1, 2, 3} allowed.
905 if (Memory.ShiftType == ARM_AM::no_shift)
907 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
911 bool isMemThumbRR() const {
912 // Thumb reg+reg addressing is simple. Just two registers, a base and
913 // an offset. No shifts, negations or any other complicating factors.
914 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
915 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
917 return isARMLowRegister(Memory.BaseRegNum) &&
918 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
920 bool isMemThumbRIs4() const {
921 if (!isMemory() || Memory.OffsetRegNum != 0 ||
922 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
924 // Immediate offset, multiple of 4 in range [0, 124].
925 if (!Memory.OffsetImm) return true;
926 int64_t Val = Memory.OffsetImm->getValue();
927 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
929 bool isMemThumbRIs2() const {
930 if (!isMemory() || Memory.OffsetRegNum != 0 ||
931 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
// Immediate offset, multiple of 2 in range [0, 62].
934 if (!Memory.OffsetImm) return true;
935 int64_t Val = Memory.OffsetImm->getValue();
936 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
938 bool isMemThumbRIs1() const {
939 if (!isMemory() || Memory.OffsetRegNum != 0 ||
940 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
942 // Immediate offset in range [0, 31].
943 if (!Memory.OffsetImm) return true;
944 int64_t Val = Memory.OffsetImm->getValue();
945 return Val >= 0 && Val <= 31;
947 bool isMemThumbSPI() const {
948 if (!isMemory() || Memory.OffsetRegNum != 0 ||
949 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
951 // Immediate offset, multiple of 4 in range [0, 1020].
952 if (!Memory.OffsetImm) return true;
953 int64_t Val = Memory.OffsetImm->getValue();
954 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
956 bool isMemImm8s4Offset() const {
957 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
959 // Immediate offset a multiple of 4 in range [-1020, 1020].
960 if (!Memory.OffsetImm) return true;
961 int64_t Val = Memory.OffsetImm->getValue();
962 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
964 bool isMemImm0_1020s4Offset() const {
965 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
967 // Immediate offset a multiple of 4 in range [0, 1020].
968 if (!Memory.OffsetImm) return true;
969 int64_t Val = Memory.OffsetImm->getValue();
970 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
972 bool isMemImm8Offset() const {
973 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
975 // Immediate offset in range [-255, 255].
976 if (!Memory.OffsetImm) return true;
977 int64_t Val = Memory.OffsetImm->getValue();
978 return (Val == INT32_MIN) || (Val > -256 && Val < 256);
980 bool isMemPosImm8Offset() const {
981 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
983 // Immediate offset in range [0, 255].
984 if (!Memory.OffsetImm) return true;
985 int64_t Val = Memory.OffsetImm->getValue();
986 return Val >= 0 && Val < 256;
988 bool isMemNegImm8Offset() const {
989 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
991 // Immediate offset in range [-255, -1].
992 if (!Memory.OffsetImm) return false;
993 int64_t Val = Memory.OffsetImm->getValue();
994 return (Val == INT32_MIN) || (Val > -256 && Val < 0);
996 bool isMemUImm12Offset() const {
997 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
999 // Immediate offset in range [0, 4095].
1000 if (!Memory.OffsetImm) return true;
1001 int64_t Val = Memory.OffsetImm->getValue();
1002 return (Val >= 0 && Val < 4096);
1004 bool isMemImm12Offset() const {
1005 // If we have an immediate that's not a constant, treat it as a label
1006 // reference needing a fixup. If it is a constant, it's something else
1007 // and we reject it.
1008 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1011 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1013 // Immediate offset in range [-4095, 4095].
1014 if (!Memory.OffsetImm) return true;
1015 int64_t Val = Memory.OffsetImm->getValue();
1016 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1018 bool isPostIdxImm8() const {
1019 if (Kind != k_Immediate)
1021 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1022 if (!CE) return false;
1023 int64_t Val = CE->getValue();
1024 return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1026 bool isPostIdxImm8s4() const {
1027 if (Kind != k_Immediate)
1029 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1030 if (!CE) return false;
1031 int64_t Val = CE->getValue();
1032 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
  // Kind predicates for MSR mask and processor IFlags operands (used by
  // MSR/MRS and CPS instruction forms).
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1040 bool isVecListOneD() const {
1041 if (Kind != k_VectorList) return false;
1042 return VectorList.Count == 1;
1045 bool isVecListTwoD() const {
1046 if (Kind != k_VectorList) return false;
1047 return VectorList.Count == 2;
1050 bool isVecListThreeD() const {
1051 if (Kind != k_VectorList) return false;
1052 return VectorList.Count == 3;
1055 bool isVecListFourD() const {
1056 if (Kind != k_VectorList) return false;
1057 return VectorList.Count == 4;
1060 bool isVecListTwoQ() const {
1061 if (Kind != k_VectorList) return false;
1062 //FIXME: We haven't taught the parser to handle by-two register lists
1063 // yet, so don't pretend to know one.
1064 return VectorList.Count == 2 && false;
1067 bool isVecListOneDAllLanes() const {
1068 if (Kind != k_VectorListAllLanes) return false;
1069 return VectorList.Count == 1;
1072 bool isVecListTwoDAllLanes() const {
1073 if (Kind != k_VectorListAllLanes) return false;
1074 return VectorList.Count == 2;
1077 bool isVecListOneDByteIndexed() const {
1078 if (Kind != k_VectorListIndexed) return false;
1079 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1082 bool isVectorIndex8() const {
1083 if (Kind != k_VectorIndex) return false;
1084 return VectorIndex.Val < 8;
1086 bool isVectorIndex16() const {
1087 if (Kind != k_VectorIndex) return false;
1088 return VectorIndex.Val < 4;
1090 bool isVectorIndex32() const {
1091 if (Kind != k_VectorIndex) return false;
1092 return VectorIndex.Val < 2;
1095 bool isNEONi8splat() const {
1096 if (Kind != k_Immediate)
1098 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1099 // Must be a constant.
1100 if (!CE) return false;
1101 int64_t Value = CE->getValue();
1102 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1104 return Value >= 0 && Value < 256;
1107 bool isNEONi16splat() const {
1108 if (Kind != k_Immediate)
1110 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1111 // Must be a constant.
1112 if (!CE) return false;
1113 int64_t Value = CE->getValue();
1114 // i16 value in the range [0,255] or [0x0100, 0xff00]
1115 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1118 bool isNEONi32splat() const {
1119 if (Kind != k_Immediate)
1121 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1122 // Must be a constant.
1123 if (!CE) return false;
1124 int64_t Value = CE->getValue();
1125 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1126 return (Value >= 0 && Value < 256) ||
1127 (Value >= 0x0100 && Value <= 0xff00) ||
1128 (Value >= 0x010000 && Value <= 0xff0000) ||
1129 (Value >= 0x01000000 && Value <= 0xff000000);
1132 bool isNEONi32vmov() const {
1133 if (Kind != k_Immediate)
1135 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1136 // Must be a constant.
1137 if (!CE) return false;
1138 int64_t Value = CE->getValue();
1139 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1140 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1141 return (Value >= 0 && Value < 256) ||
1142 (Value >= 0x0100 && Value <= 0xff00) ||
1143 (Value >= 0x010000 && Value <= 0xff0000) ||
1144 (Value >= 0x01000000 && Value <= 0xff000000) ||
1145 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1146 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1149 bool isNEONi64splat() const {
1150 if (Kind != k_Immediate)
1152 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1153 // Must be a constant.
1154 if (!CE) return false;
1155 uint64_t Value = CE->getValue();
1156 // i64 value with each byte being either 0 or 0xff.
1157 for (unsigned i = 0; i < 8; ++i)
1158 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1162 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1163 // Add as immediates when possible. Null MCExpr = 0.
1165 Inst.addOperand(MCOperand::CreateImm(0));
1166 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1167 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1169 Inst.addOperand(MCOperand::CreateExpr(Expr));
1172 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1173 assert(N == 2 && "Invalid number of operands!");
1174 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1175 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1176 Inst.addOperand(MCOperand::CreateReg(RegNum));
1179 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1180 assert(N == 1 && "Invalid number of operands!");
1181 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1184 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1185 assert(N == 1 && "Invalid number of operands!");
1186 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1189 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1190 assert(N == 1 && "Invalid number of operands!");
1191 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1194 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1195 assert(N == 1 && "Invalid number of operands!");
1196 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1199 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 1 && "Invalid number of operands!");
1201 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1204 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1205 assert(N == 1 && "Invalid number of operands!");
1206 Inst.addOperand(MCOperand::CreateReg(getReg()));
1209 void addRegOperands(MCInst &Inst, unsigned N) const {
1210 assert(N == 1 && "Invalid number of operands!");
1211 Inst.addOperand(MCOperand::CreateReg(getReg()));
1214 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1215 assert(N == 3 && "Invalid number of operands!");
1216 assert(isRegShiftedReg() &&
1217 "addRegShiftedRegOperands() on non RegShiftedReg!");
1218 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1219 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1220 Inst.addOperand(MCOperand::CreateImm(
1221 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1224 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 2 && "Invalid number of operands!");
1226 assert(isRegShiftedImm() &&
1227 "addRegShiftedImmOperands() on non RegShiftedImm!");
1228 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1229 Inst.addOperand(MCOperand::CreateImm(
1230 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1233 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1234 assert(N == 1 && "Invalid number of operands!");
1235 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1239 void addRegListOperands(MCInst &Inst, unsigned N) const {
1240 assert(N == 1 && "Invalid number of operands!");
1241 const SmallVectorImpl<unsigned> &RegList = getRegList();
1242 for (SmallVectorImpl<unsigned>::const_iterator
1243 I = RegList.begin(), E = RegList.end(); I != E; ++I)
1244 Inst.addOperand(MCOperand::CreateReg(*I));
1247 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1248 addRegListOperands(Inst, N);
1251 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1252 addRegListOperands(Inst, N);
1255 void addRotImmOperands(MCInst &Inst, unsigned N) const {
1256 assert(N == 1 && "Invalid number of operands!");
1257 // Encoded as val>>3. The printer handles display as 8, 16, 24.
1258 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1261 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1262 assert(N == 1 && "Invalid number of operands!");
1263 // Munge the lsb/width into a bitfield mask.
1264 unsigned lsb = Bitfield.LSB;
1265 unsigned width = Bitfield.Width;
1266 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1267 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1268 (32 - (lsb + width)));
1269 Inst.addOperand(MCOperand::CreateImm(Mask));
1272 void addImmOperands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 addExpr(Inst, getImm());
1277 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1282 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 // FIXME: We really want to scale the value here, but the LDRD/STRD
1285 // instruction don't encode operands that way yet.
1286 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1287 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1290 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1291 assert(N == 1 && "Invalid number of operands!");
1292 // The immediate is scaled by four in the encoding and is stored
1293 // in the MCInst as such. Lop off the low two bits here.
1294 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1295 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1298 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 // The immediate is scaled by four in the encoding and is stored
1301 // in the MCInst as such. Lop off the low two bits here.
1302 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1303 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1306 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1307 assert(N == 1 && "Invalid number of operands!");
1308 // The constant encodes as the immediate-1, and we store in the instruction
1309 // the bits as encoded, so subtract off one here.
1310 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1311 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1314 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1315 assert(N == 1 && "Invalid number of operands!");
1316 // The constant encodes as the immediate-1, and we store in the instruction
1317 // the bits as encoded, so subtract off one here.
1318 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1322 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 // The constant encodes as the immediate, except for 32, which encodes as
1326 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1327 unsigned Imm = CE->getValue();
1328 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1331 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1334 // the instruction as well.
1335 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1336 int Val = CE->getValue();
1337 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1340 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 // The operand is actually a t2_so_imm, but we have its bitwise
1343 // negation in the assembly source, so twiddle it here.
1344 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1345 Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1348 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1349 assert(N == 1 && "Invalid number of operands!");
1350 // The operand is actually a t2_so_imm, but we have its
1351 // negation in the assembly source, so twiddle it here.
1352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1353 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1356 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 // The operand is actually a so_imm, but we have its bitwise
1359 // negation in the assembly source, so twiddle it here.
1360 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1361 Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1364 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 // The operand is actually a so_imm, but we have its
1367 // negation in the assembly source, so twiddle it here.
1368 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1369 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1372 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1377 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1378 assert(N == 1 && "Invalid number of operands!");
1379 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1382 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1383 assert(N == 2 && "Invalid number of operands!");
1384 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1385 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1388 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1389 assert(N == 3 && "Invalid number of operands!");
1390 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1391 if (!Memory.OffsetRegNum) {
1392 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1393 // Special case for #-0
1394 if (Val == INT32_MIN) Val = 0;
1395 if (Val < 0) Val = -Val;
1396 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1398 // For register offset, we encode the shift type and negation flag
1400 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1401 Memory.ShiftImm, Memory.ShiftType);
1403 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1404 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1405 Inst.addOperand(MCOperand::CreateImm(Val));
1408 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1409 assert(N == 2 && "Invalid number of operands!");
1410 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1411 assert(CE && "non-constant AM2OffsetImm operand!");
1412 int32_t Val = CE->getValue();
1413 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1414 // Special case for #-0
1415 if (Val == INT32_MIN) Val = 0;
1416 if (Val < 0) Val = -Val;
1417 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1418 Inst.addOperand(MCOperand::CreateReg(0));
1419 Inst.addOperand(MCOperand::CreateImm(Val));
1422 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1423 assert(N == 3 && "Invalid number of operands!");
1424 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1425 if (!Memory.OffsetRegNum) {
1426 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1427 // Special case for #-0
1428 if (Val == INT32_MIN) Val = 0;
1429 if (Val < 0) Val = -Val;
1430 Val = ARM_AM::getAM3Opc(AddSub, Val);
1432 // For register offset, we encode the shift type and negation flag
1434 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1436 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1437 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1438 Inst.addOperand(MCOperand::CreateImm(Val));
1441 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1442 assert(N == 2 && "Invalid number of operands!");
1443 if (Kind == k_PostIndexRegister) {
1445 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1446 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1447 Inst.addOperand(MCOperand::CreateImm(Val));
1452 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1453 int32_t Val = CE->getValue();
1454 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1455 // Special case for #-0
1456 if (Val == INT32_MIN) Val = 0;
1457 if (Val < 0) Val = -Val;
1458 Val = ARM_AM::getAM3Opc(AddSub, Val);
1459 Inst.addOperand(MCOperand::CreateReg(0));
1460 Inst.addOperand(MCOperand::CreateImm(Val));
1463 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1464 assert(N == 2 && "Invalid number of operands!");
1465 // If we have an immediate that's not a constant, treat it as a label
1466 // reference needing a fixup. If it is a constant, it's something else
1467 // and we reject it.
1469 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1470 Inst.addOperand(MCOperand::CreateImm(0));
1474 // The lower two bits are always zero and as such are not encoded.
1475 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1476 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1477 // Special case for #-0
1478 if (Val == INT32_MIN) Val = 0;
1479 if (Val < 0) Val = -Val;
1480 Val = ARM_AM::getAM5Opc(AddSub, Val);
1481 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1482 Inst.addOperand(MCOperand::CreateImm(Val));
1485 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1486 assert(N == 2 && "Invalid number of operands!");
1487 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1488 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1489 Inst.addOperand(MCOperand::CreateImm(Val));
1492 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1493 assert(N == 2 && "Invalid number of operands!");
1494 // The lower two bits are always zero and as such are not encoded.
1495 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1496 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1497 Inst.addOperand(MCOperand::CreateImm(Val));
1500 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 2 && "Invalid number of operands!");
1502 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1503 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1504 Inst.addOperand(MCOperand::CreateImm(Val));
1507 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1508 addMemImm8OffsetOperands(Inst, N);
1511 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1512 addMemImm8OffsetOperands(Inst, N);
1515 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1516 assert(N == 2 && "Invalid number of operands!");
1517 // If this is an immediate, it's a label reference.
1518 if (Kind == k_Immediate) {
1519 addExpr(Inst, getImm());
1520 Inst.addOperand(MCOperand::CreateImm(0));
1524 // Otherwise, it's a normal memory reg+offset.
1525 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1526 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1527 Inst.addOperand(MCOperand::CreateImm(Val));
1530 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1531 assert(N == 2 && "Invalid number of operands!");
1532 // If this is an immediate, it's a label reference.
1533 if (Kind == k_Immediate) {
1534 addExpr(Inst, getImm());
1535 Inst.addOperand(MCOperand::CreateImm(0));
1539 // Otherwise, it's a normal memory reg+offset.
1540 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1541 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1542 Inst.addOperand(MCOperand::CreateImm(Val));
1545 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1546 assert(N == 2 && "Invalid number of operands!");
1547 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1548 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1551 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1552 assert(N == 2 && "Invalid number of operands!");
1553 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1554 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1557 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1558 assert(N == 3 && "Invalid number of operands!");
1560 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1561 Memory.ShiftImm, Memory.ShiftType);
1562 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1563 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1564 Inst.addOperand(MCOperand::CreateImm(Val));
1567 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1568 assert(N == 3 && "Invalid number of operands!");
1569 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1570 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1571 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1574 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1575 assert(N == 2 && "Invalid number of operands!");
1576 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1577 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1580 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1581 assert(N == 2 && "Invalid number of operands!");
1582 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1583 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1584 Inst.addOperand(MCOperand::CreateImm(Val));
1587 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1588 assert(N == 2 && "Invalid number of operands!");
1589 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1590 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1591 Inst.addOperand(MCOperand::CreateImm(Val));
1594 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1595 assert(N == 2 && "Invalid number of operands!");
1596 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1597 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1598 Inst.addOperand(MCOperand::CreateImm(Val));
1601 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1602 assert(N == 2 && "Invalid number of operands!");
1603 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1604 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1605 Inst.addOperand(MCOperand::CreateImm(Val));
1608 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1609 assert(N == 1 && "Invalid number of operands!");
1610 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1611 assert(CE && "non-constant post-idx-imm8 operand!");
1612 int Imm = CE->getValue();
1613 bool isAdd = Imm >= 0;
1614 if (Imm == INT32_MIN) Imm = 0;
1615 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1616 Inst.addOperand(MCOperand::CreateImm(Imm));
1619 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1620 assert(N == 1 && "Invalid number of operands!");
1621 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1622 assert(CE && "non-constant post-idx-imm8s4 operand!");
1623 int Imm = CE->getValue();
1624 bool isAdd = Imm >= 0;
1625 if (Imm == INT32_MIN) Imm = 0;
1626 // Immediate is scaled by 4.
1627 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1628 Inst.addOperand(MCOperand::CreateImm(Imm));
1631 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1632 assert(N == 2 && "Invalid number of operands!");
1633 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1634 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1637 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1638 assert(N == 2 && "Invalid number of operands!");
1639 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1640 // The sign, shift type, and shift amount are encoded in a single operand
1641 // using the AM2 encoding helpers.
1642 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1643 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1644 PostIdxReg.ShiftTy);
1645 Inst.addOperand(MCOperand::CreateImm(Imm));
1648 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1649 assert(N == 1 && "Invalid number of operands!");
1650 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1653 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1654 assert(N == 1 && "Invalid number of operands!");
1655 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1658 void addVecListOperands(MCInst &Inst, unsigned N) const {
1659 assert(N == 1 && "Invalid number of operands!");
1660 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1663 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1664 assert(N == 2 && "Invalid number of operands!");
1665 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1666 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1669 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1670 assert(N == 1 && "Invalid number of operands!");
1671 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1674 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1675 assert(N == 1 && "Invalid number of operands!");
1676 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1679 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1680 assert(N == 1 && "Invalid number of operands!");
1681 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1684 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1685 assert(N == 1 && "Invalid number of operands!");
1686 // The immediate encodes the type of constant as well as the value.
1687 // Mask in that this is an i8 splat.
1688 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1689 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1692 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1693 assert(N == 1 && "Invalid number of operands!");
1694 // The immediate encodes the type of constant as well as the value.
1695 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1696 unsigned Value = CE->getValue();
1698 Value = (Value >> 8) | 0xa00;
1701 Inst.addOperand(MCOperand::CreateImm(Value));
1704 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1705 assert(N == 1 && "Invalid number of operands!");
1706 // The immediate encodes the type of constant as well as the value.
1707 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1708 unsigned Value = CE->getValue();
1709 if (Value >= 256 && Value <= 0xff00)
1710 Value = (Value >> 8) | 0x200;
1711 else if (Value > 0xffff && Value <= 0xff0000)
1712 Value = (Value >> 16) | 0x400;
1713 else if (Value > 0xffffff)
1714 Value = (Value >> 24) | 0x600;
1715 Inst.addOperand(MCOperand::CreateImm(Value));
1718 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1719 assert(N == 1 && "Invalid number of operands!");
1720 // The immediate encodes the type of constant as well as the value.
1721 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1722 unsigned Value = CE->getValue();
1723 if (Value >= 256 && Value <= 0xffff)
1724 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1725 else if (Value > 0xffff && Value <= 0xffffff)
1726 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1727 else if (Value > 0xffffff)
1728 Value = (Value >> 24) | 0x600;
1729 Inst.addOperand(MCOperand::CreateImm(Value));
1732 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1733 assert(N == 1 && "Invalid number of operands!");
1734 // The immediate encodes the type of constant as well as the value.
1735 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1736 uint64_t Value = CE->getValue();
1738 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1739 Imm |= (Value & 1) << i;
1741 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1744 virtual void print(raw_ostream &OS) const;
1746 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1747 ARMOperand *Op = new ARMOperand(k_ITCondMask);
1748 Op->ITMask.Mask = Mask;
1754 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1755 ARMOperand *Op = new ARMOperand(k_CondCode);
1762 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1763 ARMOperand *Op = new ARMOperand(k_CoprocNum);
1764 Op->Cop.Val = CopVal;
1770 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1771 ARMOperand *Op = new ARMOperand(k_CoprocReg);
1772 Op->Cop.Val = CopVal;
1778 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1779 ARMOperand *Op = new ARMOperand(k_CoprocOption);
1786 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1787 ARMOperand *Op = new ARMOperand(k_CCOut);
1788 Op->Reg.RegNum = RegNum;
1794 static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1795 ARMOperand *Op = new ARMOperand(k_Token);
1796 Op->Tok.Data = Str.data();
1797 Op->Tok.Length = Str.size();
1803 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1804 ARMOperand *Op = new ARMOperand(k_Register);
1805 Op->Reg.RegNum = RegNum;
1811 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1816 ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1817 Op->RegShiftedReg.ShiftTy = ShTy;
1818 Op->RegShiftedReg.SrcReg = SrcReg;
1819 Op->RegShiftedReg.ShiftReg = ShiftReg;
1820 Op->RegShiftedReg.ShiftImm = ShiftImm;
1826 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1830 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1831 Op->RegShiftedImm.ShiftTy = ShTy;
1832 Op->RegShiftedImm.SrcReg = SrcReg;
1833 Op->RegShiftedImm.ShiftImm = ShiftImm;
1839 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1841 ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1842 Op->ShifterImm.isASR = isASR;
1843 Op->ShifterImm.Imm = Imm;
1849 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1850 ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1851 Op->RotImm.Imm = Imm;
1857 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1859 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1860 Op->Bitfield.LSB = LSB;
1861 Op->Bitfield.Width = Width;
1868 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1869 SMLoc StartLoc, SMLoc EndLoc) {
1870 KindTy Kind = k_RegisterList;
1872 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1873 Kind = k_DPRRegisterList;
1874 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1875 contains(Regs.front().first))
1876 Kind = k_SPRRegisterList;
1878 ARMOperand *Op = new ARMOperand(Kind);
1879 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1880 I = Regs.begin(), E = Regs.end(); I != E; ++I)
1881 Op->Registers.push_back(I->first);
1882 array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1883 Op->StartLoc = StartLoc;
1884 Op->EndLoc = EndLoc;
1888 static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1890 ARMOperand *Op = new ARMOperand(k_VectorList);
1891 Op->VectorList.RegNum = RegNum;
1892 Op->VectorList.Count = Count;
1898 static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1900 ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1901 Op->VectorList.RegNum = RegNum;
1902 Op->VectorList.Count = Count;
1908 static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1909 unsigned Index, SMLoc S, SMLoc E) {
1910 ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1911 Op->VectorList.RegNum = RegNum;
1912 Op->VectorList.Count = Count;
1913 Op->VectorList.LaneIndex = Index;
1919 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1921 ARMOperand *Op = new ARMOperand(k_VectorIndex);
1922 Op->VectorIndex.Val = Idx;
1928 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1929 ARMOperand *Op = new ARMOperand(k_Immediate);
1936 static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1937 ARMOperand *Op = new ARMOperand(k_FPImmediate);
1938 Op->FPImm.Val = Val;
1944 static ARMOperand *CreateMem(unsigned BaseRegNum,
1945 const MCConstantExpr *OffsetImm,
1946 unsigned OffsetRegNum,
1947 ARM_AM::ShiftOpc ShiftType,
1952 ARMOperand *Op = new ARMOperand(k_Memory);
1953 Op->Memory.BaseRegNum = BaseRegNum;
1954 Op->Memory.OffsetImm = OffsetImm;
1955 Op->Memory.OffsetRegNum = OffsetRegNum;
1956 Op->Memory.ShiftType = ShiftType;
1957 Op->Memory.ShiftImm = ShiftImm;
1958 Op->Memory.Alignment = Alignment;
1959 Op->Memory.isNegative = isNegative;
1965 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
1966 ARM_AM::ShiftOpc ShiftTy,
1969 ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
1970 Op->PostIdxReg.RegNum = RegNum;
1971 Op->PostIdxReg.isAdd = isAdd;
1972 Op->PostIdxReg.ShiftTy = ShiftTy;
1973 Op->PostIdxReg.ShiftImm = ShiftImm;
1979 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
1980 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
1981 Op->MBOpt.Val = Opt;
1987 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
1988 ARMOperand *Op = new ARMOperand(k_ProcIFlags);
1989 Op->IFlags.Val = IFlags;
1995 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
1996 ARMOperand *Op = new ARMOperand(k_MSRMask);
1997 Op->MMask.Val = MMask;
2004 } // end anonymous namespace.
// Debug-dump this operand to OS in a human-readable "<kind value>" form.
// One switch case per operand kind. NOTE(review): this excerpt is elided —
// several `case` labels, `break` statements, and closing braces between the
// lines below are not visible, so adjacent lines may belong to different
// switch cases.
2006 void ARMOperand::print(raw_ostream &OS) const {
2009 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2013 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2016 OS << "<ccout " << getReg() << ">";
2018 case k_ITCondMask: {
// MaskStr is indexed directly by the 4-bit IT mask; the assert below
// guarantees the mask fits in those 4 bits.
2019 static const char *MaskStr[] = {
2020 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2021 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2023 assert((ITMask.Mask & 0xf) == ITMask.Mask);
2024 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2028 OS << "<coprocessor number: " << getCoproc() << ">";
2031 OS << "<coprocessor register: " << getCoproc() << ">";
2033 case k_CoprocOption:
2034 OS << "<coprocessor option: " << CoprocOption.Val << ">";
2037 OS << "<mask: " << getMSRMask() << ">";
2040 getImm()->print(OS);
2042 case k_MemBarrierOpt:
2043 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2047 << " base:" << Memory.BaseRegNum;
2050 case k_PostIndexRegister:
// Print the post-index register with a '-' prefix when it is subtracted.
2051 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2052 << PostIdxReg.RegNum;
2053 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2054 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2055 << PostIdxReg.ShiftImm;
2058 case k_ProcIFlags: {
2059 OS << "<ARM_PROC::";
2060 unsigned IFlags = getProcIFlags();
// Walk the three iflag bits (a/i/f) from high to low and print each set one.
2061 for (int i=2; i >= 0; --i)
2062 if (IFlags & (1 << i))
2063 OS << ARM_PROC::IFlagsToString(1 << i);
2068 OS << "<register " << getReg() << ">";
2070 case k_ShifterImmediate:
2071 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2072 << " #" << ShifterImm.Imm << ">";
2074 case k_ShiftedRegister:
2075 OS << "<so_reg_reg "
2076 << RegShiftedReg.SrcReg << " "
2077 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2078 << " " << RegShiftedReg.ShiftReg << ">";
2080 case k_ShiftedImmediate:
2081 OS << "<so_reg_imm "
2082 << RegShiftedImm.SrcReg << " "
2083 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2084 << " #" << RegShiftedImm.ShiftImm << ">";
2086 case k_RotateImmediate:
// RotImm.Imm is stored as a byte-rotation count; multiply by 8 for bits.
2087 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2089 case k_BitfieldDescriptor:
2090 OS << "<bitfield " << "lsb: " << Bitfield.LSB
2091 << ", width: " << Bitfield.Width << ">";
2093 case k_RegisterList:
2094 case k_DPRRegisterList:
2095 case k_SPRRegisterList: {
2096 OS << "<register_list ";
2098 const SmallVectorImpl<unsigned> &RegList = getRegList();
// Comma-separate the entries; the increment-then-compare below prints a
// separator after every element except the last.
2099 for (SmallVectorImpl<unsigned>::const_iterator
2100 I = RegList.begin(), E = RegList.end(); I != E; ) {
2102 if (++I < E) OS << ", ";
2109 OS << "<vector_list " << VectorList.Count << " * "
2110 << VectorList.RegNum << ">";
2112 case k_VectorListAllLanes:
2113 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2114 << VectorList.RegNum << ">";
2116 case k_VectorListIndexed:
2117 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2118 << VectorList.Count << " * " << VectorList.RegNum << ">";
2121 OS << "'" << getToken() << "'";
2124 OS << "<vectorindex " << getVectorIndex() << ">";
2129 /// @name Auto-generated Match Functions
2132 static unsigned MatchRegisterName(StringRef Name);
// MCTargetAsmParser entry point for parsing a register operand.
// Delegates to tryParseRegister() and reports failure (returns true) when
// that yields -1, i.e. the current token is not a recognized register.
// NOTE(review): the elided line(s) here presumably set StartLoc/EndLoc from
// the token location — confirm against the full source.
2136 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2137                                  SMLoc &StartLoc, SMLoc &EndLoc) {
2138 RegNo = tryParseRegister();
2140 return (RegNo == (unsigned)-1);
2143 /// Try to parse a register name. The token must be an Identifier when called,
2144 /// and if it is a register name the token is eaten and the register number is
2145 /// returned. Otherwise return -1.
// Core register-name matcher. On success the identifier token is consumed
// and the register's enum value is returned; on failure -1 is returned and
// no tokens are consumed. Matching is case-insensitive via lower().
2147 int ARMAsmParser::tryParseRegister() {
2148 const AsmToken &Tok = Parser.getTok();
2149 if (Tok.isNot(AsmToken::Identifier)) return -1;
2151 // FIXME: Validate register for the current architecture; we have to do
2152 // validation later, so maybe there is no need for this here.
2153 std::string lowerCase = Tok.getString().lower();
2154 unsigned RegNum = MatchRegisterName(lowerCase);
// Fall back to the aliases the tablegen matcher doesn't know about
// (r13/r14/r15, "ip" for r12). NOTE(review): the StringSwitch's remaining
// .Case lines and its .Default, plus the final `return RegNum;`, are elided
// from this excerpt.
2156 RegNum = StringSwitch<unsigned>(lowerCase)
2157 .Case("r13", ARM::SP)
2158 .Case("r14", ARM::LR)
2159 .Case("r15", ARM::PC)
2160 .Case("ip", ARM::R12)
2163 if (!RegNum) return -1;
2165 Parser.Lex(); // Eat identifier token.
2170 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2171 // If a recoverable error occurs, return 1. If an irrecoverable error
2172 // occurs, return -1. An irrecoverable error is one where tokens have been
2173 // consumed in the process of trying to parse the shifter (i.e., when it is
2174 // indeed a shifter operand, but malformed).
// Parse a shifter operand ("lsl #4", "asr r3", "rrx", ...) and fold it
// together with the previously-parsed source register into a single
// shifted-register / shifted-immediate operand. See the convention comment
// above: 0 = success, 1 = recoverable error, -1 = irrecoverable error.
// NOTE(review): the declarations of `Imm` and `ShiftReg` used below are in
// lines elided from this excerpt, as are several error-path returns.
2175 int ARMAsmParser::tryParseShiftRegister(
2176 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2177 SMLoc S = Parser.getTok().getLoc();
2178 const AsmToken &Tok = Parser.getTok();
2179 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// "asl" is accepted as a synonym for "lsl".
2181 std::string lowerCase = Tok.getString().lower();
2182 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2183 .Case("asl", ARM_AM::lsl)
2184 .Case("lsl", ARM_AM::lsl)
2185 .Case("lsr", ARM_AM::lsr)
2186 .Case("asr", ARM_AM::asr)
2187 .Case("ror", ARM_AM::ror)
2188 .Case("rrx", ARM_AM::rrx)
2189 .Default(ARM_AM::no_shift);
2191 if (ShiftTy == ARM_AM::no_shift)
2194 Parser.Lex(); // Eat the operator.
2196 // The source register for the shift has already been added to the
2197 // operand list, so we need to pop it off and combine it into the shifted
2198 // register operand instead.
2199 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2200 if (!PrevOp->isReg())
2201 return Error(PrevOp->getStartLoc(), "shift must be of a register");
2202 int SrcReg = PrevOp->getReg();
2205 if (ShiftTy == ARM_AM::rrx) {
2206 // RRX Doesn't have an explicit shift amount. The encoder expects
2207 // the shift register to be the same as the source register. Seems odd,
2211 // Figure out if this is shifted by a constant or a register (for non-RRX).
2212 if (Parser.getTok().is(AsmToken::Hash)) {
2213 Parser.Lex(); // Eat hash.
2214 SMLoc ImmLoc = Parser.getTok().getLoc();
2215 const MCExpr *ShiftExpr = 0;
2216 if (getParser().ParseExpression(ShiftExpr)) {
2217 Error(ImmLoc, "invalid immediate shift value");
2220 // The expression must be evaluatable as an immediate.
2221 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2223 Error(ImmLoc, "invalid immediate shift value");
2226 // Range check the immediate.
2227 // lsl, ror: 0 <= imm <= 31
2228 // lsr, asr: 0 <= imm <= 32
2229 Imm = CE->getValue();
2231 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2232 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2233 Error(ImmLoc, "immediate shift value out of range");
2236 } else if (Parser.getTok().is(AsmToken::Identifier)) {
2237 ShiftReg = tryParseRegister();
2238 SMLoc L = Parser.getTok().getLoc();
2239 if (ShiftReg == -1) {
2240 Error (L, "expected immediate or register in shift operand");
2244 Error (Parser.getTok().getLoc(),
2245 "expected immediate or register in shift operand");
// Register-shifted form only when a shift register was parsed and the op
// isn't RRX; otherwise fall through to the immediate-shifted form.
2250 if (ShiftReg && ShiftTy != ARM_AM::rrx)
2251 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2253 S, Parser.getTok().getLoc()))
2255 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2256 S, Parser.getTok().getLoc()));
2262 /// Try to parse a register name. The token must be an Identifier when called.
2263 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
2264 /// if there is a "writeback". 'true' if it's not a register.
2266 /// TODO this is likely to change to allow different register types and or to
2267 /// parse for a specific register type.
// Parse a register, then any trailing writeback marker ('!') or vector
// lane index ('[imm]'), pushing one operand for each piece parsed.
// Returns true when the token is not a register. NOTE(review): the return
// type on the function's first line and several early-return/push lines
// are elided from this excerpt.
2269 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2270 SMLoc S = Parser.getTok().getLoc();
2271 int RegNo = tryParseRegister();
2275 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
// A '!' suffix requests base-register writeback; it becomes its own token
// operand so the matcher can key off it.
2277 const AsmToken &ExclaimTok = Parser.getTok();
2278 if (ExclaimTok.is(AsmToken::Exclaim)) {
2279 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2280 ExclaimTok.getLoc()));
2281 Parser.Lex(); // Eat exclaim token
2285 // Also check for an index operand. This is only legal for vector registers,
2286 // but that'll get caught OK in operand matching, so we don't need to
2287 // explicitly filter everything else out here.
2288 if (Parser.getTok().is(AsmToken::LBrac)) {
2289 SMLoc SIdx = Parser.getTok().getLoc();
2290 Parser.Lex(); // Eat left bracket token.
2292 const MCExpr *ImmVal;
2293 if (getParser().ParseExpression(ImmVal))
2294 return MatchOperand_ParseFail;
// The index must fold to a constant; symbolic indices are rejected.
2295 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2297 TokError("immediate value expected for vector index");
2298 return MatchOperand_ParseFail;
2301 SMLoc E = Parser.getTok().getLoc();
2302 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2303 Error(E, "']' expected");
2304 return MatchOperand_ParseFail;
2307 Parser.Lex(); // Eat right bracket token.
2309 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2317 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
2318 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
// Match a coprocessor operand name of the form <CoprocOp><digits>, e.g.
// "p7" (CoprocOp='p') or "c3" (CoprocOp='c'), returning the numeric value
// or (per its callers) -1 on no match. NOTE(review): this excerpt is
// heavily elided — the length-2 digit handling and the returns for names
// 0-9 are not visible; the switch shown covers only "p1x"/"c1x" (10-15).
2320 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2321 // Use the same layout as the tablegen'erated register name matcher. Ugly,
2323 switch (Name.size()) {
2326 if (Name[0] != CoprocOp)
// Three-character names must be "<op>1<digit>" for coprocessors 10-15.
2343 if (Name[0] != CoprocOp || Name[1] != '1')
2347 case '0': return 10;
2348 case '1': return 11;
2349 case '2': return 12;
2350 case '3': return 13;
2351 case '4': return 14;
2352 case '5': return 15;
2360 /// parseITCondCode - Try to parse a condition code for an IT instruction.
// Parse the condition-code operand of an IT instruction (eq, ne, ...).
// On a match the identifier is consumed and a CondCode operand is pushed.
// NOTE(review): the StringSwitch's .Default and the no-match check between
// lines 2384 and 2387 are elided from this excerpt.
2361 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2362 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2363 SMLoc S = Parser.getTok().getLoc();
2364 const AsmToken &Tok = Parser.getTok();
2365 if (!Tok.is(AsmToken::Identifier))
2366 return MatchOperand_NoMatch;
// Note the aliases: "cs" == "hs" and "cc" == "lo".
2367 unsigned CC = StringSwitch<unsigned>(Tok.getString())
2368 .Case("eq", ARMCC::EQ)
2369 .Case("ne", ARMCC::NE)
2370 .Case("hs", ARMCC::HS)
2371 .Case("cs", ARMCC::HS)
2372 .Case("lo", ARMCC::LO)
2373 .Case("cc", ARMCC::LO)
2374 .Case("mi", ARMCC::MI)
2375 .Case("pl", ARMCC::PL)
2376 .Case("vs", ARMCC::VS)
2377 .Case("vc", ARMCC::VC)
2378 .Case("hi", ARMCC::HI)
2379 .Case("ls", ARMCC::LS)
2380 .Case("ge", ARMCC::GE)
2381 .Case("lt", ARMCC::LT)
2382 .Case("gt", ARMCC::GT)
2383 .Case("le", ARMCC::LE)
2384 .Case("al", ARMCC::AL)
2387 return MatchOperand_NoMatch;
2388 Parser.Lex(); // Eat the token.
2390 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2392 return MatchOperand_Success;
2395 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2396 /// token must be an Identifier when called, and if it is a coprocessor
2397 /// number, the token is eaten and the operand is added to the operand list.
// Parse a coprocessor number operand ("p0".."p15"). On a match the token
// is consumed and a CoprocNum operand is pushed. NOTE(review): the
// `if (Num == -1)` guard before the NoMatch return is elided here.
2398 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2399 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2400 SMLoc S = Parser.getTok().getLoc();
2401 const AsmToken &Tok = Parser.getTok();
2402 if (Tok.isNot(AsmToken::Identifier))
2403 return MatchOperand_NoMatch;
2405 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2407 return MatchOperand_NoMatch;
2409 Parser.Lex(); // Eat identifier token.
2410 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2411 return MatchOperand_Success;
2414 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2415 /// token must be an Identifier when called, and if it is a coprocessor
2416 /// number, the token is eaten and the operand is added to the operand list.
// Parse a coprocessor register operand ("c0".."c15"); mirrors
// parseCoprocNumOperand but matches the 'c' prefix. NOTE(review): the
// `if (Reg == -1)` guard before the NoMatch return is elided here.
2417 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2418 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2419 SMLoc S = Parser.getTok().getLoc();
2420 const AsmToken &Tok = Parser.getTok();
2421 if (Tok.isNot(AsmToken::Identifier))
2422 return MatchOperand_NoMatch;
2424 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2426 return MatchOperand_NoMatch;
2428 Parser.Lex(); // Eat identifier token.
2429 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2430 return MatchOperand_Success;
2433 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2434 /// coproc_option : '{' imm0_255 '}'
// Parse a coprocessor option operand: '{' imm0_255 '}'. The immediate must
// fold to a constant in [0, 255]. NOTE(review): the declaration of `Expr`
// and an Error() call on the missing-'}' path appear to be elided here.
2435 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2436 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2437 SMLoc S = Parser.getTok().getLoc();
2439 // If this isn't a '{', this isn't a coprocessor immediate operand.
2440 if (Parser.getTok().isNot(AsmToken::LCurly))
2441 return MatchOperand_NoMatch;
2442 Parser.Lex(); // Eat the '{'
2445 SMLoc Loc = Parser.getTok().getLoc();
2446 if (getParser().ParseExpression(Expr)) {
2447 Error(Loc, "illegal expression");
2448 return MatchOperand_ParseFail;
2450 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2451 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2452 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2453 return MatchOperand_ParseFail;
2455 int Val = CE->getValue();
2457 // Check for and consume the closing '}'
2458 if (Parser.getTok().isNot(AsmToken::RCurly))
2459 return MatchOperand_ParseFail;
2460 SMLoc E = Parser.getTok().getLoc();
2461 Parser.Lex(); // Eat the '}'
2463 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2464 return MatchOperand_Success;
2467 // For register list parsing, we need to map from raw GPR register numbering
2468 // to the enumeration values. The enumeration values aren't sorted by
2469 // register number due to our using "sp", "lr" and "pc" as canonical names.
// Successor function for register-range expansion ({r0-r4} etc.). GPRs need
// the explicit table below because the canonical names sp/lr/pc break the
// enum's numeric ordering; note the range wraps PC back to R0.
// NOTE(review): the non-GPR `return Reg + 1;` path and the `switch (Reg) {`
// header are elided from this excerpt.
2470 static unsigned getNextRegister(unsigned Reg) {
2471 // If this is a GPR, we need to do it manually, otherwise we can rely
2472 // on the sort ordering of the enumeration since the other reg-classes
2474 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2477 default: assert(0 && "Invalid GPR number!");
2478 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
2479 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
2480 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
2481 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
2482 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
2483 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2484 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
2485 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
2489 // Return the low-subreg of a given Q register.
// Return the low D sub-register of a Q register: Qn maps to D(2n). Used to
// expand Q registers into D-register pairs in register lists.
// NOTE(review): the `switch (QReg) {` header is elided from this excerpt.
2490 static unsigned getDRegFromQReg(unsigned QReg) {
2492 default: llvm_unreachable("expected a Q register!");
2493 case ARM::Q0: return ARM::D0;
2494 case ARM::Q1: return ARM::D2;
2495 case ARM::Q2: return ARM::D4;
2496 case ARM::Q3: return ARM::D6;
2497 case ARM::Q4: return ARM::D8;
2498 case ARM::Q5: return ARM::D10;
2499 case ARM::Q6: return ARM::D12;
2500 case ARM::Q7: return ARM::D14;
2501 case ARM::Q8: return ARM::D16;
2502 case ARM::Q9: return ARM::D18;
2503 case ARM::Q10: return ARM::D20;
2504 case ARM::Q11: return ARM::D22;
2505 case ARM::Q12: return ARM::D24;
2506 case ARM::Q13: return ARM::D26;
2507 case ARM::Q14: return ARM::D28;
2508 case ARM::Q15: return ARM::D30;
2512 /// Parse a register list.
// Parse a '{r0, r2-r5, ...}' register list into a single RegList operand.
// The register class of the first register (GPR/DPR/SPR) fixes the class
// for the whole list; Q registers are accepted and expanded to D pairs.
// NOTE(review): this excerpt is elided — the function's return-type line,
// the declaration of `OldReg`/`isQReg` updates, and several early-exit and
// closing-brace lines are not visible below.
2514 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2515 assert(Parser.getTok().is(AsmToken::LCurly) &&
2516 "Token is not a Left Curly Brace");
2517 SMLoc S = Parser.getTok().getLoc();
2518 Parser.Lex(); // Eat '{' token.
2519 SMLoc RegLoc = Parser.getTok().getLoc();
2521 // Check the first register in the list to see what register class
2522 // this is a list of.
2523 int Reg = tryParseRegister();
2525 return Error(RegLoc, "register expected");
2527 // The reglist instructions have at most 16 registers, so reserve
2528 // space for that many.
2529 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2531 // Allow Q regs and just interpret them as the two D sub-registers.
2532 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2533 Reg = getDRegFromQReg(Reg);
2534 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2537 const MCRegisterClass *RC;
2538 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2539 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2540 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2541 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2542 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2543 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2545 return Error(RegLoc, "invalid register in register list");
2547 // Store the register.
2548 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2550 // This starts immediately after the first register token in the list,
2551 // so we can see either a comma or a minus (range separator) as a legal
2553 while (Parser.getTok().is(AsmToken::Comma) ||
2554 Parser.getTok().is(AsmToken::Minus)) {
// Range form: "<first>-<last>".
2555 if (Parser.getTok().is(AsmToken::Minus)) {
2556 Parser.Lex(); // Eat the minus.
2557 SMLoc EndLoc = Parser.getTok().getLoc();
2558 int EndReg = tryParseRegister();
2560 return Error(EndLoc, "register expected");
2561 // Allow Q regs and just interpret them as the two D sub-registers.
2562 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2563 EndReg = getDRegFromQReg(EndReg) + 1;
2564 // If the register is the same as the start reg, there's nothing
2568 // The register must be in the same register class as the first.
2569 if (!RC->contains(EndReg))
2570 return Error(EndLoc, "invalid register in register list");
2571 // Ranges must go from low to high.
2572 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2573 return Error(EndLoc, "bad range in register list");
2575 // Add all the registers in the range to the register list.
2576 while (Reg != EndReg) {
2577 Reg = getNextRegister(Reg);
2578 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
// Single-register form: ", <reg>".
2582 Parser.Lex(); // Eat the comma.
2583 RegLoc = Parser.getTok().getLoc();
2585 Reg = tryParseRegister();
2587 return Error(RegLoc, "register expected");
2588 // Allow Q regs and just interpret them as the two D sub-registers.
2589 bool isQReg = false;
2590 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2591 Reg = getDRegFromQReg(Reg);
2594 // The register must be in the same register class as the first.
2595 if (!RC->contains(Reg))
2596 return Error(RegLoc, "invalid register in register list");
2597 // List must be monotonically increasing.
2598 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
2599 return Error(RegLoc, "register list not in ascending order");
2600 // VFP register lists must also be contiguous.
2601 // It's OK to use the enumeration values directly here rather, as the
2602 // VFP register classes have the enum sorted properly.
2603 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2605 return Error(RegLoc, "non-contiguous register range");
2606 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
// For a Q register, also push its high D sub-register.
2608 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2611 SMLoc E = Parser.getTok().getLoc();
2612 if (Parser.getTok().isNot(AsmToken::RCurly))
2613 return Error(E, "'}' expected");
2614 Parser.Lex(); // Eat '}' token.
2616 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2620 // Helper function to parse the lane index for vector lists.
2621 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2622 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2623 Index = 0; // Always return a defined index value.
2624 if (Parser.getTok().is(AsmToken::LBrac)) {
2625 Parser.Lex(); // Eat the '['.
2626 if (Parser.getTok().is(AsmToken::RBrac)) {
2627 // "Dn[]" is the 'all lanes' syntax.
2628 LaneKind = AllLanes;
2629 Parser.Lex(); // Eat the ']'.
2630 return MatchOperand_Success;
2632 if (Parser.getTok().is(AsmToken::Integer)) {
2633 int64_t Val = Parser.getTok().getIntVal();
2634 // Make this range check context sensitive for .8, .16, .32.
2635 if (Val < 0 && Val > 7)
2636 Error(Parser.getTok().getLoc(), "lane index out of range");
2638 LaneKind = IndexedLane;
2639 Parser.Lex(); // Eat the token;
2640 if (Parser.getTok().isNot(AsmToken::RBrac))
2641 Error(Parser.getTok().getLoc(), "']' expected");
2642 Parser.Lex(); // Eat the ']'.
2643 return MatchOperand_Success;
2645 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2646 return MatchOperand_ParseFail;
2649 return MatchOperand_Success;
2652 // parse a vector register list
// Parse a NEON vector register list: either a bare D/Q register (gas
// extension, treated as a one/two-element list) or the full '{...}' form
// with comma-separated registers and D-register ranges. Each element may
// carry a lane qualifier, and all elements must agree on lane kind/index.
// NOTE(review): this excerpt is heavily elided — declarations of
// `LaneIndex`/`Count`/`OldReg`, the `switch (LaneKind)` headers, many
// `case` labels, `break`s and closing braces between the lines below are
// not visible; adjacent lines may belong to different branches.
2653 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2654 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2655 VectorLaneTy LaneKind;
2657 SMLoc S = Parser.getTok().getLoc();
2658 // As an extension (to match gas), support a plain D register or Q register
2659 // (without encosing curly braces) as a single or double entry list,
2661 if (Parser.getTok().is(AsmToken::Identifier)) {
2662 int Reg = tryParseRegister();
2664 return MatchOperand_NoMatch;
2665 SMLoc E = Parser.getTok().getLoc();
// Bare D register: one-element list, dispatched on the lane kind parsed.
2666 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2667 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2668 if (Res != MatchOperand_Success)
2672 assert(0 && "unexpected lane kind!");
2674 E = Parser.getTok().getLoc();
2675 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
2678 E = Parser.getTok().getLoc();
2679 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2682 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2686 return MatchOperand_Success;
// Bare Q register: expand to its two D sub-registers (two-element list).
2688 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2689 Reg = getDRegFromQReg(Reg);
2690 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2691 if (Res != MatchOperand_Success)
2695 assert(0 && "unexpected lane kind!");
2697 E = Parser.getTok().getLoc();
2698 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
2701 E = Parser.getTok().getLoc();
2702 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2705 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2709 return MatchOperand_Success;
2711 Error(S, "vector register expected");
2712 return MatchOperand_ParseFail;
// Full '{...}' list form from here on.
2715 if (Parser.getTok().isNot(AsmToken::LCurly))
2716 return MatchOperand_NoMatch;
2718 Parser.Lex(); // Eat '{' token.
2719 SMLoc RegLoc = Parser.getTok().getLoc();
2721 int Reg = tryParseRegister();
2723 Error(RegLoc, "register expected");
2724 return MatchOperand_ParseFail;
2727 unsigned FirstReg = Reg;
2728 // The list is of D registers, but we also allow Q regs and just interpret
2729 // them as the two D sub-registers.
2730 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2731 FirstReg = Reg = getDRegFromQReg(Reg);
// The first element's lane qualifier sets the expectation for the rest.
2735 if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2736 return MatchOperand_ParseFail;
2738 while (Parser.getTok().is(AsmToken::Comma) ||
2739 Parser.getTok().is(AsmToken::Minus)) {
// Range form: "<first>-<last>", D registers only.
2740 if (Parser.getTok().is(AsmToken::Minus)) {
2741 Parser.Lex(); // Eat the minus.
2742 SMLoc EndLoc = Parser.getTok().getLoc();
2743 int EndReg = tryParseRegister();
2745 Error(EndLoc, "register expected");
2746 return MatchOperand_ParseFail;
2748 // Allow Q regs and just interpret them as the two D sub-registers.
2749 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2750 EndReg = getDRegFromQReg(EndReg) + 1;
2751 // If the register is the same as the start reg, there's nothing
2755 // The register must be in the same register class as the first.
2756 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2757 Error(EndLoc, "invalid register in register list");
2758 return MatchOperand_ParseFail;
2760 // Ranges must go from low to high.
2762 Error(EndLoc, "bad range in register list");
2763 return MatchOperand_ParseFail;
2765 // Parse the lane specifier if present.
2766 VectorLaneTy NextLaneKind;
2767 unsigned NextLaneIndex;
2768 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2769 return MatchOperand_ParseFail;
2770 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2771 Error(EndLoc, "mismatched lane index in register list");
2772 return MatchOperand_ParseFail;
2774 EndLoc = Parser.getTok().getLoc();
2776 // Add all the registers in the range to the register list.
2777 Count += EndReg - Reg;
// Single-register form: ", <reg>".
2781 Parser.Lex(); // Eat the comma.
2782 RegLoc = Parser.getTok().getLoc();
2784 Reg = tryParseRegister();
2786 Error(RegLoc, "register expected");
2787 return MatchOperand_ParseFail;
2789 // vector register lists must be contiguous.
2790 // It's OK to use the enumeration values directly here rather, as the
2791 // VFP register classes have the enum sorted properly.
2793 // The list is of D registers, but we also allow Q regs and just interpret
2794 // them as the two D sub-registers.
2795 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2796 Reg = getDRegFromQReg(Reg);
2797 if (Reg != OldReg + 1) {
2798 Error(RegLoc, "non-contiguous register range");
2799 return MatchOperand_ParseFail;
2803 // Parse the lane specifier if present.
2804 VectorLaneTy NextLaneKind;
2805 unsigned NextLaneIndex;
2806 SMLoc EndLoc = Parser.getTok().getLoc();
2807 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2808 return MatchOperand_ParseFail;
2809 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2810 Error(EndLoc, "mismatched lane index in register list");
2811 return MatchOperand_ParseFail;
2815 // Normal D register. Just check that it's contiguous and keep going.
2816 if (Reg != OldReg + 1) {
2817 Error(RegLoc, "non-contiguous register range");
2818 return MatchOperand_ParseFail;
2821 // Parse the lane specifier if present.
2822 VectorLaneTy NextLaneKind;
2823 unsigned NextLaneIndex;
2824 SMLoc EndLoc = Parser.getTok().getLoc();
2825 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2826 return MatchOperand_ParseFail;
2827 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2828 Error(EndLoc, "mismatched lane index in register list");
2829 return MatchOperand_ParseFail;
2833 SMLoc E = Parser.getTok().getLoc();
2834 if (Parser.getTok().isNot(AsmToken::RCurly)) {
2835 Error(E, "'}' expected");
2836 return MatchOperand_ParseFail;
2838 Parser.Lex(); // Eat '}' token.
// Finally, build the operand variant matching the common lane kind.
2842 assert(0 && "unexpected lane kind in register list.");
2844 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
2847 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2851 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2855 return MatchOperand_Success;
2858 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
// Parse a DSB/DMB barrier-option operand (sy, ish, oshst, ...). Both the
// ARMv7 names and the legacy aliases (sh/un/...) are accepted.
// NOTE(review): the StringSwitch's final .Default and the `Opt == ~0U`
// check before the NoMatch return are elided from this excerpt.
2859 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2860 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2861 SMLoc S = Parser.getTok().getLoc();
2862 const AsmToken &Tok = Parser.getTok();
2863 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2864 StringRef OptStr = Tok.getString();
2866 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
2867 .Case("sy", ARM_MB::SY)
2868 .Case("st", ARM_MB::ST)
2869 .Case("sh", ARM_MB::ISH)
2870 .Case("ish", ARM_MB::ISH)
2871 .Case("shst", ARM_MB::ISHST)
2872 .Case("ishst", ARM_MB::ISHST)
2873 .Case("nsh", ARM_MB::NSH)
2874 .Case("un", ARM_MB::NSH)
2875 .Case("nshst", ARM_MB::NSHST)
2876 .Case("unst", ARM_MB::NSHST)
2877 .Case("osh", ARM_MB::OSH)
2878 .Case("oshst", ARM_MB::OSHST)
2882 return MatchOperand_NoMatch;
2884 Parser.Lex(); // Eat identifier token.
2885 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
2886 return MatchOperand_Success;
2889 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
// Parse the CPS iflags operand: any combination of the letters a/i/f
// (each at most once), or the literal "none" for an empty mask.
// NOTE(review): the StringSwitch's .Default, the `IFlags |= Flag;`
// accumulation, and the loop's closing braces are elided from this excerpt.
2890 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2891 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2892 SMLoc S = Parser.getTok().getLoc();
2893 const AsmToken &Tok = Parser.getTok();
2894 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2895 StringRef IFlagsStr = Tok.getString();
2897 // An iflags string of "none" is interpreted to mean that none of the AIF
2898 // bits are set. Not a terribly useful instruction, but a valid encoding.
2899 unsigned IFlags = 0;
2900 if (IFlagsStr != "none") {
// Examine the string one letter at a time; each letter maps to one bit.
2901 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
2902 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
2903 .Case("a", ARM_PROC::A)
2904 .Case("i", ARM_PROC::I)
2905 .Case("f", ARM_PROC::F)
2908 // If some specific iflag is already set, it means that some letter is
2909 // present more than once, this is not acceptable.
2910 if (Flag == ~0U || (IFlags & Flag))
2911 return MatchOperand_NoMatch;
2917 Parser.Lex(); // Eat identifier token.
2918 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
2919 return MatchOperand_Success;
2922 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
// Parse the MSR mask operand. Two regimes: M-class system registers
// (apsr, primask, control, ...) map to small integer codes; A/R-class
// "(c|s)psr_<flags>" forms build a 5-bit mask (bit 4 = spsr, bits 0-3 =
// c/x/s/f fields). NOTE(review): this excerpt is elided — the
// `if (isMClass())`-style guard that makes the two `unsigned FlagsVal`
// declarations live in separate scopes, several StringSwitch .Case lines,
// the .Default calls, and the flag-accumulation lines are not visible.
2923 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2924 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2925 SMLoc S = Parser.getTok().getLoc();
2926 const AsmToken &Tok = Parser.getTok();
2927 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2928 StringRef Mask = Tok.getString();
// M-class path: symbolic system-register names to integer codes.
2931 // See ARMv6-M 10.1.1
2932 unsigned FlagsVal = StringSwitch<unsigned>(Mask)
2942 .Case("primask", 16)
2943 .Case("basepri", 17)
2944 .Case("basepri_max", 18)
2945 .Case("faultmask", 19)
2946 .Case("control", 20)
2949 if (FlagsVal == ~0U)
2950 return MatchOperand_NoMatch;
2952 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
2953 // basepri, basepri_max and faultmask only valid for V7m.
2954 return MatchOperand_NoMatch;
2956 Parser.Lex(); // Eat identifier token.
2957 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2958 return MatchOperand_Success;
// A/R-class path: split "<specreg>_<flags>" and encode the field mask.
2961 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
2962 size_t Start = 0, Next = Mask.find('_');
2963 StringRef Flags = "";
2964 std::string SpecReg = Mask.slice(Start, Next).lower();
2965 if (Next != StringRef::npos)
2966 Flags = Mask.slice(Next+1, Mask.size());
2968 // FlagsVal contains the complete mask:
2970 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2971 unsigned FlagsVal = 0;
2973 if (SpecReg == "apsr") {
2974 FlagsVal = StringSwitch<unsigned>(Flags)
2975 .Case("nzcvq", 0x8) // same as CPSR_f
2976 .Case("g", 0x4) // same as CPSR_s
2977 .Case("nzcvqg", 0xc) // same as CPSR_fs
2980 if (FlagsVal == ~0U) {
2982 return MatchOperand_NoMatch;
2984 FlagsVal = 8; // No flag
2986 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
2987 if (Flags == "all") // cpsr_all is an alias for cpsr_fc
2989 for (int i = 0, e = Flags.size(); i != e; ++i) {
2990 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
2997 // If some specific flag is already set, it means that some letter is
2998 // present more than once, this is not acceptable.
2999 if (FlagsVal == ~0U || (FlagsVal & Flag))
3000 return MatchOperand_NoMatch;
3003 } else // No match for special register.
3004 return MatchOperand_NoMatch;
3006 // Special register without flags is NOT equivalent to "fc" flags.
3007 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
3008 // two lines would enable gas compatibility at the expense of breaking
3014 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3015 if (SpecReg == "spsr")
3018 Parser.Lex(); // Eat identifier token.
3019 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3020 return MatchOperand_Success;
// Parse a PKH-style shift operand: the keyword Op (e.g. "lsl"/"asr", either
// case), then '#' and a constant in [Low, High]. The constant is pushed as
// an immediate operand.
3023 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3024 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3025 int Low, int High) {
3026 const AsmToken &Tok = Parser.getTok();
3027 if (Tok.isNot(AsmToken::Identifier)) {
3028 Error(Parser.getTok().getLoc(), Op + " operand expected.");
3029 return MatchOperand_ParseFail;
// Only the exact all-lower or all-upper spelling of Op is accepted.
3031 StringRef ShiftName = Tok.getString();
3032 std::string LowerOp = Op.lower();
3033 std::string UpperOp = Op.upper();
3034 if (ShiftName != LowerOp && ShiftName != UpperOp) {
3035 Error(Parser.getTok().getLoc(), Op + " operand expected.");
3036 return MatchOperand_ParseFail;
3038 Parser.Lex(); // Eat shift type token.
3040 // There must be a '#' and a shift amount.
3041 if (Parser.getTok().isNot(AsmToken::Hash)) {
3042 Error(Parser.getTok().getLoc(), "'#' expected");
3043 return MatchOperand_ParseFail;
3045 Parser.Lex(); // Eat hash token.
3047 const MCExpr *ShiftAmount;
3048 SMLoc Loc = Parser.getTok().getLoc();
3049 if (getParser().ParseExpression(ShiftAmount)) {
3050 Error(Loc, "illegal expression");
3051 return MatchOperand_ParseFail;
// The amount must fold to a constant so it can be range-checked here.
3053 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3055 Error(Loc, "constant expression expected");
3056 return MatchOperand_ParseFail;
3058 int Val = CE->getValue();
3059 if (Val < Low || Val > High) {
3060 Error(Loc, "immediate value out of range");
3061 return MatchOperand_ParseFail;
3064 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3066 return MatchOperand_Success;
// Parse the SETEND operand: "be" or "le", encoded as an immediate (the
// .Case mappings and .Default, plus the `Val == -1` check before the error
// at line 3084, are elided from this excerpt).
3069 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3070 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3071 const AsmToken &Tok = Parser.getTok();
3072 SMLoc S = Tok.getLoc();
3073 if (Tok.isNot(AsmToken::Identifier)) {
3074 Error(Tok.getLoc(), "'be' or 'le' operand expected");
3075 return MatchOperand_ParseFail;
3077 int Val = StringSwitch<int>(Tok.getString())
3081 Parser.Lex(); // Eat the token.
3084 Error(Tok.getLoc(), "'be' or 'le' operand expected");
3085 return MatchOperand_ParseFail;
// Wrap the endianness selector in a constant-immediate operand.
3087 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3089 S, Parser.getTok().getLoc()));
3090 return MatchOperand_Success;
3093 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3094 /// instructions. Legal values are:
3095 /// lsl #n 'n' in [0,31]
3096 /// asr #n 'n' in [1,32]
3097 /// n == 32 encoded as n == 0.
3098 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3099 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3100 const AsmToken &Tok = Parser.getTok();
3101 SMLoc S = Tok.getLoc();
3102 if (Tok.isNot(AsmToken::Identifier)) {
3103 Error(S, "shift operator 'asr' or 'lsl' expected");
3104 return MatchOperand_ParseFail;
3106 StringRef ShiftName = Tok.getString();
3108 if (ShiftName == "lsl" || ShiftName == "LSL")
3110 else if (ShiftName == "asr" || ShiftName == "ASR")
3113 Error(S, "shift operator 'asr' or 'lsl' expected");
3114 return MatchOperand_ParseFail;
3116 Parser.Lex(); // Eat the operator.
3118 // A '#' and a shift amount.
3119 if (Parser.getTok().isNot(AsmToken::Hash)) {
3120 Error(Parser.getTok().getLoc(), "'#' expected");
3121 return MatchOperand_ParseFail;
3123 Parser.Lex(); // Eat hash token.
3125 const MCExpr *ShiftAmount;
3126 SMLoc E = Parser.getTok().getLoc();
3127 if (getParser().ParseExpression(ShiftAmount)) {
3128 Error(E, "malformed shift expression");
3129 return MatchOperand_ParseFail;
3131 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3133 Error(E, "shift amount must be an immediate");
3134 return MatchOperand_ParseFail;
3137 int64_t Val = CE->getValue();
3139 // Shift amount must be in [1,32]
3140 if (Val < 1 || Val > 32) {
3141 Error(E, "'asr' shift amount must be in range [1,32]");
3142 return MatchOperand_ParseFail;
3144 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3145 if (isThumb() && Val == 32) {
3146 Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3147 return MatchOperand_ParseFail;
3149 if (Val == 32) Val = 0;
3151 // Shift amount must be in [1,32]
3152 if (Val < 0 || Val > 31) {
3153 Error(E, "'lsr' shift amount must be in range [0,31]");
3154 return MatchOperand_ParseFail;
3158 E = Parser.getTok().getLoc();
3159 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3161 return MatchOperand_Success;
3164 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3165 /// of instructions. Legal values are:
3166 ///     ror #n  'n' in {0, 8, 16, 24}
3167 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3168 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3169 const AsmToken &Tok = Parser.getTok();
3170 SMLoc S = Tok.getLoc();
// The rotate operand is optional, so a non-'ror' token is NoMatch (nothing
// has been consumed yet) rather than a hard error.
3171 if (Tok.isNot(AsmToken::Identifier))
3172 return MatchOperand_NoMatch;
3173 StringRef ShiftName = Tok.getString();
3174 if (ShiftName != "ror" && ShiftName != "ROR")
3175 return MatchOperand_NoMatch;
3176 Parser.Lex(); // Eat the operator.
// Once 'ror' has been eaten, anything malformed is a hard parse failure.
3178 // A '#' and a rotate amount.
3179 if (Parser.getTok().isNot(AsmToken::Hash)) {
3180 Error(Parser.getTok().getLoc(), "'#' expected");
3181 return MatchOperand_ParseFail;
3183 Parser.Lex(); // Eat hash token.
3185 const MCExpr *ShiftAmount;
3186 SMLoc E = Parser.getTok().getLoc();
3187 if (getParser().ParseExpression(ShiftAmount)) {
3188 Error(E, "malformed rotate expression");
3189 return MatchOperand_ParseFail;
3191 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3193 Error(E, "rotate amount must be an immediate");
3194 return MatchOperand_ParseFail;
3197 int64_t Val = CE->getValue();
3198 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3199 // normally, zero is represented in asm by omitting the rotate operand
3201 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3202 Error(E, "'ror' rotate amount must be 8, 16, or 24");
3203 return MatchOperand_ParseFail;
3206 E = Parser.getTok().getLoc();
3207 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3209 return MatchOperand_Success;
3212 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3213 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Parse the '#lsb, #width' descriptor of a BFC/BFI/SBFX/UBFX-style
// instruction and push a single bitfield operand onto Operands.
3214 SMLoc S = Parser.getTok().getLoc();
3215 // The bitfield descriptor is really two operands, the LSB and the width.
3216 if (Parser.getTok().isNot(AsmToken::Hash)) {
3217 Error(Parser.getTok().getLoc(), "'#' expected");
3218 return MatchOperand_ParseFail;
3220 Parser.Lex(); // Eat hash token.
3222 const MCExpr *LSBExpr;
3223 SMLoc E = Parser.getTok().getLoc();
3224 if (getParser().ParseExpression(LSBExpr)) {
3225 Error(E, "malformed immediate expression");
3226 return MatchOperand_ParseFail;
3228 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3230 Error(E, "'lsb' operand must be an immediate");
3231 return MatchOperand_ParseFail;
3234 int64_t LSB = CE->getValue();
3235 // The LSB must be in the range [0,31]
3236 if (LSB < 0 || LSB > 31) {
3237 Error(E, "'lsb' operand must be in the range [0,31]");
3238 return MatchOperand_ParseFail;
3240 E = Parser.getTok().getLoc();
3242 // Expect another immediate operand.
3243 if (Parser.getTok().isNot(AsmToken::Comma)) {
3244 Error(Parser.getTok().getLoc(), "too few operands");
3245 return MatchOperand_ParseFail;
3247 Parser.Lex(); // Eat comma token.
3248 if (Parser.getTok().isNot(AsmToken::Hash)) {
3249 Error(Parser.getTok().getLoc(), "'#' expected");
3250 return MatchOperand_ParseFail;
3252 Parser.Lex(); // Eat hash token.
3254 const MCExpr *WidthExpr;
3255 if (getParser().ParseExpression(WidthExpr)) {
3256 Error(E, "malformed immediate expression");
3257 return MatchOperand_ParseFail;
3259 CE = dyn_cast<MCConstantExpr>(WidthExpr);
3261 Error(E, "'width' operand must be an immediate");
3262 return MatchOperand_ParseFail;
3265 int64_t Width = CE->getValue();
3266 // The width must be in the range [1,32-lsb]
3267 if (Width < 1 || Width > 32 - LSB) {
3268 Error(E, "'width' operand must be in the range [1,32-lsb]");
3269 return MatchOperand_ParseFail;
3271 E = Parser.getTok().getLoc();
3273 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3275 return MatchOperand_Success;
3278 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3279 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3280 // Check for a post-index addressing register operand. Specifically:
3281 //  postidx_reg := '+' register {, shift}
3282 //              | '-' register {, shift}
3283 //              | register {, shift}
3285 // This method must return MatchOperand_NoMatch without consuming any tokens
3286 // in the case where there is no match, as other alternatives take other
// parse paths. Once a '+'/'-' has been eaten, failure to find a register
// becomes a hard error instead.
3288 AsmToken Tok = Parser.getTok();
3289 SMLoc S = Tok.getLoc();
3290 bool haveEaten = false;
3293 if (Tok.is(AsmToken::Plus)) {
3294 Parser.Lex(); // Eat the '+' token.
3296 } else if (Tok.is(AsmToken::Minus)) {
3297 Parser.Lex(); // Eat the '-' token.
3301 if (Parser.getTok().is(AsmToken::Identifier))
3302 Reg = tryParseRegister();
3305 return MatchOperand_NoMatch;
3306 Error(Parser.getTok().getLoc(), "register expected");
3307 return MatchOperand_ParseFail;
3309 SMLoc E = Parser.getTok().getLoc();
// An optional shift after the register, e.g. "r2, lsl #2".
3311 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3312 unsigned ShiftImm = 0;
3313 if (Parser.getTok().is(AsmToken::Comma)) {
3314 Parser.Lex(); // Eat the ','.
3315 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3316 return MatchOperand_ParseFail;
3319 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3322 return MatchOperand_Success;
3325 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3326 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3327 // Check for a post-index addressing register operand. Specifically:
3328 //  am3offset := '+' register
// (and the immediate form '#[+/-]imm', handled first below)
3335 // This method must return MatchOperand_NoMatch without consuming any tokens
3336 // in the case where there is no match, as other alternatives take other
// parse paths.
3338 AsmToken Tok = Parser.getTok();
3339 SMLoc S = Tok.getLoc();
3341 // Do immediates first, as we always parse those if we have a '#'.
3342 if (Parser.getTok().is(AsmToken::Hash)) {
3343 Parser.Lex(); // Eat the '#'.
3344 // Explicitly look for a '-', as we need to encode negative zero
// differently from positive zero.
3346 bool isNegative = Parser.getTok().is(AsmToken::Minus);
3347 const MCExpr *Offset;
3348 if (getParser().ParseExpression(Offset))
3349 return MatchOperand_ParseFail;
3350 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3352 Error(S, "constant expression expected");
3353 return MatchOperand_ParseFail;
3355 SMLoc E = Tok.getLoc();
3356 // Negative zero is encoded as the flag value INT32_MIN.
3357 int32_t Val = CE->getValue();
3358 if (isNegative && Val == 0)
3362 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3364 return MatchOperand_Success;
// Otherwise try the register form, optionally signed with '+'/'-'.
3368 bool haveEaten = false;
3371 if (Tok.is(AsmToken::Plus)) {
3372 Parser.Lex(); // Eat the '+' token.
3374 } else if (Tok.is(AsmToken::Minus)) {
3375 Parser.Lex(); // Eat the '-' token.
3379 if (Parser.getTok().is(AsmToken::Identifier))
3380 Reg = tryParseRegister();
3383 return MatchOperand_NoMatch;
3384 Error(Parser.getTok().getLoc(), "register expected");
3385 return MatchOperand_ParseFail;
3387 SMLoc E = Parser.getTok().getLoc();
// am3offset registers never carry a shift.
3389 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3392 return MatchOperand_Success;
3395 /// cvtT2LdrdPre - Convert parsed operands to MCInst.
3396 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3397 /// when they refer multiple MIOperands inside a single one.
3399 cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3400 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Layout: [2]/[3] register operands, then the writeback dummy, then the
// imm8s4-offset memory operand from [4], and the predicate from [1].
3402 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3403 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3404 // Create a writeback register dummy placeholder.
3405 Inst.addOperand(MCOperand::CreateReg(0));
3407 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3409 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3413 /// cvtT2StrdPre - Convert parsed operands to MCInst.
3414 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3415 /// when they refer multiple MIOperands inside a single one.
3417 cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3418 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// For the store form the writeback dummy is emitted first, then the two
// register operands, the memory operand from [4], and the predicate.
3419 // Create a writeback register dummy placeholder.
3420 Inst.addOperand(MCOperand::CreateReg(0));
3422 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3423 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3425 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3427 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3431 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3432 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3433 /// when they refer multiple MIOperands inside a single one.
3435 cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3436 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3437 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3439 // Create a writeback register dummy placeholder.
// NOTE(review): the placeholder here is CreateImm(0), whereas cvtT2LdrdPre
// uses CreateReg(0) for the same role — confirm which is intended.
3440 Inst.addOperand(MCOperand::CreateImm(0));
3442 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3443 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3447 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3448 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3449 /// when they refer multiple MIOperands inside a single one.
3451 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3452 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form: writeback dummy first, then register [2], imm8-offset memory
// operand [3], and the predicate [1].
3453 // Create a writeback register dummy placeholder.
3454 Inst.addOperand(MCOperand::CreateImm(0));
3455 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3456 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3457 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3461 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3462 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3463 /// when they refer multiple MIOperands inside a single one.
3465 cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3466 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Load form: register [2] first, then the writeback dummy, the addrmode2
// memory operand [3] (3 MIOperands), and the predicate [1].
3467 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3469 // Create a writeback register dummy placeholder.
3470 Inst.addOperand(MCOperand::CreateImm(0));
3472 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3473 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3477 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3478 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3479 /// when they refer multiple MIOperands inside a single one.
3481 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3482 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Load form: register [2], writeback dummy, imm12-offset memory operand
// [3], predicate [1].
3483 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3485 // Create a writeback register dummy placeholder.
3486 Inst.addOperand(MCOperand::CreateImm(0));
3488 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3489 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3494 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3495 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3496 /// when they refer multiple MIOperands inside a single one.
3498 cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3499 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form: writeback dummy first, then register [2], imm12-offset
// memory operand [3], and predicate [1].
3500 // Create a writeback register dummy placeholder.
3501 Inst.addOperand(MCOperand::CreateImm(0));
3502 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3503 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3504 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3508 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3509 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3510 /// when they refer multiple MIOperands inside a single one.
3512 cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3513 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form: writeback dummy, register [2], addrmode2 memory operand [3]
// (3 MIOperands), predicate [1].
3514 // Create a writeback register dummy placeholder.
3515 Inst.addOperand(MCOperand::CreateImm(0));
3516 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3517 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3518 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3522 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3523 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3524 /// when they refer multiple MIOperands inside a single one.
3526 cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3527 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form: writeback dummy, register [2], addrmode3 memory operand [3]
// (3 MIOperands), predicate [1].
3528 // Create a writeback register dummy placeholder.
3529 Inst.addOperand(MCOperand::CreateImm(0));
3530 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3531 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3532 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3536 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3537 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3538 /// when they refer multiple MIOperands inside a single one.
3540 cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3541 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Load form with post-indexed immediate: register [2], writeback dummy,
// bare base register [3], post-index imm8 [4], predicate [1].
3543 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3544 // Create a writeback register dummy placeholder.
3545 Inst.addOperand(MCOperand::CreateImm(0));
3547 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3549 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3551 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3555 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3556 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3557 /// when they refer multiple MIOperands inside a single one.
3559 cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3560 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Load form with post-indexed register: register [2], writeback dummy,
// bare base register [3], post-index register [4] (2 MIOperands),
// predicate [1].
3562 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3563 // Create a writeback register dummy placeholder.
3564 Inst.addOperand(MCOperand::CreateImm(0));
3566 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3568 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3570 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3574 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3575 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3576 /// when they refer multiple MIOperands inside a single one.
3578 cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3579 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form with post-indexed immediate: writeback dummy first, then
// register [2], bare base register [3], post-index imm8 [4], predicate [1].
3580 // Create a writeback register dummy placeholder.
3581 Inst.addOperand(MCOperand::CreateImm(0));
3583 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3585 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3587 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3589 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3593 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3594 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3595 /// when they refer multiple MIOperands inside a single one.
3597 cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3598 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Store form with post-indexed register: writeback dummy first, then
// register [2], bare base register [3], post-index register [4]
// (2 MIOperands), predicate [1].
3599 // Create a writeback register dummy placeholder.
3600 Inst.addOperand(MCOperand::CreateImm(0));
3602 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3604 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3606 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3608 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3612 /// cvtLdrdPre - Convert parsed operands to MCInst.
3613 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3614 /// when they refer multiple MIOperands inside a single one.
3616 cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3617 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// ARM-mode LDRD pre-indexed: registers [2]/[3], writeback dummy, addrmode3
// memory operand [4] (3 MIOperands), predicate [1].
3619 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3620 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3621 // Create a writeback register dummy placeholder.
3622 Inst.addOperand(MCOperand::CreateImm(0));
3624 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3626 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3630 /// cvtStrdPre - Convert parsed operands to MCInst.
3631 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3632 /// when they refer multiple MIOperands inside a single one.
3634 cvtStrdPre(MCInst &Inst, unsigned Opcode,
3635 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// ARM-mode STRD pre-indexed: writeback dummy first, then registers
// [2]/[3], addrmode3 memory operand [4], predicate [1].
3636 // Create a writeback register dummy placeholder.
3637 Inst.addOperand(MCOperand::CreateImm(0));
3639 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3640 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3642 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3644 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3648 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3649 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3650 /// when they refer multiple MIOperands inside a single one.
3652 cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3653 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Load form: register [2], writeback dummy, addrmode3 memory operand [3]
// (3 MIOperands), predicate [1].
3654 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3655 // Create a writeback register dummy placeholder.
3656 Inst.addOperand(MCOperand::CreateImm(0));
3657 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3658 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662 /// cvtThumbMultiply - Convert parsed operands to MCInst.
3663 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3664 /// when they refer multiple MIOperands inside a single one.
3666 cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3667 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3668 // The second source operand must be the same register as the destination
// operand. In the three-operand form (Operands.size() == 6) reject the
// case where neither source register matches the destination.
3670 if (Operands.size() == 6 &&
3671 (((ARMOperand*)Operands[3])->getReg() !=
3672 ((ARMOperand*)Operands[5])->getReg()) &&
3673 (((ARMOperand*)Operands[3])->getReg() !=
3674 ((ARMOperand*)Operands[4])->getReg())) {
3675 Error(Operands[3]->getStartLoc(),
3676 "destination register must match source register");
3679 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3680 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3681 // If we have a three-operand form, make sure to set Rn to be the operand
3682 // that isn't the same as Rd.
3684 if (Operands.size() == 6 &&
3685 ((ARMOperand*)Operands[4])->getReg() ==
3686 ((ARMOperand*)Operands[3])->getReg())
3688 ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
// Re-add the destination (MCInst operand 0) as the tied second source.
3689 Inst.addOperand(Inst.getOperand(0));
3690 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
// cvtVLDwbFixed - Convert a VLD with fixed-increment writeback to MCInst:
// vector list [3], writeback dummy, aligned memory operand [4], predicate.
3696 cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3697 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3699 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3700 // Create a writeback register dummy placeholder.
3701 Inst.addOperand(MCOperand::CreateImm(0));
3703 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3705 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
// cvtVLDwbRegister - Convert a VLD with register-increment writeback to
// MCInst: as cvtVLDwbFixed, plus the increment register from [5].
3710 cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3711 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3713 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3714 // Create a writeback register dummy placeholder.
3715 Inst.addOperand(MCOperand::CreateImm(0));
3717 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3719 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3721 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
// cvtVSTwbFixed - Convert a VST with fixed-increment writeback to MCInst.
// Note the store ordering: writeback dummy, then the aligned memory operand
// [4], then the vector list [3], then the predicate.
3726 cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3727 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3728 // Create a writeback register dummy placeholder.
3729 Inst.addOperand(MCOperand::CreateImm(0));
3731 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3733 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3735 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
// cvtVSTwbRegister - Convert a VST with register-increment writeback to
// MCInst: as cvtVSTwbFixed, plus the increment register from [5] between
// the memory operand and the vector list.
3740 cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3741 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3742 // Create a writeback register dummy placeholder.
3743 Inst.addOperand(MCOperand::CreateImm(0));
3745 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3747 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3749 ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3751 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3755 /// Parse an ARM memory expression, return false if successful else return true
3756 /// or an error. The first token must be a '[' when called.
///
/// Handles four bracketed forms, each optionally followed by a '!'
/// writeback marker (pushed as a separate "!" token operand):
///   [Rn]                      - base only
///   [Rn, :align]              - base plus alignment specifier
///   [Rn, #imm]                - base plus immediate offset
///   [Rn, {+/-}Rm {, shift}]   - base plus (shifted) register offset
3758 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3760 assert(Parser.getTok().is(AsmToken::LBrac) &&
3761 "Token is not a Left Bracket");
3762 S = Parser.getTok().getLoc();
3763 Parser.Lex(); // Eat left bracket token.
3765 const AsmToken &BaseRegTok = Parser.getTok();
3766 int BaseRegNum = tryParseRegister();
3767 if (BaseRegNum == -1)
3768 return Error(BaseRegTok.getLoc(), "register expected");
3770 // The next token must either be a comma or a closing bracket.
3771 const AsmToken &Tok = Parser.getTok();
3772 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3773 return Error(Tok.getLoc(), "malformed memory operand");
// Base-register-only form: [Rn].
3775 if (Tok.is(AsmToken::RBrac)) {
3777 Parser.Lex(); // Eat right bracket token.
3779 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3780 0, 0, false, S, E));
3782 // If there's a pre-indexing writeback marker, '!', just add it as a token
3783 // operand. It's rather odd, but syntactically valid.
3784 if (Parser.getTok().is(AsmToken::Exclaim)) {
3785 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3786 Parser.Lex(); // Eat the '!'.
3792 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!")
3793 Parser.Lex(); // Eat the comma.
3795 // If we have a ':', it's an alignment specifier.
3796 if (Parser.getTok().is(AsmToken::Colon)) {
3797 Parser.Lex(); // Eat the ':'.
3798 E = Parser.getTok().getLoc();
3801 if (getParser().ParseExpression(Expr))
3804 // The expression has to be a constant. Memory references with relocations
3805 // don't come through here, as they use the <label> forms of the relevant
3807 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3809 return Error (E, "constant expression expected");
// The alignment is written in bits but stored in bytes.
3812 switch (CE->getValue()) {
3814 return Error(E, "alignment specifier must be 64, 128, or 256 bits");
3815 case 64:  Align = 8;  break;
3816 case 128: Align = 16; break;
3817 case 256: Align = 32; break;
3820 // Now we should have the closing ']'
3821 E = Parser.getTok().getLoc();
3822 if (Parser.getTok().isNot(AsmToken::RBrac))
3823 return Error(E, "']' expected");
3824 Parser.Lex(); // Eat right bracket token.
3826 // Don't worry about range checking the value here. That's handled by
3827 // the is*() predicates.
3828 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3829 ARM_AM::no_shift, 0, Align,
3832 // If there's a pre-indexing writeback marker, '!', just add it as a token
3834 if (Parser.getTok().is(AsmToken::Exclaim)) {
3835 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3836 Parser.Lex(); // Eat the '!'.
3842 // If we have a '#', it's an immediate offset, else assume it's a register
3843 // offset. Be friendly and also accept a plain integer (without a leading
3844 // hash) for gas compatibility.
3845 if (Parser.getTok().is(AsmToken::Hash) ||
3846 Parser.getTok().is(AsmToken::Integer)) {
3847 if (Parser.getTok().is(AsmToken::Hash))
3848 Parser.Lex(); // Eat the '#'.
3849 E = Parser.getTok().getLoc();
// Look for a leading '-' so that #-0 can be distinguished from #0.
3851 bool isNegative = getParser().getTok().is(AsmToken::Minus);
3852 const MCExpr *Offset;
3853 if (getParser().ParseExpression(Offset))
3856 // The expression has to be a constant. Memory references with relocations
3857 // don't come through here, as they use the <label> forms of the relevant
3859 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3861 return Error (E, "constant expression expected");
3863 // If the constant was #-0, represent it as INT32_MIN.
3864 int32_t Val = CE->getValue();
3865 if (isNegative && Val == 0)
3866 CE = MCConstantExpr::Create(INT32_MIN, getContext());
3868 // Now we should have the closing ']'
3869 E = Parser.getTok().getLoc();
3870 if (Parser.getTok().isNot(AsmToken::RBrac))
3871 return Error(E, "']' expected");
3872 Parser.Lex(); // Eat right bracket token.
3874 // Don't worry about range checking the value here. That's handled by
3875 // the is*() predicates.
3876 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
3877 ARM_AM::no_shift, 0, 0,
3880 // If there's a pre-indexing writeback marker, '!', just add it as a token
3882 if (Parser.getTok().is(AsmToken::Exclaim)) {
3883 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3884 Parser.Lex(); // Eat the '!'.
3890 // The register offset is optionally preceded by a '+' or '-'
3891 bool isNegative = false;
3892 if (Parser.getTok().is(AsmToken::Minus)) {
3894 Parser.Lex(); // Eat the '-'.
3895 } else if (Parser.getTok().is(AsmToken::Plus)) {
3897 Parser.Lex(); // Eat the '+'.
3900 E = Parser.getTok().getLoc();
3901 int OffsetRegNum = tryParseRegister();
3902 if (OffsetRegNum == -1)
3903 return Error(E, "register expected");
3905 // If there's a shift operator, handle it.
3906 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
3907 unsigned ShiftImm = 0;
3908 if (Parser.getTok().is(AsmToken::Comma)) {
3909 Parser.Lex(); // Eat the ','.
3910 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
3914 // Now we should have the closing ']'
3915 E = Parser.getTok().getLoc();
3916 if (Parser.getTok().isNot(AsmToken::RBrac))
3917 return Error(E, "']' expected");
3918 Parser.Lex(); // Eat right bracket token.
3920 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
3921 ShiftType, ShiftImm, 0, isNegative,
3924 // If there's a pre-indexing writeback marker, '!', just add it as a token
3926 if (Parser.getTok().is(AsmToken::Exclaim)) {
3927 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3928 Parser.Lex(); // Eat the '!'.
3934 /// parseMemRegOffsetShift - one of these two:
3935 ///   ( lsl | lsr | asr | ror ) , # shift_amount
/// or 'rrx' with no amount. 'asl' is accepted as a synonym for 'lsl'.
/// On success the shift kind is written to St and the amount to Amt.
3937 /// return true if it parses a shift otherwise it returns false.
3938 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
3940 SMLoc Loc = Parser.getTok().getLoc();
3941 const AsmToken &Tok = Parser.getTok();
3942 if (Tok.isNot(AsmToken::Identifier))
3944 StringRef ShiftName = Tok.getString();
// Each operator is accepted in all-lower or all-upper case only.
3945 if (ShiftName == "lsl" || ShiftName == "LSL" ||
3946 ShiftName == "asl" || ShiftName == "ASL")
3948 else if (ShiftName == "lsr" || ShiftName == "LSR")
3950 else if (ShiftName == "asr" || ShiftName == "ASR")
3952 else if (ShiftName == "ror" || ShiftName == "ROR")
3954 else if (ShiftName == "rrx" || ShiftName == "RRX")
3957 return Error(Loc, "illegal shift operator");
3958 Parser.Lex(); // Eat shift type token.
3960 // rrx stands alone.
3962 if (St != ARM_AM::rrx) {
3963 Loc = Parser.getTok().getLoc();
3964 // A '#' and a shift amount.
3965 const AsmToken &HashTok = Parser.getTok();
3966 if (HashTok.isNot(AsmToken::Hash))
3967 return Error(HashTok.getLoc(), "'#' expected");
3968 Parser.Lex(); // Eat hash token.
3971 if (getParser().ParseExpression(Expr))
3973 // Range check the immediate.
3974 // lsl, ror: 0 <= imm <= 31
3975 // lsr, asr: 0 <= imm <= 32
3976 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3978 return Error(Loc, "shift amount must be an immediate");
3979 int64_t Imm = CE->getValue();
3981 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
3982 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
3983 return Error(Loc, "immediate shift value out of range");
3990 /// parseFPImm - A floating point immediate expression operand.
3991 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3992 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3993 SMLoc S = Parser.getTok().getLoc();
// No '#' means this is not an FP immediate; nothing has been consumed.
3995 if (Parser.getTok().isNot(AsmToken::Hash))
3996 return MatchOperand_NoMatch;
3998 // Disambiguate the VMOV forms that can accept an FP immediate.
3999 // vmov.f32 <sreg>, #imm
4000 // vmov.f64 <dreg>, #imm
4001 // vmov.f32 <dreg>, #imm  @ vector f32x2
4002 // vmov.f32 <qreg>, #imm  @ vector f32x4
4004 // There are also the NEON VMOV instructions which expect an
4005 // integer constant. Make sure we don't try to parse an FPImm
4007 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
// Operands[2] is the size-suffix token pushed by the mnemonic splitter;
// only the .f32/.f64 forms take an FP immediate.
4008 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4009 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4010 TyOp->getToken() != ".f64"))
4011 return MatchOperand_NoMatch;
4013 Parser.Lex(); // Eat the '#'.
4015 // Handle negation, as that still comes through as a separate token.
4016 bool isNegative = false;
4017 if (Parser.getTok().is(AsmToken::Minus)) {
4021 const AsmToken &Tok = Parser.getTok();
// A real literal: check that it is representable as an 8-bit encoded
// VFP immediate.
4022 if (Tok.is(AsmToken::Real)) {
4023 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4024 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4025 // If we had a '-' in front, toggle the sign bit.
4026 IntVal ^= (uint64_t)isNegative << 63;
4027 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4028 Parser.Lex(); // Eat the token.
4030 TokError("floating point value out of range");
4031 return MatchOperand_ParseFail;
4033 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4034 return MatchOperand_Success;
// An integer literal is taken as an already-encoded 8-bit value.
4036 if (Tok.is(AsmToken::Integer)) {
4037 int64_t Val = Tok.getIntVal();
4038 Parser.Lex(); // Eat the token.
4039 if (Val > 255 || Val < 0) {
4040 TokError("encoded floating point value out of range");
4041 return MatchOperand_ParseFail;
4043 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4044 return MatchOperand_Success;
4047 TokError("invalid floating point immediate");
4048 return MatchOperand_ParseFail;
4050 /// Parse an ARM instruction operand. For now this parses the operand regardless
4051 /// of the mnemonic.
///
/// Returns true on error (diagnostic already emitted), false on success;
/// successfully parsed operands are appended to \p Operands.
4052 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4053 StringRef Mnemonic) {
4056 // Check if the current operand has a custom associated parser, if so, try to
4057 // custom parse the operand, or fallback to the general approach.
4058 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4059 if (ResTy == MatchOperand_Success)
4061 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4062 // there was a match, but an error occurred, in which case, just return that
4063 // the operand parsing failed.
4064 if (ResTy == MatchOperand_ParseFail)
// Dispatch on the first token of the operand.
4067 switch (getLexer().getKind()) {
4069 Error(Parser.getTok().getLoc(), "unexpected token in operand");
4071 case AsmToken::Identifier: {
// Try plain register (with optional '!' writeback), then shifted-register
// forms; tryParseShiftRegister returns 0 on success, -1 on a hard error.
4072 // If this is VMRS, check for the apsr_nzcv operand.
4073 if (!tryParseRegisterWithWriteBack(Operands))
4075 int Res = tryParseShiftRegister(Operands);
4076 if (Res == 0) // success
4078 else if (Res == -1) // irrecoverable error
4080 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4081 S = Parser.getTok().getLoc();
4083 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4087 // Fall through for the Identifier case that is not a register or a
4090 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
4091 case AsmToken::Integer: // things like 1f and 2b as a branch targets
4092 case AsmToken::String: // quoted label names.
4093 case AsmToken::Dot: { // . as a branch target
4094 // This was not a register so parse other operands that start with an
4095 // identifier (like labels) as expressions and create them as immediates.
4096 const MCExpr *IdVal;
4097 S = Parser.getTok().getLoc();
4098 if (getParser().ParseExpression(IdVal))
4100 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4101 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4104 case AsmToken::LBrac:
4105 return parseMemory(Operands);
4106 case AsmToken::LCurly:
4107 return parseRegisterList(Operands);
4108 case AsmToken::Hash: {
4109 // #42 -> immediate.
4110 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4111 S = Parser.getTok().getLoc();
4113 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4114 const MCExpr *ImmVal;
4115 if (getParser().ParseExpression(ImmVal))
4117 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4119 int32_t Val = CE->getValue();
// "#-0" folds to 0 during expression evaluation; represent it as INT32_MIN
// so the distinct sign survives to instruction selection.
4120 if (isNegative && Val == 0)
4121 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext())
4123 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4124 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4127 case AsmToken::Colon: {
4128 // ":lower16:" and ":upper16:" expression prefixes
4129 // FIXME: Check it's an expression prefix,
4130 // e.g. (FOO - :lower16:BAR) isn't legal.
4131 ARMMCExpr::VariantKind RefKind;
4132 if (parsePrefix(RefKind))
4135 const MCExpr *SubExprVal;
4136 if (getParser().ParseExpression(SubExprVal))
// Wrap the subexpression in an ARM-specific :lower16:/:upper16: expr.
4139 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4141 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4142 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4148 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4149 // :lower16: and :upper16:.
//
// On success RefKind is set to VK_ARM_LO16 or VK_ARM_HI16 and the trailing
// ':' is consumed. Returns true on error (diagnostic emitted), false on
// success. The caller guarantees the current token is ':' (asserted below).
4150 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4151 RefKind = ARMMCExpr::VK_ARM_None;
4153 // :lower16: and :upper16: modifiers
4154 assert(getLexer().is(AsmToken::Colon) && "expected a :");
4155 Parser.Lex(); // Eat ':'
4157 if (getLexer().isNot(AsmToken::Identifier)) {
4158 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
// Map the identifier between the colons to the relocation variant kind.
4162 StringRef IDVal = Parser.getTok().getIdentifier();
4163 if (IDVal == "lower16") {
4164 RefKind = ARMMCExpr::VK_ARM_LO16;
4165 } else if (IDVal == "upper16") {
4166 RefKind = ARMMCExpr::VK_ARM_HI16;
4168 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
// The prefix identifier must be followed by a closing ':'.
4173 if (getLexer().isNot(AsmToken::Colon)) {
4174 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4177 Parser.Lex(); // Eat the last ':'
4181 /// \brief Given a mnemonic, split out possible predication code and carry
4182 /// setting letters to form a canonical mnemonic and flags.
///
/// Returns the canonical (stripped) mnemonic. Out-parameters:
///   PredicationCode - ARMCC condition parsed off the suffix (ARMCC::AL if none)
///   CarrySetting    - true if a trailing 's' flag-setting suffix was stripped
///   ProcessorIMod   - ARM_PROC::IE/ID for "cpsie"/"cpsid", else untouched here
///   ITMask          - the "t"/"e" mask letters trailing an "it" mnemonic
4184 // FIXME: Would be nice to autogen this.
4185 // FIXME: This is a bit of a maze of special cases.
4186 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4187 unsigned &PredicationCode,
4189 unsigned &ProcessorIMod,
4190 StringRef &ITMask) {
4191 PredicationCode = ARMCC::AL;
4192 CarrySetting = false;
// Early-out for mnemonics whose last two letters merely look like a
// condition code (e.g. "teq" ends in "eq", "vmls" in "ls").
4195 // Ignore some mnemonics we know aren't predicated forms.
4197 // FIXME: Would be nice to autogen this.
4198 if ((Mnemonic == "movs" && isThumb()) ||
4199 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
4200 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
4201 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
4202 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
4203 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
4204 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
4205 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4208 // First, split out any predication code. Ignore mnemonics we know aren't
4209 // predicated but do have a carry-set and so weren't caught above.
4210 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4211 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4212 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4213 Mnemonic != "sbcs" && Mnemonic != "rscs") {
// Try to interpret the last two characters as an ARMCC condition code.
4214 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4215 .Case("eq", ARMCC::EQ)
4216 .Case("ne", ARMCC::NE)
4217 .Case("hs", ARMCC::HS)
4218 .Case("cs", ARMCC::HS)
4219 .Case("lo", ARMCC::LO)
4220 .Case("cc", ARMCC::LO)
4221 .Case("mi", ARMCC::MI)
4222 .Case("pl", ARMCC::PL)
4223 .Case("vs", ARMCC::VS)
4224 .Case("vc", ARMCC::VC)
4225 .Case("hi", ARMCC::HI)
4226 .Case("ls", ARMCC::LS)
4227 .Case("ge", ARMCC::GE)
4228 .Case("lt", ARMCC::LT)
4229 .Case("gt", ARMCC::GT)
4230 .Case("le", ARMCC::LE)
4231 .Case("al", ARMCC::AL)
4234 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4235 PredicationCode = CC;
4239 // Next, determine if we have a carry setting bit. We explicitly ignore all
4240 // the instructions we know end in 's'.
4241 if (Mnemonic.endswith("s") &&
4242 !(Mnemonic == "cps" || Mnemonic == "mls" ||
4243 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4244 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4245 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4246 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4247 Mnemonic == "fmrs" ||
4248 (Mnemonic == "movs" && isThumb()))) {
4249 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4250 CarrySetting = true;
4253 // The "cps" instruction can have an interrupt mode operand which is glued into
4254 // the mnemonic. Check if this is the case, split it and parse the imod op
4255 if (Mnemonic.startswith("cps")) {
4256 // Split out any imod code.
4258 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4259 .Case("ie", ARM_PROC::IE)
4260 .Case("id", ARM_PROC::ID)
4263 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2)
4264 ProcessorIMod = IMod;
4268 // The "it" instruction has the condition mask on the end of the mnemonic.
4269 if (Mnemonic.startswith("it")) {
4270 ITMask = Mnemonic.slice(2, Mnemonic.size());
4271 Mnemonic = Mnemonic.slice(0, 2);
4277 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
4278 /// inclusion of carry set or predication code operands.
///
/// Sets CanAcceptCarrySet for the ALU mnemonics that have flag-setting
/// variants, and clears CanAcceptPredicationCode for mnemonics (or
/// mnemonic/ISA-mode combinations) that are never predicable.
4280 // FIXME: It would be nice to autogen this.
4282 getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4283 bool &CanAcceptPredicationCode) {
4284 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4285 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4286 Mnemonic == "add" || Mnemonic == "adc" ||
4287 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4288 Mnemonic == "orr" || Mnemonic == "mvn" ||
4289 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4290 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4291 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4292 Mnemonic == "mla" || Mnemonic == "smlal" ||
4293 Mnemonic == "umlal" || Mnemonic == "umull"))) {
4294 CanAcceptCarrySet = true;
4296 CanAcceptCarrySet = false;
// System/hint/coprocessor-2 instructions and certain mode-specific forms
// are unconditional; reject a predication suffix on them.
4298 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4299 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4300 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4301 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4302 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4303 (Mnemonic == "clrex" && !isThumb()) ||
4304 (Mnemonic == "nop" && isThumbOne()) ||
4305 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4306 Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4307 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4308 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4310 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4311 CanAcceptPredicationCode = false;
4313 CanAcceptPredicationCode = true;
// NOTE(review): this trailing check appears to apply in a mode-specific
// branch whose condition is not visible here — confirm against full file.
4316 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4317 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4318 CanAcceptPredicationCode = false;
// Returns true when the defaulted (non-flag-setting) cc_out operand that was
// speculatively added after the mnemonic should be removed, because the only
// matching encoding for this operand pattern has no cc_out optional-def.
// Operands[1] is the speculative cc_out; a getReg() of 0 there means "not
// explicitly flag-setting".
4322 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4323 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4324 // FIXME: This is all horribly hacky. We really need a better way to deal
4325 // with optional operands like this in the matcher table.
4327 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4328 // another does not. Specifically, the MOVW instruction does not. So we
4329 // special case it here and remove the defaulted (non-setting) cc_out
4330 // operand if that's the instruction we're trying to match.
4332 // We do this as post-processing of the explicit operands rather than just
4333 // conditionally adding the cc_out in the first place because we need
4334 // to check the type of the parsed immediate operand.
4335 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4336 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4337 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4338 static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4341 // Register-register 'add' for thumb does not have a cc_out operand
4342 // when there are only two register operands.
4343 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4344 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4345 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4346 static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4348 // Register-register 'add' for thumb does not have a cc_out operand
4349 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4350 // have to check the immediate range here since Thumb2 has a variant
4351 // that can handle a different range and has a cc_out operand.
4352 if (((isThumb() && Mnemonic == "add") ||
4353 (isThumbTwo() && Mnemonic == "sub")) &&
4354 Operands.size() == 6 &&
4355 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4356 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4357 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4358 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4359 (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4360 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4362 // For Thumb2, add/sub immediate does not have a cc_out operand for the
4363 // imm0_4095 variant. That's the least-preferred variant when
4364 // selecting via the generic "add" mnemonic, so to know that we
4365 // should remove the cc_out operand, we have to explicitly check that
4366 // it's not one of the other variants. Ugh.
4367 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4368 Operands.size() == 6 &&
4369 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4370 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4371 static_cast<ARMOperand*>(Operands[5])->isImm()) {
4372 // Nest conditions rather than one big 'if' statement for readability.
4374 // If either register is a high reg, it's either one of the SP
4375 // variants (handled above) or a 32-bit encoding, so we just
4376 // check against T3.
4377 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4378 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4379 static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4381 // If both registers are low, we're in an IT block, and the immediate is
4382 // in range, we should use encoding T1 instead, which has a cc_out.
4384 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4385 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4386 static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4389 // Otherwise, we use encoding T4, which does not have a cc_out
4394 // The thumb2 multiply instruction doesn't have a CCOut register, so
4395 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4396 // use the 16-bit encoding or not.
4397 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4398 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4399 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4400 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4401 static_cast<ARMOperand*>(Operands[5])->isReg() &&
4402 // If the registers aren't low regs, the destination reg isn't the
4403 // same as one of the source regs, or the cc_out operand is zero
4404 // outside of an IT block, we have to use the 32-bit encoding, so
4405 // remove the cc_out operand.
4406 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4407 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4408 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4410 (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4411 static_cast<ARMOperand*>(Operands[5])->getReg() &&
4412 static_cast<ARMOperand*>(Operands[3])->getReg() !=
4413 static_cast<ARMOperand*>(Operands[4])->getReg())))
4416 // Also check the 'mul' syntax variant that doesn't specify an explicit
4417 // destination register.
4418 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4419 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4420 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4421 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4422 // If the registers aren't low regs or the cc_out operand is zero
4423 // outside of an IT block, we have to use the 32-bit encoding, so
4424 // remove the cc_out operand.
4425 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4426 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4432 // Register-register 'add/sub' for thumb does not have a cc_out operand
4433 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4434 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4435 // right, this will result in better diagnostics (which operand is off)
4437 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4438 (Operands.size() == 5 || Operands.size() == 6) &&
4439 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4440 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4441 static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4447 static bool isDataTypeToken(StringRef Tok) {
4448 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4449 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4450 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4451 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4452 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4453 Tok == ".f" || Tok == ".d";
4456 // FIXME: This bit should probably be handled via an explicit match class
4457 // in the .td files that matches the suffix instead of having it be
4458 // a literal string token the way it is now.
4459 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4460 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4463 /// Parse an arm instruction mnemonic followed by its operands.
///
/// Splits the raw mnemonic into its canonical form plus predication /
/// carry-setting / imod / IT-mask pieces, pushes the synthetic CCOut and
/// CondCode operands the matcher expects, parses the comma-separated operand
/// list, and then post-processes the operand vector for the variant quirks
/// noted inline. Returns true on error (diagnostic emitted).
4464 bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4465 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4466 // Create the leading tokens for the mnemonic, split by '.' characters.
4467 size_t Start = 0, Next = Name.find('.');
4468 StringRef Mnemonic = Name.slice(Start, Next);
4470 // Split out the predication code and carry setting flag from the mnemonic.
4471 unsigned PredicationCode;
4472 unsigned ProcessorIMod;
4475 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4476 ProcessorIMod, ITMask);
4478 // In Thumb1, only the branch (B) instruction can be predicated.
4479 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4480 Parser.EatToEndOfStatement();
4481 return Error(NameLoc, "conditional execution not supported in Thumb1");
4484 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4486 // Handle the IT instruction ITMask. Convert it to a bitmask. This
4487 // is the mask as it will be for the IT encoding if the conditional
4488 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4489 // where the conditional bit0 is zero, the instruction post-processing
4490 // will adjust the mask accordingly.
4491 if (Mnemonic == "it") {
4492 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4493 if (ITMask.size() > 3) {
4494 Parser.EatToEndOfStatement();
4495 return Error(Loc, "too many conditions on IT instruction");
// Walk the mask string last-to-first; anything but 't'/'e' is malformed.
4498 for (unsigned i = ITMask.size(); i != 0; --i) {
4499 char pos = ITMask[i - 1];
4500 if (pos != 't' && pos != 'e') {
4501 Parser.EatToEndOfStatement();
4502 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4505 if (ITMask[i - 1] == 't')
4508 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4511 // FIXME: This is all a pretty gross hack. We should automatically handle
4512 // optional operands like this via tblgen.
4514 // Next, add the CCOut and ConditionCode operands, if needed.
4516 // For mnemonics which can ever incorporate a carry setting bit or predication
4517 // code, our matching model involves us always generating CCOut and
4518 // ConditionCode operands to match the mnemonic "as written" and then we let
4519 // the matcher deal with finding the right instruction or generating an
4520 // appropriate error.
4521 bool CanAcceptCarrySet, CanAcceptPredicationCode;
4522 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4524 // If we had a carry-set on an instruction that can't do that, issue an
4526 if (!CanAcceptCarrySet && CarrySetting) {
4527 Parser.EatToEndOfStatement();
4528 return Error(NameLoc, "instruction '" + Mnemonic +
4529 "' can not set flags, but 's' suffix specified");
4531 // If we had a predication code on an instruction that can't do that, issue an
4533 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4534 Parser.EatToEndOfStatement();
4535 return Error(NameLoc, "instruction '" + Mnemonic +
4536 "' is not predicable, but condition code specified");
4539 // Add the carry setting operand, if necessary.
4540 if (CanAcceptCarrySet) {
4541 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4542 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4546 // Add the predication code operand, if necessary.
4547 if (CanAcceptPredicationCode) {
4548 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4550 Operands.push_back(ARMOperand::CreateCondCode(
4551 ARMCC::CondCodes(PredicationCode), Loc));
4554 // Add the processor imod operand, if necessary.
4555 if (ProcessorIMod) {
4556 Operands.push_back(ARMOperand::CreateImm(
4557 MCConstantExpr::Create(ProcessorIMod, getContext()),
4561 // Add the remaining tokens in the mnemonic.
4562 while (Next != StringRef::npos) {
4564 Next = Name.find('.', Start + 1);
4565 StringRef ExtraToken = Name.slice(Start, Next);
4567 // Some NEON instructions have an optional datatype suffix that is
4568 // completely ignored. Check for that.
4569 if (isDataTypeToken(ExtraToken) &&
4570 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
// ".n" (narrow) qualifiers are dropped; everything else becomes a token.
4573 if (ExtraToken != ".n") {
4574 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4575 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4579 // Read the remaining operands.
4580 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4581 // Read the first operand.
4582 if (parseOperand(Operands, Mnemonic)) {
4583 Parser.EatToEndOfStatement();
4587 while (getLexer().is(AsmToken::Comma)) {
4588 Parser.Lex(); // Eat the comma.
4590 // Parse and remember the operand.
4591 if (parseOperand(Operands, Mnemonic)) {
4592 Parser.EatToEndOfStatement();
4598 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4599 SMLoc Loc = getLexer().getLoc();
4600 Parser.EatToEndOfStatement();
4601 return Error(Loc, "unexpected token in argument list");
4604 Parser.Lex(); // Consume the EndOfStatement
4606 // Some instructions, mostly Thumb, have forms for the same mnemonic that
4607 // do and don't have a cc_out optional-def operand. With some spot-checks
4608 // of the operand list, we can figure out which variant we're trying to
4609 // parse and adjust accordingly before actually matching. We shouldn't ever
4610 // try to remove a cc_out operand that was explicitly set on the
4611 // mnemonic, of course (CarrySetting == true). Reason number #317 the
4612 // table driven matcher doesn't fit well with the ARM instruction set.
4613 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4614 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4615 Operands.erase(Operands.begin() + 1);
4619 // ARM mode 'blx' needs special handling, as the register operand version
4620 // is predicable, but the label operand version is not. So, we can't rely
4621 // on the Mnemonic based checking to correctly figure out when to put
4622 // a k_CondCode operand in the list. If we're trying to match the label
4623 // version, remove the k_CondCode operand here.
4624 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4625 static_cast<ARMOperand*>(Operands[2])->isImm()) {
4626 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4627 Operands.erase(Operands.begin() + 1);
4631 // The vector-compare-to-zero instructions have a literal token "#0" at
4632 // the end that comes to here as an immediate operand. Convert it to a
4633 // token to play nicely with the matcher.
4634 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4635 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4636 static_cast<ARMOperand*>(Operands[5])->isImm()) {
4637 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4638 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4639 if (CE && CE->getValue() == 0) {
4640 Operands.erase(Operands.begin() + 5);
4641 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4645 // VCMP{E} does the same thing, but with a different operand count.
4646 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4647 static_cast<ARMOperand*>(Operands[4])->isImm()) {
4648 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4650 if (CE && CE->getValue() == 0) {
4651 Operands.erase(Operands.begin() + 4);
4652 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4656 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4657 // end. Convert it to a token here.
4658 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4659 static_cast<ARMOperand*>(Operands[5])->isImm()) {
4660 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4661 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4662 if (CE && CE->getValue() == 0) {
4663 Operands.erase(Operands.begin() + 5);
4664 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4672 // Validate context-sensitive operand constraints.
4674 // return 'true' if register list contains non-low GPR registers,
4675 // 'false' otherwise. If Reg is in the register list or is HiReg, set
4676 // 'containsReg' to true.
// Scans the MCInst's operands from OpNo onward (the register-list tail).
// HiReg is the one high register (e.g. PC for POP, LR for PUSH) that is
// additionally permitted in the list; 0 means none.
4677 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4678 unsigned HiReg, bool &containsReg) {
4679 containsReg = false;
4680 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4681 unsigned OpReg = Inst.getOperand(i).getReg();
4684 // Anything other than a low register isn't legal here.
4685 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4691 // Check if the specified register is in the register list of the inst,
4692 // starting at the indicated operand number.
4693 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4694 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4695 unsigned OpReg = Inst.getOperand(i).getReg();
4702 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4703 // the ARMInsts array) instead. Getting that here requires awkward
4704 // API changes, though. Better way?
// Tablegen'erated per-opcode instruction descriptions, defined elsewhere.
4706 extern const MCInstrDesc ARMInsts[];
// Look up the MCInstrDesc for an opcode directly in the ARMInsts table.
4708 static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4709 return ARMInsts[Opcode];
4712 // FIXME: We would really like to be able to tablegen'erate this.
//
// Post-match semantic validation: enforces IT-block predication rules and
// per-opcode operand constraints (sequential Rt/Rt2 pairs, BFI width range,
// Thumb register-list restrictions). Returns true and emits a diagnostic on
// the offending operand's SMLoc when validation fails.
4714 validateInstruction(MCInst &Inst,
4715 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4716 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4717 SMLoc Loc = Operands[0]->getStartLoc();
4718 // Check the IT block state first.
4719 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4720 // being allowed in IT blocks, but not being predicable. It just always
4722 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
// The leading IT instruction itself carries the condition explicitly, so
// it is exempt from the mask-derived check below.
4724 if (ITState.FirstCond)
4725 ITState.FirstCond = false;
// Extract this slot's then/else bit from the IT mask; CurPosition indexes
// the instruction's position within the block.
4727 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4728 // The instruction must be predicable.
4729 if (!MCID.isPredicable())
4730 return Error(Loc, "instructions in IT block must be predicable");
4731 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4732 unsigned ITCond = bit ? ITState.Cond :
4733 ARMCC::getOppositeCondition(ITState.Cond);
4734 if (Cond != ITCond) {
4735 // Find the condition code Operand to get its SMLoc information.
4737 for (unsigned i = 1; i < Operands.size(); ++i)
4738 if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4739 CondLoc = Operands[i]->getStartLoc();
4740 return Error(CondLoc, "incorrect condition in IT block; got '" +
4741 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4742 "', but expected '" +
4743 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4745 // Check for non-'al' condition codes outside of the IT block.
4746 } else if (isThumbTwo() && MCID.isPredicable() &&
4747 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4748 ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4749 Inst.getOpcode() != ARM::t2B)
4750 return Error(Loc, "predicated instructions must be in IT block");
// Per-opcode constraints follow.
4752 switch (Inst.getOpcode()) {
4755 case ARM::LDRD_POST:
4757 // Rt2 must be Rt + 1.
4758 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4759 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4761 return Error(Operands[3]->getStartLoc(),
4762 "destination operands must be sequential");
4766 // Rt2 must be Rt + 1.
4767 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4768 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4770 return Error(Operands[3]->getStartLoc(),
4771 "source operands must be sequential");
4775 case ARM::STRD_POST:
4777 // Rt2 must be Rt + 1.
// Note: for STRD the pair starts at operand index 1, not 0.
4778 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4779 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4781 return Error(Operands[3]->getStartLoc(),
4782 "source operands must be sequential");
4787 // width must be in range [1, 32-lsb]
4788 unsigned lsb = Inst.getOperand(2).getImm();
4789 unsigned widthm1 = Inst.getOperand(3).getImm();
4790 if (widthm1 >= 32 - lsb)
4791 return Error(Operands[5]->getStartLoc(),
4792 "bitfield width must be in range [1,32-lsb]");
4796 // If we're parsing Thumb2, the .w variant is available and handles
4797 // most cases that are normally illegal for a Thumb1 LDM
4798 // instruction. We'll make the transformation in processInstruction()
4801 // Thumb LDM instructions are writeback iff the base register is not
4802 // in the register list.
4803 unsigned Rn = Inst.getOperand(0).getReg();
4804 bool hasWritebackToken =
4805 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4806 static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4807 bool listContainsBase;
4808 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4809 return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4810 "registers must be in range r0-r7");
4811 // If we should have writeback, then there should be a '!' token.
4812 if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4813 return Error(Operands[2]->getStartLoc(),
4814 "writeback operator '!' expected");
4815 // If we should not have writeback, there must not be a '!'. This is
4816 // true even for the 32-bit wide encodings.
4817 if (listContainsBase && hasWritebackToken)
4818 return Error(Operands[3]->getStartLoc(),
4819 "writeback operator '!' not allowed when base register "
4820 "in register list");
4824 case ARM::t2LDMIA_UPD: {
4825 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
4826 return Error(Operands[4]->getStartLoc(),
4827 "writeback operator '!' not allowed when base register "
4828 "in register list");
4831 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
4832 // so only issue a diagnostic for thumb1. The instructions will be
4833 // switched to the t2 encodings in processInstruction() if necessary.
// POP: PC is the one high register allowed in the list.
4835 bool listContainsBase;
4836 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
4838 return Error(Operands[2]->getStartLoc(),
4839 "registers must be in range r0-r7 or pc");
// PUSH: LR is the one high register allowed in the list.
4843 bool listContainsBase;
4844 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
4846 return Error(Operands[2]->getStartLoc(),
4847 "registers must be in range r0-r7 or lr");
4850 case ARM::tSTMIA_UPD: {
4851 bool listContainsBase;
4852 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
4853 return Error(Operands[4]->getStartLoc(),
4854 "registers must be in range r0-r7");
4862 static unsigned getRealVSTLNOpcode(unsigned Opc) {
4864 default: assert(0 && "unexpected opcode!");
4865 case ARM::VST1LNdWB_fixed_Asm_8: return ARM::VST1LNd8_UPD;
4866 case ARM::VST1LNdWB_fixed_Asm_P8: return ARM::VST1LNd8_UPD;
4867 case ARM::VST1LNdWB_fixed_Asm_I8: return ARM::VST1LNd8_UPD;
4868 case ARM::VST1LNdWB_fixed_Asm_S8: return ARM::VST1LNd8_UPD;
4869 case ARM::VST1LNdWB_fixed_Asm_U8: return ARM::VST1LNd8_UPD;
4870 case ARM::VST1LNdWB_fixed_Asm_16: return ARM::VST1LNd16_UPD;
4871 case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
4872 case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
4873 case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
4874 case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
4875 case ARM::VST1LNdWB_fixed_Asm_32: return ARM::VST1LNd32_UPD;
4876 case ARM::VST1LNdWB_fixed_Asm_F: return ARM::VST1LNd32_UPD;
4877 case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
4878 case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
4879 case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
4880 case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
4881 case ARM::VST1LNdWB_register_Asm_8: return ARM::VST1LNd8_UPD;
4882 case ARM::VST1LNdWB_register_Asm_P8: return ARM::VST1LNd8_UPD;
4883 case ARM::VST1LNdWB_register_Asm_I8: return ARM::VST1LNd8_UPD;
4884 case ARM::VST1LNdWB_register_Asm_S8: return ARM::VST1LNd8_UPD;
4885 case ARM::VST1LNdWB_register_Asm_U8: return ARM::VST1LNd8_UPD;
4886 case ARM::VST1LNdWB_register_Asm_16: return ARM::VST1LNd16_UPD;
4887 case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
4888 case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
4889 case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
4890 case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
4891 case ARM::VST1LNdWB_register_Asm_32: return ARM::VST1LNd32_UPD;
4892 case ARM::VST1LNdWB_register_Asm_F: return ARM::VST1LNd32_UPD;
4893 case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
4894 case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
4895 case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
4896 case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
4897 case ARM::VST1LNdAsm_8: return ARM::VST1LNd8;
4898 case ARM::VST1LNdAsm_P8: return ARM::VST1LNd8;
4899 case ARM::VST1LNdAsm_I8: return ARM::VST1LNd8;
4900 case ARM::VST1LNdAsm_S8: return ARM::VST1LNd8;
4901 case ARM::VST1LNdAsm_U8: return ARM::VST1LNd8;
4902 case ARM::VST1LNdAsm_16: return ARM::VST1LNd16;
4903 case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
4904 case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
4905 case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
4906 case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
4907 case ARM::VST1LNdAsm_32: return ARM::VST1LNd32;
4908 case ARM::VST1LNdAsm_F: return ARM::VST1LNd32;
4909 case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
4910 case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
4911 case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
4912 case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
4916 static unsigned getRealVLDLNOpcode(unsigned Opc) {
4918 default: assert(0 && "unexpected opcode!");
4919 case ARM::VLD1LNdWB_fixed_Asm_8: return ARM::VLD1LNd8_UPD;
4920 case ARM::VLD1LNdWB_fixed_Asm_P8: return ARM::VLD1LNd8_UPD;
4921 case ARM::VLD1LNdWB_fixed_Asm_I8: return ARM::VLD1LNd8_UPD;
4922 case ARM::VLD1LNdWB_fixed_Asm_S8: return ARM::VLD1LNd8_UPD;
4923 case ARM::VLD1LNdWB_fixed_Asm_U8: return ARM::VLD1LNd8_UPD;
4924 case ARM::VLD1LNdWB_fixed_Asm_16: return ARM::VLD1LNd16_UPD;
4925 case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
4926 case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
4927 case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
4928 case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
4929 case ARM::VLD1LNdWB_fixed_Asm_32: return ARM::VLD1LNd32_UPD;
4930 case ARM::VLD1LNdWB_fixed_Asm_F: return ARM::VLD1LNd32_UPD;
4931 case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
4932 case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
4933 case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
4934 case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
4935 case ARM::VLD1LNdWB_register_Asm_8: return ARM::VLD1LNd8_UPD;
4936 case ARM::VLD1LNdWB_register_Asm_P8: return ARM::VLD1LNd8_UPD;
4937 case ARM::VLD1LNdWB_register_Asm_I8: return ARM::VLD1LNd8_UPD;
4938 case ARM::VLD1LNdWB_register_Asm_S8: return ARM::VLD1LNd8_UPD;
4939 case ARM::VLD1LNdWB_register_Asm_U8: return ARM::VLD1LNd8_UPD;
4940 case ARM::VLD1LNdWB_register_Asm_16: return ARM::VLD1LNd16_UPD;
4941 case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
4942 case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
4943 case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
4944 case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
4945 case ARM::VLD1LNdWB_register_Asm_32: return ARM::VLD1LNd32_UPD;
4946 case ARM::VLD1LNdWB_register_Asm_F: return ARM::VLD1LNd32_UPD;
4947 case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
4948 case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
4949 case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
4950 case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
4951 case ARM::VLD1LNdAsm_8: return ARM::VLD1LNd8;
4952 case ARM::VLD1LNdAsm_P8: return ARM::VLD1LNd8;
4953 case ARM::VLD1LNdAsm_I8: return ARM::VLD1LNd8;
4954 case ARM::VLD1LNdAsm_S8: return ARM::VLD1LNd8;
4955 case ARM::VLD1LNdAsm_U8: return ARM::VLD1LNd8;
4956 case ARM::VLD1LNdAsm_16: return ARM::VLD1LNd16;
4957 case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
4958 case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
4959 case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
4960 case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
4961 case ARM::VLD1LNdAsm_32: return ARM::VLD1LNd32;
4962 case ARM::VLD1LNdAsm_F: return ARM::VLD1LNd32;
4963 case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
4964 case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
4965 case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
4966 case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
// Post-process a successfully matched instruction before emission.
// Rewrites pseudo "Asm" alias opcodes (NEON VST1/VLD1 lane aliases, the
// shift/rotate MOV aliases) into their real opcodes with operands reordered
// to match the real instruction, and swaps between 16-bit and 32-bit Thumb
// encodings when the matched one is not the preferred/legal choice.
// Rewritten instructions are built in a temporary MCInst and assigned back
// over Inst. The caller loops on the return value (see
// MatchAndEmitInstruction) so transformations can chain.
4971 processInstruction(MCInst &Inst,
4972 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4973 switch (Inst.getOpcode()) {
4974 // Handle NEON VST1 complex aliases.
// VST1 lane, writeback with register increment: real operand order is
// Rn_wb, Rn, alignment, Rm, Vd, lane, predicate.
4975 case ARM::VST1LNdWB_register_Asm_8:
4976 case ARM::VST1LNdWB_register_Asm_P8:
4977 case ARM::VST1LNdWB_register_Asm_I8:
4978 case ARM::VST1LNdWB_register_Asm_S8:
4979 case ARM::VST1LNdWB_register_Asm_U8:
4980 case ARM::VST1LNdWB_register_Asm_16:
4981 case ARM::VST1LNdWB_register_Asm_P16:
4982 case ARM::VST1LNdWB_register_Asm_I16:
4983 case ARM::VST1LNdWB_register_Asm_S16:
4984 case ARM::VST1LNdWB_register_Asm_U16:
4985 case ARM::VST1LNdWB_register_Asm_32:
4986 case ARM::VST1LNdWB_register_Asm_F:
4987 case ARM::VST1LNdWB_register_Asm_F32:
4988 case ARM::VST1LNdWB_register_Asm_I32:
4989 case ARM::VST1LNdWB_register_Asm_S32:
4990 case ARM::VST1LNdWB_register_Asm_U32: {
4992 // Shuffle the operands around so the lane index operand is in the
4994 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
4995 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
4996 TmpInst.addOperand(Inst.getOperand(2)); // Rn
4997 TmpInst.addOperand(Inst.getOperand(3)); // alignment
4998 TmpInst.addOperand(Inst.getOperand(4)); // Rm
4999 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5000 TmpInst.addOperand(Inst.getOperand(1)); // lane
5001 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5002 TmpInst.addOperand(Inst.getOperand(6));
// VST1 lane, writeback with fixed increment: Rm is reg0 (no register).
5006 case ARM::VST1LNdWB_fixed_Asm_8:
5007 case ARM::VST1LNdWB_fixed_Asm_P8:
5008 case ARM::VST1LNdWB_fixed_Asm_I8:
5009 case ARM::VST1LNdWB_fixed_Asm_S8:
5010 case ARM::VST1LNdWB_fixed_Asm_U8:
5011 case ARM::VST1LNdWB_fixed_Asm_16:
5012 case ARM::VST1LNdWB_fixed_Asm_P16:
5013 case ARM::VST1LNdWB_fixed_Asm_I16:
5014 case ARM::VST1LNdWB_fixed_Asm_S16:
5015 case ARM::VST1LNdWB_fixed_Asm_U16:
5016 case ARM::VST1LNdWB_fixed_Asm_32:
5017 case ARM::VST1LNdWB_fixed_Asm_F:
5018 case ARM::VST1LNdWB_fixed_Asm_F32:
5019 case ARM::VST1LNdWB_fixed_Asm_I32:
5020 case ARM::VST1LNdWB_fixed_Asm_S32:
5021 case ARM::VST1LNdWB_fixed_Asm_U32: {
5023 // Shuffle the operands around so the lane index operand is in the
5025 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5026 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5027 TmpInst.addOperand(Inst.getOperand(2)); // Rn
5028 TmpInst.addOperand(Inst.getOperand(3)); // alignment
5029 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5030 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5031 TmpInst.addOperand(Inst.getOperand(1)); // lane
5032 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5033 TmpInst.addOperand(Inst.getOperand(5));
// VST1 lane, no writeback.
5037 case ARM::VST1LNdAsm_8:
5038 case ARM::VST1LNdAsm_P8:
5039 case ARM::VST1LNdAsm_I8:
5040 case ARM::VST1LNdAsm_S8:
5041 case ARM::VST1LNdAsm_U8:
5042 case ARM::VST1LNdAsm_16:
5043 case ARM::VST1LNdAsm_P16:
5044 case ARM::VST1LNdAsm_I16:
5045 case ARM::VST1LNdAsm_S16:
5046 case ARM::VST1LNdAsm_U16:
5047 case ARM::VST1LNdAsm_32:
5048 case ARM::VST1LNdAsm_F:
5049 case ARM::VST1LNdAsm_F32:
5050 case ARM::VST1LNdAsm_I32:
5051 case ARM::VST1LNdAsm_S32:
5052 case ARM::VST1LNdAsm_U32: {
5054 // Shuffle the operands around so the lane index operand is in the
5056 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5057 TmpInst.addOperand(Inst.getOperand(2)); // Rn
5058 TmpInst.addOperand(Inst.getOperand(3)); // alignment
5059 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5060 TmpInst.addOperand(Inst.getOperand(1)); // lane
5061 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5062 TmpInst.addOperand(Inst.getOperand(5));
5066 // Handle NEON VLD1 complex aliases.
// Loads additionally carry a tied source operand equal to Vd (the real
// VLD1LN* instructions tie the destination to a source register).
5067 case ARM::VLD1LNdWB_register_Asm_8:
5068 case ARM::VLD1LNdWB_register_Asm_P8:
5069 case ARM::VLD1LNdWB_register_Asm_I8:
5070 case ARM::VLD1LNdWB_register_Asm_S8:
5071 case ARM::VLD1LNdWB_register_Asm_U8:
5072 case ARM::VLD1LNdWB_register_Asm_16:
5073 case ARM::VLD1LNdWB_register_Asm_P16:
5074 case ARM::VLD1LNdWB_register_Asm_I16:
5075 case ARM::VLD1LNdWB_register_Asm_S16:
5076 case ARM::VLD1LNdWB_register_Asm_U16:
5077 case ARM::VLD1LNdWB_register_Asm_32:
5078 case ARM::VLD1LNdWB_register_Asm_F:
5079 case ARM::VLD1LNdWB_register_Asm_F32:
5080 case ARM::VLD1LNdWB_register_Asm_I32:
5081 case ARM::VLD1LNdWB_register_Asm_S32:
5082 case ARM::VLD1LNdWB_register_Asm_U32: {
5084 // Shuffle the operands around so the lane index operand is in the
5086 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5087 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5088 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5089 TmpInst.addOperand(Inst.getOperand(2)); // Rn
5090 TmpInst.addOperand(Inst.getOperand(3)); // alignment
5091 TmpInst.addOperand(Inst.getOperand(4)); // Rm
5092 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5093 TmpInst.addOperand(Inst.getOperand(1)); // lane
5094 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5095 TmpInst.addOperand(Inst.getOperand(6));
5099 case ARM::VLD1LNdWB_fixed_Asm_8:
5100 case ARM::VLD1LNdWB_fixed_Asm_P8:
5101 case ARM::VLD1LNdWB_fixed_Asm_I8:
5102 case ARM::VLD1LNdWB_fixed_Asm_S8:
5103 case ARM::VLD1LNdWB_fixed_Asm_U8:
5104 case ARM::VLD1LNdWB_fixed_Asm_16:
5105 case ARM::VLD1LNdWB_fixed_Asm_P16:
5106 case ARM::VLD1LNdWB_fixed_Asm_I16:
5107 case ARM::VLD1LNdWB_fixed_Asm_S16:
5108 case ARM::VLD1LNdWB_fixed_Asm_U16:
5109 case ARM::VLD1LNdWB_fixed_Asm_32:
5110 case ARM::VLD1LNdWB_fixed_Asm_F:
5111 case ARM::VLD1LNdWB_fixed_Asm_F32:
5112 case ARM::VLD1LNdWB_fixed_Asm_I32:
5113 case ARM::VLD1LNdWB_fixed_Asm_S32:
5114 case ARM::VLD1LNdWB_fixed_Asm_U32: {
5116 // Shuffle the operands around so the lane index operand is in the
5118 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5119 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5120 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5121 TmpInst.addOperand(Inst.getOperand(2)); // Rn
5122 TmpInst.addOperand(Inst.getOperand(3)); // alignment
5123 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5124 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5125 TmpInst.addOperand(Inst.getOperand(1)); // lane
5126 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5127 TmpInst.addOperand(Inst.getOperand(5));
5131 case ARM::VLD1LNdAsm_8:
5132 case ARM::VLD1LNdAsm_P8:
5133 case ARM::VLD1LNdAsm_I8:
5134 case ARM::VLD1LNdAsm_S8:
5135 case ARM::VLD1LNdAsm_U8:
5136 case ARM::VLD1LNdAsm_16:
5137 case ARM::VLD1LNdAsm_P16:
5138 case ARM::VLD1LNdAsm_I16:
5139 case ARM::VLD1LNdAsm_S16:
5140 case ARM::VLD1LNdAsm_U16:
5141 case ARM::VLD1LNdAsm_32:
5142 case ARM::VLD1LNdAsm_F:
5143 case ARM::VLD1LNdAsm_F32:
5144 case ARM::VLD1LNdAsm_I32:
5145 case ARM::VLD1LNdAsm_S32:
5146 case ARM::VLD1LNdAsm_U32: {
5148 // Shuffle the operands around so the lane index operand is in the
5150 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5151 TmpInst.addOperand(Inst.getOperand(0)); // Vd
5152 TmpInst.addOperand(Inst.getOperand(2)); // Rn
5153 TmpInst.addOperand(Inst.getOperand(3)); // alignment
5154 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5155 TmpInst.addOperand(Inst.getOperand(1)); // lane
5156 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5157 TmpInst.addOperand(Inst.getOperand(5));
5161 // Handle the MOV complex aliases.
// Register-shifted forms (asr/lsr/lsl/ror by register) become MOVsr with
// the shift type/amount packed by ARM_AM::getSORegOpc.
5166 ARM_AM::ShiftOpc ShiftTy;
5167 switch(Inst.getOpcode()) {
5168 default: llvm_unreachable("unexpected opcode!");
5169 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5170 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5171 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5172 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5174 // A shift by zero is a plain MOVr, not a MOVsi.
5175 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5177 TmpInst.setOpcode(ARM::MOVsr);
5178 TmpInst.addOperand(Inst.getOperand(0)); // Rd
5179 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5180 TmpInst.addOperand(Inst.getOperand(2)); // Rm
5181 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5182 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5183 TmpInst.addOperand(Inst.getOperand(4));
5184 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
// Immediate-shifted forms become MOVsi, or a plain MOVr when the shift
// amount is zero (MOVsi with amount 0 would be a different instruction).
5192 ARM_AM::ShiftOpc ShiftTy;
5193 switch(Inst.getOpcode()) {
5194 default: llvm_unreachable("unexpected opcode!");
5195 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5196 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5197 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5198 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5200 // A shift by zero is a plain MOVr, not a MOVsi.
5201 unsigned Amt = Inst.getOperand(2).getImm();
5202 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5203 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5205 TmpInst.setOpcode(Opc);
5206 TmpInst.addOperand(Inst.getOperand(0)); // Rd
5207 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5208 if (Opc == ARM::MOVsi)
5209 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5210 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5211 TmpInst.addOperand(Inst.getOperand(4));
5212 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
// RRX alias: MOVsi with the rrx shift operator and amount 0.
5217 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5219 TmpInst.setOpcode(ARM::MOVsi);
5220 TmpInst.addOperand(Inst.getOperand(0)); // Rd
5221 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5222 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5223 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5224 TmpInst.addOperand(Inst.getOperand(3));
5225 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
// Canonicalize single-register LDM/STM writeback forms to the equivalent
// post-/pre-indexed LDR/STR instruction, per the ARM ARM.
5229 case ARM::t2LDMIA_UPD: {
5230 // If this is a load of a single register, then we should use
5231 // a post-indexed LDR instruction instead, per the ARM ARM.
5232 if (Inst.getNumOperands() != 5)
5235 TmpInst.setOpcode(ARM::t2LDR_POST);
5236 TmpInst.addOperand(Inst.getOperand(4)); // Rt
5237 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5238 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5239 TmpInst.addOperand(MCOperand::CreateImm(4));
5240 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5241 TmpInst.addOperand(Inst.getOperand(3));
5245 case ARM::t2STMDB_UPD: {
5246 // If this is a store of a single register, then we should use
5247 // a pre-indexed STR instruction instead, per the ARM ARM.
5248 if (Inst.getNumOperands() != 5)
5251 TmpInst.setOpcode(ARM::t2STR_PRE);
5252 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5253 TmpInst.addOperand(Inst.getOperand(4)); // Rt
5254 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5255 TmpInst.addOperand(MCOperand::CreateImm(-4));
5256 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5257 TmpInst.addOperand(Inst.getOperand(3));
5261 case ARM::LDMIA_UPD:
5262 // If this is a load of a single register via a 'pop', then we should use
5263 // a post-indexed LDR instruction instead, per the ARM ARM.
5264 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5265 Inst.getNumOperands() == 5) {
5267 TmpInst.setOpcode(ARM::LDR_POST_IMM);
5268 TmpInst.addOperand(Inst.getOperand(4)); // Rt
5269 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5270 TmpInst.addOperand(Inst.getOperand(1)); // Rn
5271 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
5272 TmpInst.addOperand(MCOperand::CreateImm(4));
5273 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5274 TmpInst.addOperand(Inst.getOperand(3));
5279 case ARM::STMDB_UPD:
5280 // If this is a store of a single register via a 'push', then we should use
5281 // a pre-indexed STR instruction instead, per the ARM ARM.
5282 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5283 Inst.getNumOperands() == 5) {
5285 TmpInst.setOpcode(ARM::STR_PRE_IMM);
5286 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5287 TmpInst.addOperand(Inst.getOperand(4)); // Rt
5288 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5289 TmpInst.addOperand(MCOperand::CreateImm(-4));
5290 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5291 TmpInst.addOperand(Inst.getOperand(3));
// Thumb2 encoding-preference transformations for add/sub immediates.
5295 case ARM::t2ADDri12:
5296 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5297 // mnemonic was used (not "addw"), encoding T3 is preferred.
5298 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5299 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5301 Inst.setOpcode(ARM::t2ADDri);
5302 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5304 case ARM::t2SUBri12:
5305 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5306 // mnemonic was used (not "subw"), encoding T3 is preferred.
5307 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5308 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5310 Inst.setOpcode(ARM::t2SUBri);
5311 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5314 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5315 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5316 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5317 // to encoding T1 if <Rd> is omitted."
5318 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5319 Inst.setOpcode(ARM::tADDi3);
5324 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5325 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5326 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5327 // to encoding T1 if <Rd> is omitted."
5328 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5329 Inst.setOpcode(ARM::tSUBi3);
5333 case ARM::t2ADDrr: {
5334 // If the destination and first source operand are the same, and
5335 // there's no setting of the flags, use encoding T2 instead of T3.
5336 // Note that this is only for ADD, not SUB. This mirrors the system
5337 // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5338 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5339 Inst.getOperand(5).getReg() != 0 ||
5340 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5341 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5344 TmpInst.setOpcode(ARM::tADDhirr);
5345 TmpInst.addOperand(Inst.getOperand(0));
5346 TmpInst.addOperand(Inst.getOperand(0));
5347 TmpInst.addOperand(Inst.getOperand(2));
5348 TmpInst.addOperand(Inst.getOperand(3));
5349 TmpInst.addOperand(Inst.getOperand(4));
// Branch opcode selection depends on the condition code and whether we
// are currently inside an IT block.
5354 // A Thumb conditional branch outside of an IT block is a tBcc.
5355 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5356 Inst.setOpcode(ARM::tBcc);
5361 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5362 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5363 Inst.setOpcode(ARM::t2Bcc);
5368 // If the conditional is AL or we're in an IT block, we really want t2B.
5369 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5370 Inst.setOpcode(ARM::t2B);
5375 // If the conditional is AL, we really want tB.
5376 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5377 Inst.setOpcode(ARM::tB);
// Widen 16-bit LDM/STM forms to the 32-bit encodings when the register
// list or writeback cannot be expressed in the narrow encoding.
5382 // If the register list contains any high registers, or if the writeback
5383 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5384 // instead if we're in Thumb2. Otherwise, this should have generated
5385 // an error in validateInstruction().
5386 unsigned Rn = Inst.getOperand(0).getReg();
5387 bool hasWritebackToken =
5388 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5389 static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5390 bool listContainsBase;
5391 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5392 (!listContainsBase && !hasWritebackToken) ||
5393 (listContainsBase && hasWritebackToken)) {
5394 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5395 assert (isThumbTwo());
5396 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5397 // If we're switching to the updating version, we need to insert
5398 // the writeback tied operand.
5399 if (hasWritebackToken)
5400 Inst.insert(Inst.begin(),
5401 MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5406 case ARM::tSTMIA_UPD: {
5407 // If the register list contains any high registers, we need to use
5408 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5409 // should have generated an error in validateInstruction().
5410 unsigned Rn = Inst.getOperand(0).getReg();
5411 bool listContainsBase;
5412 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5413 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5414 assert (isThumbTwo());
5415 Inst.setOpcode(ARM::t2STMIA_UPD);
// pop/push with high registers widen to SP-based t2LDMIA_UPD/t2STMDB_UPD.
5421 bool listContainsBase;
5422 // If the register list contains any high registers, we need to use
5423 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5424 // should have generated an error in validateInstruction().
5425 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5427 assert (isThumbTwo());
5428 Inst.setOpcode(ARM::t2LDMIA_UPD);
5429 // Add the base register and writeback operands.
5430 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5431 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5435 bool listContainsBase;
5436 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5438 assert (isThumbTwo());
5439 Inst.setOpcode(ARM::t2STMDB_UPD);
5440 // Add the base register and writeback operands.
5441 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5442 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
// Narrow 32-bit instructions to 16-bit encodings when legal and not
// explicitly suppressed with a ".w" width specifier.
5446 // If we can use the 16-bit encoding and the user didn't explicitly
5447 // request the 32-bit variant, transform it here.
5448 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5449 Inst.getOperand(1).getImm() <= 255 &&
5450 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5451 Inst.getOperand(4).getReg() == ARM::CPSR) ||
5452 (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5453 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5454 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5455 // The operands aren't in the same order for tMOVi8...
5457 TmpInst.setOpcode(ARM::tMOVi8);
5458 TmpInst.addOperand(Inst.getOperand(0));
5459 TmpInst.addOperand(Inst.getOperand(4));
5460 TmpInst.addOperand(Inst.getOperand(1));
5461 TmpInst.addOperand(Inst.getOperand(2));
5462 TmpInst.addOperand(Inst.getOperand(3));
5469 // If we can use the 16-bit encoding and the user didn't explicitly
5470 // request the 32-bit variant, transform it here.
5471 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5472 isARMLowRegister(Inst.getOperand(1).getReg()) &&
5473 Inst.getOperand(2).getImm() == ARMCC::AL &&
5474 Inst.getOperand(4).getReg() == ARM::CPSR &&
5475 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5476 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5477 // The operands aren't the same for tMOV[S]r... (no cc_out)
5479 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5480 TmpInst.addOperand(Inst.getOperand(0));
5481 TmpInst.addOperand(Inst.getOperand(1));
5482 TmpInst.addOperand(Inst.getOperand(2));
5483 TmpInst.addOperand(Inst.getOperand(3));
5493 // If we can use the 16-bit encoding and the user didn't explicitly
5494 // request the 32-bit variant, transform it here.
5495 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5496 isARMLowRegister(Inst.getOperand(1).getReg()) &&
5497 Inst.getOperand(2).getImm() == 0 &&
5498 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5499 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5501 switch (Inst.getOpcode()) {
5502 default: llvm_unreachable("Illegal opcode!");
5503 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5504 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5505 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5506 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5508 // The operands aren't the same for thumb1 (no rotate operand).
5510 TmpInst.setOpcode(NewOpc);
5511 TmpInst.addOperand(Inst.getOperand(0));
5512 TmpInst.addOperand(Inst.getOperand(1));
5513 TmpInst.addOperand(Inst.getOperand(3));
5514 TmpInst.addOperand(Inst.getOperand(4));
// IT instruction: normalize the mask operand into its encoded form and
// record the parser-side IT block state used by subsequent instructions.
5521 // The mask bits for all but the first condition are represented as
5522 // the low bit of the condition code value implies 't'. We currently
5523 // always have 1 implies 't', so XOR toggle the bits if the low bit
5524 // of the condition code is zero. The encoding also expects the low
5525 // bit of the condition to be encoded as bit 4 of the mask operand,
5526 // so mask that in if needed
5527 MCOperand &MO = Inst.getOperand(1);
5528 unsigned Mask = MO.getImm();
5529 unsigned OrigMask = Mask;
5530 unsigned TZ = CountTrailingZeros_32(Mask);
5531 if ((Inst.getOperand(0).getImm() & 1) == 0) {
5532 assert(Mask && TZ <= 3 && "illegal IT mask value!");
5533 for (unsigned i = 3; i != TZ; --i)
5539 // Set up the IT block state according to the IT instruction we just
5541 assert(!inITBlock() && "nested IT blocks?!");
5542 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5543 ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5544 ITState.CurPosition = 0;
5545 ITState.FirstCond = true;
5552 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5553 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5554 // suffix depending on whether they're in an IT block or not.
5555 unsigned Opc = Inst.getOpcode();
5556 const MCInstrDesc &MCID = getInstDesc(Opc);
5557 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5558 assert(MCID.hasOptionalDef() &&
5559 "optionally flag setting instruction missing optional def operand");
5560 assert(MCID.NumOperands == Inst.getNumOperands() &&
5561 "operand count mismatch!");
5562 // Find the optional-def operand (cc_out).
5565 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
5568 // If we're parsing Thumb1, reject it completely.
5569 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5570 return Match_MnemonicFail;
5571 // If we're parsing Thumb2, which form is legal depends on whether we're
5573 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5575 return Match_RequiresITBlock;
5576 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5578 return Match_RequiresNotITBlock;
5580 // Some high-register supporting Thumb1 encodings only allow both registers
5581 // to be from r0-r7 when in Thumb2.
5582 else if (Opc == ARM::tADDhirr && isThumbOne() &&
5583 isARMLowRegister(Inst.getOperand(1).getReg()) &&
5584 isARMLowRegister(Inst.getOperand(2).getReg()))
5585 return Match_RequiresThumb2;
5586 // Others only require ARMv6 or later.
5587 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5588 isARMLowRegister(Inst.getOperand(0).getReg()) &&
5589 isARMLowRegister(Inst.getOperand(1).getReg()))
5590 return Match_RequiresV6;
5591 return Match_Success;
// Run the table-generated matcher on the parsed operands, then validate,
// post-process (possibly repeatedly — transformations can chain), advance
// the IT-block position, and emit the instruction. On any failure path a
// diagnostic is reported at the most specific location available.
5595 MatchAndEmitInstruction(SMLoc IDLoc,
5596 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5600 unsigned MatchResult;
5601 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5602 switch (MatchResult) {
5605 // Context sensitive operand constraints aren't handled by the matcher,
5606 // so check them here.
5607 if (validateInstruction(Inst, Operands)) {
5608 // Still progress the IT block, otherwise one wrong condition causes
5609 // nasty cascading errors.
5610 forwardITPosition();
5614 // Some instructions need post-processing to, for example, tweak which
5615 // encoding is selected. Loop on it while changes happen so the
5616 // individual transformations can chain off each other. E.g.,
5617 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
5618 while (processInstruction(Inst, Operands))
5621 // Only move forward at the very end so that everything in validate
5622 // and process gets a consistent answer about whether we're in an IT
5624 forwardITPosition();
5626 Out.EmitInstruction(Inst);
5628 case Match_MissingFeature:
5629 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
5631 case Match_InvalidOperand: {
5632 SMLoc ErrorLoc = IDLoc;
// When the matcher identified the offending operand, point the diagnostic
// at that operand's start location rather than the mnemonic.
5633 if (ErrorInfo != ~0U) {
5634 if (ErrorInfo >= Operands.size())
5635 return Error(IDLoc, "too few operands for instruction");
5637 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
5638 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
5641 return Error(ErrorLoc, "invalid operand for instruction");
5643 case Match_MnemonicFail:
5644 return Error(IDLoc, "invalid instruction");
5645 case Match_ConversionFail:
5646 // The converter function will have already emitted a diagnostic.
5648 case Match_RequiresNotITBlock:
5649 return Error(IDLoc, "flag setting instruction only valid outside IT block");
5650 case Match_RequiresITBlock:
5651 return Error(IDLoc, "instruction only valid inside IT block");
5652 case Match_RequiresV6:
5653 return Error(IDLoc, "instruction variant requires ARMv6 or later");
5654 case Match_RequiresThumb2:
5655 return Error(IDLoc, "instruction variant requires Thumb2");
5658 llvm_unreachable("Implement any new match types added!");
5662 /// parseDirective parses the arm specific directives
// Dispatch on the directive identifier to the matching parseDirective*
// helper; each helper's return value is propagated to the generic parser.
5663 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
5664 StringRef IDVal = DirectiveID.getIdentifier();
5665 if (IDVal == ".word")
5666 return parseDirectiveWord(4, DirectiveID.getLoc());
5667 else if (IDVal == ".thumb")
5668 return parseDirectiveThumb(DirectiveID.getLoc());
5669 else if (IDVal == ".arm")
5670 return parseDirectiveARM(DirectiveID.getLoc());
5671 else if (IDVal == ".thumb_func")
5672 return parseDirectiveThumbFunc(DirectiveID.getLoc());
5673 else if (IDVal == ".code")
5674 return parseDirectiveCode(DirectiveID.getLoc());
5675 else if (IDVal == ".syntax")
5676 return parseDirectiveSyntax(DirectiveID.getLoc());
5680 /// parseDirectiveWord
5681 /// ::= .word [ expression (, expression)* ]
// Parse a comma-separated list of expressions and emit each as a value of
// 'Size' bytes via the streamer. Errors are reported at the directive's
// location L.
5682 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
// An empty operand list (bare ".word") is accepted and emits nothing.
5683 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5685 const MCExpr *Value;
5686 if (getParser().ParseExpression(Value))
5689 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
5691 if (getLexer().is(AsmToken::EndOfStatement))
5694 // FIXME: Improve diagnostic.
5695 if (getLexer().isNot(AsmToken::Comma))
5696 return Error(L, "unexpected token in directive");
5705 /// parseDirectiveThumb
///  ::= .thumb
// Switch the assembler into Thumb mode: rejects trailing tokens, then
// emits the MCAF_Code16 assembler flag to the streamer.
5707 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
5708 if (getLexer().isNot(AsmToken::EndOfStatement))
5709 return Error(L, "unexpected token in directive");
5714 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
5718 /// parseDirectiveARM
///  ::= .arm
// Switch the assembler into ARM mode: rejects trailing tokens, then emits
// the MCAF_Code32 assembler flag to the streamer.
5720 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
5721 if (getLexer().isNot(AsmToken::EndOfStatement))
5722 return Error(L, "unexpected token in directive");
5727 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
5731 /// parseDirectiveThumbFunc
5732 /// ::= .thumbfunc symbol_name
// Marks a symbol as a Thumb-mode function. On MachO/Darwin (detected via
// hasSubsectionsViaSymbols) the symbol name follows the directive itself;
// otherwise the name is taken from the next line (see FIXME below).
5733 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
5734 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
5735 bool isMachO = MAI.hasSubsectionsViaSymbols();
// NOTE(review): the declaration of Name and the isMachO branch structure
// are in lines elided from this excerpt -- the two Name assignments below
// presumably sit on opposite sides of that branch; confirm.
5738 // Darwin asm has function name after .thumb_func direction
5741 const AsmToken &Tok = Parser.getTok();
// Accept either a bare identifier or a quoted string as the symbol name.
5742 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
5743 return Error(L, "unexpected token in .thumb_func directive");
5744 Name = Tok.getIdentifier();
5745 Parser.Lex(); // Consume the identifier token.
5748 if (getLexer().isNot(AsmToken::EndOfStatement))
5749 return Error(L, "unexpected token in directive");
5752 // FIXME: assuming function name will be the line following .thumb_func
5754 Name = Parser.getTok().getIdentifier();
5757 // Mark symbol as a thumb symbol.
5758 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
5759 getParser().getStreamer().EmitThumbFunc(Func);
5763 /// parseDirectiveSyntax
5764 /// ::= .syntax unified | divided
// Accepts ".syntax unified" (case variants "unified"/"UNIFIED"); the
// pre-unified "divided" syntax is explicitly rejected as unsupported.
5765 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
5766 const AsmToken &Tok = Parser.getTok();
5767 if (Tok.isNot(AsmToken::Identifier))
5768 return Error(L, "unexpected token in .syntax directive");
5769 StringRef Mode = Tok.getString();
5770 if (Mode == "unified" || Mode == "UNIFIED")
// NOTE(review): the unified-mode accept path (token consumption) is in a
// line elided from this excerpt.
5772 else if (Mode == "divided" || Mode == "DIVIDED")
5773 return Error(L, "'.syntax divided' arm asssembly not supported");
5775 return Error(L, "unrecognized syntax mode in .syntax directive");
// The mode keyword must be the last token on the line.
5777 if (getLexer().isNot(AsmToken::EndOfStatement))
5778 return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5781 // TODO tell the MC streamer the mode
5782 // getParser().getStreamer().Emit???();
5786 /// parseDirectiveCode
5787 /// ::= .code 16 | 32
// Handles ".code N": switches between 16-bit Thumb (MCAF_Code16) and
// 32-bit ARM (MCAF_Code32) encoding. Only the literal operands 16 and 32
// are valid.
5788 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
5789 const AsmToken &Tok = Parser.getTok();
5790 if (Tok.isNot(AsmToken::Integer))
5791 return Error(L, "unexpected token in .code directive");
5792 int64_t Val = Parser.getTok().getIntVal();
// NOTE(review): the Val == 16/32 validity test and the branch selecting
// which flag to emit are in lines elided from this excerpt.
5798 return Error(L, "invalid operand to .code directive");
5800 if (getLexer().isNot(AsmToken::EndOfStatement))
5801 return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5807 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
5811 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
// Forward declaration: the lexer registers itself the same way; see below.
5817 extern "C" void LLVMInitializeARMAsmLexer();
5819 /// Force static initialization.
// Registers the ARM and Thumb asm-parser factories with the TargetRegistry
// so clients can construct an ARMAsmParser by target lookup, then chains
// into the lexer's initializer.
5820 extern "C" void LLVMInitializeARMAsmParser() {
5821 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
5822 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
5823 LLVMInitializeARMAsmLexer();
5826 #define GET_REGISTER_MATCHER
5827 #define GET_MATCHER_IMPLEMENTATION
5828 #include "ARMGenAsmMatcher.inc"