1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "ARMFPUName.h"
11 #include "ARMFeatures.h"
12 #include "MCTargetDesc/ARMAddressingModes.h"
13 #include "MCTargetDesc/ARMArchName.h"
14 #include "MCTargetDesc/ARMBaseInfo.h"
15 #include "MCTargetDesc/ARMMCExpr.h"
16 #include "llvm/ADT/OwningPtr.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringExtras.h"
20 #include "llvm/ADT/StringSwitch.h"
21 #include "llvm/ADT/Twine.h"
22 #include "llvm/MC/MCAsmInfo.h"
23 #include "llvm/MC/MCAssembler.h"
24 #include "llvm/MC/MCContext.h"
25 #include "llvm/MC/MCDisassembler.h"
26 #include "llvm/MC/MCELF.h"
27 #include "llvm/MC/MCELFStreamer.h"
28 #include "llvm/MC/MCELFSymbolFlags.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
33 #include "llvm/MC/MCObjectFileInfo.h"
34 #include "llvm/MC/MCParser/MCAsmLexer.h"
35 #include "llvm/MC/MCParser/MCAsmParser.h"
36 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37 #include "llvm/MC/MCRegisterInfo.h"
38 #include "llvm/MC/MCSection.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSubtargetInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCTargetAsmParser.h"
43 #include "llvm/Support/ARMBuildAttributes.h"
44 #include "llvm/Support/ARMEHABI.h"
45 #include "llvm/Support/COFF.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/ELF.h"
48 #include "llvm/Support/MathExtras.h"
49 #include "llvm/Support/SourceMgr.h"
50 #include "llvm/Support/TargetRegistry.h"
51 #include "llvm/Support/raw_ostream.h"
59 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
64 typedef SmallVector<SMLoc, 4> Locs;
69 Locs PersonalityIndexLocs;
74 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
76 bool hasFnStart() const { return !FnStartLocs.empty(); }
77 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
78 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
79 bool hasPersonality() const {
80 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
83 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
84 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
85 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
86 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
87 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
89 void saveFPReg(int Reg) { FPReg = Reg; }
90 int getFPReg() const { return FPReg; }
92 void emitFnStartLocNotes() const {
93 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
95 Parser.Note(*FI, ".fnstart was specified here");
97 void emitCantUnwindLocNotes() const {
98 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
99 UE = CantUnwindLocs.end(); UI != UE; ++UI)
100 Parser.Note(*UI, ".cantunwind was specified here");
102 void emitHandlerDataLocNotes() const {
103 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
104 HE = HandlerDataLocs.end(); HI != HE; ++HI)
105 Parser.Note(*HI, ".handlerdata was specified here");
107 void emitPersonalityLocNotes() const {
108 for (Locs::const_iterator PI = PersonalityLocs.begin(),
109 PE = PersonalityLocs.end(),
110 PII = PersonalityIndexLocs.begin(),
111 PIE = PersonalityIndexLocs.end();
112 PI != PE || PII != PIE;) {
113 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
114 Parser.Note(*PI++, ".personality was specified here");
115 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
116 Parser.Note(*PII++, ".personalityindex was specified here");
118 llvm_unreachable(".personality and .personalityindex cannot be "
119 "at the same location");
124 FnStartLocs = Locs();
125 CantUnwindLocs = Locs();
126 PersonalityLocs = Locs();
127 HandlerDataLocs = Locs();
128 PersonalityIndexLocs = Locs();
133 class ARMAsmParser : public MCTargetAsmParser {
134 MCSubtargetInfo &STI;
136 const MCInstrInfo &MII;
137 const MCRegisterInfo *MRI;
140 ARMTargetStreamer &getTargetStreamer() {
141 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
142 return static_cast<ARMTargetStreamer &>(TS);
146   // Map of register aliases registered via the .req directive.
146 StringMap<unsigned> RegisterReqs;
148 bool NextSymbolIsThumb;
151 ARMCC::CondCodes Cond; // Condition for IT block.
152 unsigned Mask:4; // Condition mask for instructions.
153 // Starting at first 1 (from lsb).
154 // '1' condition as indicated in IT.
155 // '0' inverse of condition (else).
156 // Count of instructions in IT block is
157 // 4 - trailingzeroes(mask)
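                   // For example, per that formula: a mask of 0b1000 encodes
                   // a one-instruction block, 0b0100 two, 0b0010 three, and
                   // 0b0001 four.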
159 bool FirstCond; // Explicit flag for when we're parsing the
160 // First instruction in the IT block. It's
161 // implied in the mask, so needs special
164 unsigned CurPosition; // Current position in parsing of IT
165 // block. In range [0,3]. Initialized
166 // according to count of instructions in block.
167 // ~0U if no active IT block.
169 bool inITBlock() { return ITState.CurPosition != ~0U;}
170 void forwardITPosition() {
171 if (!inITBlock()) return;
172 // Move to the next instruction in the IT block, if there is one. If not,
173 // mark the block as done.
174 unsigned TZ = countTrailingZeros(ITState.Mask);
175 if (++ITState.CurPosition == 5 - TZ)
176 ITState.CurPosition = ~0U; // Done with the IT block after this.
180 MCAsmParser &getParser() const { return Parser; }
181 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
183 void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
184 return Parser.Note(L, Msg, Ranges);
186 bool Warning(SMLoc L, const Twine &Msg,
187 ArrayRef<SMRange> Ranges = None) {
188 return Parser.Warning(L, Msg, Ranges);
190 bool Error(SMLoc L, const Twine &Msg,
191 ArrayRef<SMRange> Ranges = None) {
192 return Parser.Error(L, Msg, Ranges);
195 int tryParseRegister();
196 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
197 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
198 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
199 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
200 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
201 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
202 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
203 unsigned &ShiftAmount);
204 bool parseLiteralValues(unsigned Size, SMLoc L);
205 bool parseDirectiveThumb(SMLoc L);
206 bool parseDirectiveARM(SMLoc L);
207 bool parseDirectiveThumbFunc(SMLoc L);
208 bool parseDirectiveCode(SMLoc L);
209 bool parseDirectiveSyntax(SMLoc L);
210 bool parseDirectiveReq(StringRef Name, SMLoc L);
211 bool parseDirectiveUnreq(SMLoc L);
212 bool parseDirectiveArch(SMLoc L);
213 bool parseDirectiveEabiAttr(SMLoc L);
214 bool parseDirectiveCPU(SMLoc L);
215 bool parseDirectiveFPU(SMLoc L);
216 bool parseDirectiveFnStart(SMLoc L);
217 bool parseDirectiveFnEnd(SMLoc L);
218 bool parseDirectiveCantUnwind(SMLoc L);
219 bool parseDirectivePersonality(SMLoc L);
220 bool parseDirectiveHandlerData(SMLoc L);
221 bool parseDirectiveSetFP(SMLoc L);
222 bool parseDirectivePad(SMLoc L);
223 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
224 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
225 bool parseDirectiveLtorg(SMLoc L);
226 bool parseDirectiveEven(SMLoc L);
227 bool parseDirectivePersonalityIndex(SMLoc L);
228 bool parseDirectiveUnwindRaw(SMLoc L);
229 bool parseDirectiveTLSDescSeq(SMLoc L);
230 bool parseDirectiveMovSP(SMLoc L);
231 bool parseDirectiveObjectArch(SMLoc L);
232 bool parseDirectiveArchExtension(SMLoc L);
233 bool parseDirectiveAlign(SMLoc L);
234 bool parseDirectiveThumbSet(SMLoc L);
236 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
237 bool &CarrySetting, unsigned &ProcessorIMod,
239 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
240 bool &CanAcceptCarrySet,
241 bool &CanAcceptPredicationCode);
243 bool isThumb() const {
244 // FIXME: Can tablegen auto-generate this?
245 return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
247 bool isThumbOne() const {
248 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
250 bool isThumbTwo() const {
251 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
253 bool hasThumb() const {
254 return STI.getFeatureBits() & ARM::HasV4TOps;
256 bool hasV6Ops() const {
257 return STI.getFeatureBits() & ARM::HasV6Ops;
259 bool hasV6MOps() const {
260 return STI.getFeatureBits() & ARM::HasV6MOps;
262 bool hasV7Ops() const {
263 return STI.getFeatureBits() & ARM::HasV7Ops;
265 bool hasV8Ops() const {
266 return STI.getFeatureBits() & ARM::HasV8Ops;
268 bool hasARM() const {
269 return !(STI.getFeatureBits() & ARM::FeatureNoARM);
273 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
274 setAvailableFeatures(FB);
276 bool isMClass() const {
277 return STI.getFeatureBits() & ARM::FeatureMClass;
280 /// @name Auto-generated Match Functions
283 #define GET_ASSEMBLER_HEADER
284 #include "ARMGenAsmMatcher.inc"
288 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
289 OperandMatchResultTy parseCoprocNumOperand(
290 SmallVectorImpl<MCParsedAsmOperand*>&);
291 OperandMatchResultTy parseCoprocRegOperand(
292 SmallVectorImpl<MCParsedAsmOperand*>&);
293 OperandMatchResultTy parseCoprocOptionOperand(
294 SmallVectorImpl<MCParsedAsmOperand*>&);
295 OperandMatchResultTy parseMemBarrierOptOperand(
296 SmallVectorImpl<MCParsedAsmOperand*>&);
297 OperandMatchResultTy parseInstSyncBarrierOptOperand(
298 SmallVectorImpl<MCParsedAsmOperand*>&);
299 OperandMatchResultTy parseProcIFlagsOperand(
300 SmallVectorImpl<MCParsedAsmOperand*>&);
301 OperandMatchResultTy parseMSRMaskOperand(
302 SmallVectorImpl<MCParsedAsmOperand*>&);
303 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
304 StringRef Op, int Low, int High);
305 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
306 return parsePKHImm(O, "lsl", 0, 31);
308 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
309 return parsePKHImm(O, "asr", 1, 32);
311 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
312 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
313 OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
314 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
315 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
316 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
317 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
318 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
319 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
322 // Asm Match Converter Methods
323 void cvtThumbMultiply(MCInst &Inst,
324 const SmallVectorImpl<MCParsedAsmOperand*> &);
325 void cvtThumbBranches(MCInst &Inst,
326 const SmallVectorImpl<MCParsedAsmOperand*> &);
328 bool validateInstruction(MCInst &Inst,
329 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
330 bool processInstruction(MCInst &Inst,
331 const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
332 bool shouldOmitCCOutOperand(StringRef Mnemonic,
333 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
334 bool shouldOmitPredicateOperand(StringRef Mnemonic,
335 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
337 enum ARMMatchResultTy {
338 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
339 Match_RequiresNotITBlock,
341 Match_RequiresThumb2,
342 #define GET_OPERAND_DIAGNOSTIC_TYPES
343 #include "ARMGenAsmMatcher.inc"
347 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
348 const MCInstrInfo &MII)
349 : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), UC(_Parser) {
350 MCAsmParserExtension::Initialize(_Parser);
352 // Cache the MCRegisterInfo.
353 MRI = getContext().getRegisterInfo();
355 // Initialize the set of available features.
356 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
358 // Not in an ITBlock to start with.
359 ITState.CurPosition = ~0U;
361 NextSymbolIsThumb = false;
364 // Implementation of the MCTargetAsmParser interface:
365 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
367 ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
369 SmallVectorImpl<MCParsedAsmOperand*> &Operands) override;
370 bool ParseDirective(AsmToken DirectiveID) override;
372 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
373 unsigned Kind) override;
374 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
376 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
377 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
378 MCStreamer &Out, unsigned &ErrorInfo,
379 bool MatchingInlineAsm) override;
380 void onLabelParsed(MCSymbol *Symbol) override;
382 } // end anonymous namespace
386 /// ARMOperand - Instances of this class represent a parsed ARM machine
388 class ARMOperand : public MCParsedAsmOperand {
398 k_InstSyncBarrierOpt,
409 k_VectorListAllLanes,
415 k_BitfieldDescriptor,
419 SMLoc StartLoc, EndLoc;
420 SmallVector<unsigned, 8> Registers;
423 ARMCC::CondCodes Val;
430 struct CoprocOptionOp {
443 ARM_ISB::InstSyncBOpt Val;
447 ARM_PROC::IFlags Val;
463 // A vector register list is a sequential list of 1 to 4 registers.
464 struct VectorListOp {
471 struct VectorIndexOp {
479 /// Combined record for all forms of ARM address expressions.
482 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
484 const MCConstantExpr *OffsetImm; // Offset immediate value
485 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
486 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
487 unsigned ShiftImm; // shift for OffsetReg.
488 unsigned Alignment; // 0 = no alignment specified
489 // n = alignment in bytes (2, 4, 8, 16, or 32)
490 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
493 struct PostIdxRegOp {
496 ARM_AM::ShiftOpc ShiftTy;
500 struct ShifterImmOp {
505 struct RegShiftedRegOp {
506 ARM_AM::ShiftOpc ShiftTy;
512 struct RegShiftedImmOp {
513 ARM_AM::ShiftOpc ShiftTy;
530 struct CoprocOptionOp CoprocOption;
531 struct MBOptOp MBOpt;
532 struct ISBOptOp ISBOpt;
533 struct ITMaskOp ITMask;
534 struct IFlagsOp IFlags;
535 struct MMaskOp MMask;
538 struct VectorListOp VectorList;
539 struct VectorIndexOp VectorIndex;
541 struct MemoryOp Memory;
542 struct PostIdxRegOp PostIdxReg;
543 struct ShifterImmOp ShifterImm;
544 struct RegShiftedRegOp RegShiftedReg;
545 struct RegShiftedImmOp RegShiftedImm;
546 struct RotImmOp RotImm;
547 struct BitfieldOp Bitfield;
550 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
552 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
554 StartLoc = o.StartLoc;
571 case k_DPRRegisterList:
572 case k_SPRRegisterList:
573 Registers = o.Registers;
576 case k_VectorListAllLanes:
577 case k_VectorListIndexed:
578 VectorList = o.VectorList;
585 CoprocOption = o.CoprocOption;
590 case k_MemBarrierOpt:
593 case k_InstSyncBarrierOpt:
598 case k_PostIndexRegister:
599 PostIdxReg = o.PostIdxReg;
607 case k_ShifterImmediate:
608 ShifterImm = o.ShifterImm;
610 case k_ShiftedRegister:
611 RegShiftedReg = o.RegShiftedReg;
613 case k_ShiftedImmediate:
614 RegShiftedImm = o.RegShiftedImm;
616 case k_RotateImmediate:
619 case k_BitfieldDescriptor:
620 Bitfield = o.Bitfield;
623 VectorIndex = o.VectorIndex;
628 /// getStartLoc - Get the location of the first token of this operand.
629 SMLoc getStartLoc() const override { return StartLoc; }
630 /// getEndLoc - Get the location of the last token of this operand.
631 SMLoc getEndLoc() const override { return EndLoc; }
632 /// getLocRange - Get the range between the first and last token of this
634 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
636 ARMCC::CondCodes getCondCode() const {
637 assert(Kind == k_CondCode && "Invalid access!");
641 unsigned getCoproc() const {
642 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
646 StringRef getToken() const {
647 assert(Kind == k_Token && "Invalid access!");
648 return StringRef(Tok.Data, Tok.Length);
651 unsigned getReg() const override {
652 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
656 const SmallVectorImpl<unsigned> &getRegList() const {
657 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
658 Kind == k_SPRRegisterList) && "Invalid access!");
662 const MCExpr *getImm() const {
663 assert(isImm() && "Invalid access!");
667 unsigned getVectorIndex() const {
668 assert(Kind == k_VectorIndex && "Invalid access!");
669 return VectorIndex.Val;
672 ARM_MB::MemBOpt getMemBarrierOpt() const {
673 assert(Kind == k_MemBarrierOpt && "Invalid access!");
677 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
678 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
682 ARM_PROC::IFlags getProcIFlags() const {
683 assert(Kind == k_ProcIFlags && "Invalid access!");
687 unsigned getMSRMask() const {
688 assert(Kind == k_MSRMask && "Invalid access!");
692 bool isCoprocNum() const { return Kind == k_CoprocNum; }
693 bool isCoprocReg() const { return Kind == k_CoprocReg; }
694 bool isCoprocOption() const { return Kind == k_CoprocOption; }
695 bool isCondCode() const { return Kind == k_CondCode; }
696 bool isCCOut() const { return Kind == k_CCOut; }
697 bool isITMask() const { return Kind == k_ITCondMask; }
698 bool isITCondCode() const { return Kind == k_CondCode; }
699 bool isImm() const override { return Kind == k_Immediate; }
700   // checks whether this operand is an unsigned offset which fits in a field
701 // of specified width and scaled by a specific number of bits
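  // For instance, isUnsignedOffset<8, 2>() accepts constants that are
  // multiples of 4 in the range [0, 1020] (Align = 1 << 2 = 4, Max = 4 * 255).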
702 template<unsigned width, unsigned scale>
703 bool isUnsignedOffset() const {
704 if (!isImm()) return false;
705 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
706 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
707 int64_t Val = CE->getValue();
708 int64_t Align = 1LL << scale;
709 int64_t Max = Align * ((1LL << width) - 1);
710 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
714   // checks whether this operand is a signed offset which fits in a field
715 // of specified width and scaled by a specific number of bits
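  // For instance, isSignedOffset<8, 2>() accepts constants that are
  // multiples of 4 in the range [-512, 508] (Min = -4 * 128, Max = 4 * 127).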
716 template<unsigned width, unsigned scale>
717 bool isSignedOffset() const {
718 if (!isImm()) return false;
719 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
720 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
721 int64_t Val = CE->getValue();
722 int64_t Align = 1LL << scale;
723 int64_t Max = Align * ((1LL << (width-1)) - 1);
724 int64_t Min = -Align * (1LL << (width-1));
725 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
730 // checks whether this operand is a memory operand computed as an offset
731   // applied to PC. The offset may have 8 bits of magnitude and is represented
732   // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
733   // relocatable expression...
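  // For example, a constant offset of 1020 (255 * 4) passes the range check
  // below, while values that are not multiples of 4 or exceed 1020 do not.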
734 bool isThumbMemPC() const {
737 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
738 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
739 if (!CE) return false;
740 Val = CE->getValue();
743     if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
744     if (Memory.BaseRegNum != ARM::PC) return false;
745 Val = Memory.OffsetImm->getValue();
748 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
750 bool isFPImm() const {
751 if (!isImm()) return false;
752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
753 if (!CE) return false;
754 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
757 bool isFBits16() const {
758 if (!isImm()) return false;
759 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
760 if (!CE) return false;
761 int64_t Value = CE->getValue();
762 return Value >= 0 && Value <= 16;
764 bool isFBits32() const {
765 if (!isImm()) return false;
766 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
767 if (!CE) return false;
768 int64_t Value = CE->getValue();
769 return Value >= 1 && Value <= 32;
771 bool isImm8s4() const {
772 if (!isImm()) return false;
773 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
774 if (!CE) return false;
775 int64_t Value = CE->getValue();
776 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
778 bool isImm0_1020s4() const {
779 if (!isImm()) return false;
780 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
781 if (!CE) return false;
782 int64_t Value = CE->getValue();
783 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
785 bool isImm0_508s4() const {
786 if (!isImm()) return false;
787 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788 if (!CE) return false;
789 int64_t Value = CE->getValue();
790 return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
792 bool isImm0_508s4Neg() const {
793 if (!isImm()) return false;
794 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
795 if (!CE) return false;
796 int64_t Value = -CE->getValue();
797     // Explicitly exclude zero; we want that to use the normal 0_508 version.
798 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
800 bool isImm0_239() const {
801 if (!isImm()) return false;
802 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803 if (!CE) return false;
804 int64_t Value = CE->getValue();
805 return Value >= 0 && Value < 240;
807 bool isImm0_255() const {
808 if (!isImm()) return false;
809 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810 if (!CE) return false;
811 int64_t Value = CE->getValue();
812 return Value >= 0 && Value < 256;
814 bool isImm0_4095() const {
815 if (!isImm()) return false;
816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
817 if (!CE) return false;
818 int64_t Value = CE->getValue();
819 return Value >= 0 && Value < 4096;
821 bool isImm0_4095Neg() const {
822 if (!isImm()) return false;
823 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
824 if (!CE) return false;
825 int64_t Value = -CE->getValue();
826 return Value > 0 && Value < 4096;
828 bool isImm0_1() const {
829 if (!isImm()) return false;
830 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
831 if (!CE) return false;
832 int64_t Value = CE->getValue();
833 return Value >= 0 && Value < 2;
835 bool isImm0_3() const {
836 if (!isImm()) return false;
837 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
838 if (!CE) return false;
839 int64_t Value = CE->getValue();
840 return Value >= 0 && Value < 4;
842 bool isImm0_7() const {
843 if (!isImm()) return false;
844 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845 if (!CE) return false;
846 int64_t Value = CE->getValue();
847 return Value >= 0 && Value < 8;
849 bool isImm0_15() const {
850 if (!isImm()) return false;
851 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
852 if (!CE) return false;
853 int64_t Value = CE->getValue();
854 return Value >= 0 && Value < 16;
856 bool isImm0_31() const {
857 if (!isImm()) return false;
858 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
859 if (!CE) return false;
860 int64_t Value = CE->getValue();
861 return Value >= 0 && Value < 32;
863 bool isImm0_63() const {
864 if (!isImm()) return false;
865 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
866 if (!CE) return false;
867 int64_t Value = CE->getValue();
868 return Value >= 0 && Value < 64;
870 bool isImm8() const {
871 if (!isImm()) return false;
872 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
873 if (!CE) return false;
874 int64_t Value = CE->getValue();
877 bool isImm16() const {
878 if (!isImm()) return false;
879 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
880 if (!CE) return false;
881 int64_t Value = CE->getValue();
884 bool isImm32() const {
885 if (!isImm()) return false;
886 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
887 if (!CE) return false;
888 int64_t Value = CE->getValue();
891 bool isShrImm8() const {
892 if (!isImm()) return false;
893 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
894 if (!CE) return false;
895 int64_t Value = CE->getValue();
896 return Value > 0 && Value <= 8;
898 bool isShrImm16() const {
899 if (!isImm()) return false;
900 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
901 if (!CE) return false;
902 int64_t Value = CE->getValue();
903 return Value > 0 && Value <= 16;
905 bool isShrImm32() const {
906 if (!isImm()) return false;
907 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
908 if (!CE) return false;
909 int64_t Value = CE->getValue();
910 return Value > 0 && Value <= 32;
912 bool isShrImm64() const {
913 if (!isImm()) return false;
914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
915 if (!CE) return false;
916 int64_t Value = CE->getValue();
917 return Value > 0 && Value <= 64;
919 bool isImm1_7() const {
920 if (!isImm()) return false;
921 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
922 if (!CE) return false;
923 int64_t Value = CE->getValue();
924 return Value > 0 && Value < 8;
926 bool isImm1_15() const {
927 if (!isImm()) return false;
928 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
929 if (!CE) return false;
930 int64_t Value = CE->getValue();
931 return Value > 0 && Value < 16;
933 bool isImm1_31() const {
934 if (!isImm()) return false;
935 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
936 if (!CE) return false;
937 int64_t Value = CE->getValue();
938 return Value > 0 && Value < 32;
940 bool isImm1_16() const {
941 if (!isImm()) return false;
942 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
943 if (!CE) return false;
944 int64_t Value = CE->getValue();
945 return Value > 0 && Value < 17;
947 bool isImm1_32() const {
948 if (!isImm()) return false;
949 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
950 if (!CE) return false;
951 int64_t Value = CE->getValue();
952 return Value > 0 && Value < 33;
954 bool isImm0_32() const {
955 if (!isImm()) return false;
956 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
957 if (!CE) return false;
958 int64_t Value = CE->getValue();
959 return Value >= 0 && Value < 33;
961 bool isImm0_65535() const {
962 if (!isImm()) return false;
963 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
964 if (!CE) return false;
965 int64_t Value = CE->getValue();
966 return Value >= 0 && Value < 65536;
968 bool isImm256_65535Expr() const {
969 if (!isImm()) return false;
970 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
971 // If it's not a constant expression, it'll generate a fixup and be
973 if (!CE) return true;
974 int64_t Value = CE->getValue();
975 return Value >= 256 && Value < 65536;
977 bool isImm0_65535Expr() const {
978 if (!isImm()) return false;
979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
980 // If it's not a constant expression, it'll generate a fixup and be
982 if (!CE) return true;
983 int64_t Value = CE->getValue();
984 return Value >= 0 && Value < 65536;
986 bool isImm24bit() const {
987 if (!isImm()) return false;
988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
989 if (!CE) return false;
990 int64_t Value = CE->getValue();
991 return Value >= 0 && Value <= 0xffffff;
993 bool isImmThumbSR() const {
994 if (!isImm()) return false;
995 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
996 if (!CE) return false;
997 int64_t Value = CE->getValue();
998 return Value > 0 && Value < 33;
1000 bool isPKHLSLImm() const {
1001 if (!isImm()) return false;
1002 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1003 if (!CE) return false;
1004 int64_t Value = CE->getValue();
1005 return Value >= 0 && Value < 32;
1007 bool isPKHASRImm() const {
1008 if (!isImm()) return false;
1009 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1010 if (!CE) return false;
1011 int64_t Value = CE->getValue();
1012 return Value > 0 && Value <= 32;
1014 bool isAdrLabel() const {
1015 // If we have an immediate that's not a constant, treat it as a label
1016     // reference needing a fixup. If it is a constant but it can't fit
1017     // into the shift immediate encoding, we reject it.
1018 if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
1019 else return (isARMSOImm() || isARMSOImmNeg());
1021 bool isARMSOImm() const {
1022 if (!isImm()) return false;
1023 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1024 if (!CE) return false;
1025 int64_t Value = CE->getValue();
1026 return ARM_AM::getSOImmVal(Value) != -1;
1028 bool isARMSOImmNot() const {
1029 if (!isImm()) return false;
1030 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1031 if (!CE) return false;
1032 int64_t Value = CE->getValue();
1033 return ARM_AM::getSOImmVal(~Value) != -1;
1035 bool isARMSOImmNeg() const {
1036 if (!isImm()) return false;
1037 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1038 if (!CE) return false;
1039 int64_t Value = CE->getValue();
1040 // Only use this when not representable as a plain so_imm.
1041 return ARM_AM::getSOImmVal(Value) == -1 &&
1042 ARM_AM::getSOImmVal(-Value) != -1;
1044 bool isT2SOImm() const {
1045 if (!isImm()) return false;
1046 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1047 if (!CE) return false;
1048 int64_t Value = CE->getValue();
1049 return ARM_AM::getT2SOImmVal(Value) != -1;
1051 bool isT2SOImmNot() const {
1052 if (!isImm()) return false;
1053 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1054 if (!CE) return false;
1055 int64_t Value = CE->getValue();
1056 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1057 ARM_AM::getT2SOImmVal(~Value) != -1;
1059 bool isT2SOImmNeg() const {
1060 if (!isImm()) return false;
1061 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1062 if (!CE) return false;
1063 int64_t Value = CE->getValue();
1064 // Only use this when not representable as a plain so_imm.
1065 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1066 ARM_AM::getT2SOImmVal(-Value) != -1;
1068 bool isSetEndImm() const {
1069 if (!isImm()) return false;
1070 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1071 if (!CE) return false;
1072 int64_t Value = CE->getValue();
1073 return Value == 1 || Value == 0;
1075 bool isReg() const override { return Kind == k_Register; }
1076 bool isRegList() const { return Kind == k_RegisterList; }
1077 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1078 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1079 bool isToken() const override { return Kind == k_Token; }
1080 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1081 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1082 bool isMem() const override { return Kind == k_Memory; }
1083 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1084 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1085 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1086 bool isRotImm() const { return Kind == k_RotateImmediate; }
1087 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1088 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1089 bool isPostIdxReg() const {
1090     return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1092 bool isMemNoOffset(bool alignOK = false) const {
1095 // No offset of any kind.
1096 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
1097 (alignOK || Memory.Alignment == 0);
1099 bool isMemPCRelImm12() const {
1100 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1102 // Base register must be PC.
1103 if (Memory.BaseRegNum != ARM::PC)
1105 // Immediate offset in range [-4095, 4095].
1106 if (!Memory.OffsetImm) return true;
1107 int64_t Val = Memory.OffsetImm->getValue();
1108 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1110 bool isAlignedMemory() const {
1111 return isMemNoOffset(true);
1113 bool isAddrMode2() const {
1114 if (!isMem() || Memory.Alignment != 0) return false;
1115 // Check for register offset.
1116 if (Memory.OffsetRegNum) return true;
1117 // Immediate offset in range [-4095, 4095].
1118 if (!Memory.OffsetImm) return true;
1119 int64_t Val = Memory.OffsetImm->getValue();
1120 return Val > -4096 && Val < 4096;
1122 bool isAM2OffsetImm() const {
1123 if (!isImm()) return false;
1124 // Immediate offset in range [-4095, 4095].
1125 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1126 if (!CE) return false;
1127 int64_t Val = CE->getValue();
1128 return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1130 bool isAddrMode3() const {
1131 // If we have an immediate that's not a constant, treat it as a label
1132 // reference needing a fixup. If it is a constant, it's something else
1133 // and we reject it.
1134 if (isImm() && !isa<MCConstantExpr>(getImm()))
1136 if (!isMem() || Memory.Alignment != 0) return false;
1137 // No shifts are legal for AM3.
1138 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1139 // Check for register offset.
1140 if (Memory.OffsetRegNum) return true;
1141 // Immediate offset in range [-255, 255].
1142 if (!Memory.OffsetImm) return true;
1143 int64_t Val = Memory.OffsetImm->getValue();
1144 // The #-0 offset is encoded as INT32_MIN, and we have to check
1146 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1148 bool isAM3Offset() const {
1149 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1151 if (Kind == k_PostIndexRegister)
1152 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1153 // Immediate offset in range [-255, 255].
1154 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1155 if (!CE) return false;
1156 int64_t Val = CE->getValue();
1157 // Special case, #-0 is INT32_MIN.
1158 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1160 bool isAddrMode5() const {
1161 // If we have an immediate that's not a constant, treat it as a label
1162 // reference needing a fixup. If it is a constant, it's something else
1163 // and we reject it.
1164 if (isImm() && !isa<MCConstantExpr>(getImm()))
1166 if (!isMem() || Memory.Alignment != 0) return false;
1167 // Check for register offset.
1168 if (Memory.OffsetRegNum) return false;
1169 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1170 if (!Memory.OffsetImm) return true;
1171 int64_t Val = Memory.OffsetImm->getValue();
1172 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1175 bool isMemTBB() const {
1176 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1177 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1181 bool isMemTBH() const {
1182 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1183 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1184 Memory.Alignment != 0 )
1188 bool isMemRegOffset() const {
1189 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1193 bool isT2MemRegOffset() const {
1194 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1195 Memory.Alignment != 0)
1197 // Only lsl #{0, 1, 2, 3} allowed.
1198 if (Memory.ShiftType == ARM_AM::no_shift)
1200 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1204 bool isMemThumbRR() const {
1205 // Thumb reg+reg addressing is simple. Just two registers, a base and
1206 // an offset. No shifts, negations or any other complicating factors.
1207 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1208 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1210 return isARMLowRegister(Memory.BaseRegNum) &&
1211 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1213 bool isMemThumbRIs4() const {
1214 if (!isMem() || Memory.OffsetRegNum != 0 ||
1215 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1217 // Immediate offset, multiple of 4 in range [0, 124].
1218 if (!Memory.OffsetImm) return true;
1219 int64_t Val = Memory.OffsetImm->getValue();
1220 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1222 bool isMemThumbRIs2() const {
1223 if (!isMem() || Memory.OffsetRegNum != 0 ||
1224 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1226     // Immediate offset, multiple of 2 in range [0, 62].
1227 if (!Memory.OffsetImm) return true;
1228 int64_t Val = Memory.OffsetImm->getValue();
1229 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1231 bool isMemThumbRIs1() const {
1232 if (!isMem() || Memory.OffsetRegNum != 0 ||
1233 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1235 // Immediate offset in range [0, 31].
1236 if (!Memory.OffsetImm) return true;
1237 int64_t Val = Memory.OffsetImm->getValue();
1238 return Val >= 0 && Val <= 31;
1240 bool isMemThumbSPI() const {
1241 if (!isMem() || Memory.OffsetRegNum != 0 ||
1242 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1244 // Immediate offset, multiple of 4 in range [0, 1020].
1245 if (!Memory.OffsetImm) return true;
1246 int64_t Val = Memory.OffsetImm->getValue();
1247 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1249 bool isMemImm8s4Offset() const {
1250 // If we have an immediate that's not a constant, treat it as a label
1251 // reference needing a fixup. If it is a constant, it's something else
1252 // and we reject it.
1253 if (isImm() && !isa<MCConstantExpr>(getImm()))
1255 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1257 // Immediate offset a multiple of 4 in range [-1020, 1020].
1258 if (!Memory.OffsetImm) return true;
1259 int64_t Val = Memory.OffsetImm->getValue();
1260 // Special case, #-0 is INT32_MIN.
1261 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1263 bool isMemImm0_1020s4Offset() const {
1264 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1266 // Immediate offset a multiple of 4 in range [0, 1020].
1267 if (!Memory.OffsetImm) return true;
1268 int64_t Val = Memory.OffsetImm->getValue();
1269 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1271 bool isMemImm8Offset() const {
1272 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1274 // Base reg of PC isn't allowed for these encodings.
1275 if (Memory.BaseRegNum == ARM::PC) return false;
1276 // Immediate offset in range [-255, 255].
1277 if (!Memory.OffsetImm) return true;
1278 int64_t Val = Memory.OffsetImm->getValue();
1279 return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1281 bool isMemPosImm8Offset() const {
1282 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1284 // Immediate offset in range [0, 255].
1285 if (!Memory.OffsetImm) return true;
1286 int64_t Val = Memory.OffsetImm->getValue();
1287 return Val >= 0 && Val < 256;
1289 bool isMemNegImm8Offset() const {
1290 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1292 // Base reg of PC isn't allowed for these encodings.
1293 if (Memory.BaseRegNum == ARM::PC) return false;
1294 // Immediate offset in range [-255, -1].
1295 if (!Memory.OffsetImm) return false;
1296 int64_t Val = Memory.OffsetImm->getValue();
1297 return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1299 bool isMemUImm12Offset() const {
1300 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1302 // Immediate offset in range [0, 4095].
1303 if (!Memory.OffsetImm) return true;
1304 int64_t Val = Memory.OffsetImm->getValue();
1305 return (Val >= 0 && Val < 4096);
1307 bool isMemImm12Offset() const {
1308 // If we have an immediate that's not a constant, treat it as a label
1309 // reference needing a fixup. If it is a constant, it's something else
1310 // and we reject it.
1311 if (isImm() && !isa<MCConstantExpr>(getImm()))
1314 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1316 // Immediate offset in range [-4095, 4095].
1317 if (!Memory.OffsetImm) return true;
1318 int64_t Val = Memory.OffsetImm->getValue();
1319 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1321 bool isPostIdxImm8() const {
1322 if (!isImm()) return false;
1323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1324 if (!CE) return false;
1325 int64_t Val = CE->getValue();
1326 return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1328 bool isPostIdxImm8s4() const {
1329 if (!isImm()) return false;
1330 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1331 if (!CE) return false;
1332 int64_t Val = CE->getValue();
1333 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1337 bool isMSRMask() const { return Kind == k_MSRMask; }
1338 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1341 bool isSingleSpacedVectorList() const {
1342 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1344 bool isDoubleSpacedVectorList() const {
1345 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1347 bool isVecListOneD() const {
1348 if (!isSingleSpacedVectorList()) return false;
1349 return VectorList.Count == 1;
1352 bool isVecListDPair() const {
1353 if (!isSingleSpacedVectorList()) return false;
1354 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1355 .contains(VectorList.RegNum));
1358 bool isVecListThreeD() const {
1359 if (!isSingleSpacedVectorList()) return false;
1360 return VectorList.Count == 3;
1363 bool isVecListFourD() const {
1364 if (!isSingleSpacedVectorList()) return false;
1365 return VectorList.Count == 4;
1368 bool isVecListDPairSpaced() const {
1369 if (isSingleSpacedVectorList()) return false;
1370 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1371 .contains(VectorList.RegNum));
1374 bool isVecListThreeQ() const {
1375 if (!isDoubleSpacedVectorList()) return false;
1376 return VectorList.Count == 3;
1379 bool isVecListFourQ() const {
1380 if (!isDoubleSpacedVectorList()) return false;
1381 return VectorList.Count == 4;
1384 bool isSingleSpacedVectorAllLanes() const {
1385 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1387 bool isDoubleSpacedVectorAllLanes() const {
1388 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1390 bool isVecListOneDAllLanes() const {
1391 if (!isSingleSpacedVectorAllLanes()) return false;
1392 return VectorList.Count == 1;
1395 bool isVecListDPairAllLanes() const {
1396 if (!isSingleSpacedVectorAllLanes()) return false;
1397 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1398 .contains(VectorList.RegNum));
1401 bool isVecListDPairSpacedAllLanes() const {
1402 if (!isDoubleSpacedVectorAllLanes()) return false;
1403 return VectorList.Count == 2;
1406 bool isVecListThreeDAllLanes() const {
1407 if (!isSingleSpacedVectorAllLanes()) return false;
1408 return VectorList.Count == 3;
1411 bool isVecListThreeQAllLanes() const {
1412 if (!isDoubleSpacedVectorAllLanes()) return false;
1413 return VectorList.Count == 3;
1416 bool isVecListFourDAllLanes() const {
1417 if (!isSingleSpacedVectorAllLanes()) return false;
1418 return VectorList.Count == 4;
1421 bool isVecListFourQAllLanes() const {
1422 if (!isDoubleSpacedVectorAllLanes()) return false;
1423 return VectorList.Count == 4;
1426 bool isSingleSpacedVectorIndexed() const {
1427 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1429 bool isDoubleSpacedVectorIndexed() const {
1430 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1432 bool isVecListOneDByteIndexed() const {
1433 if (!isSingleSpacedVectorIndexed()) return false;
1434 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1437 bool isVecListOneDHWordIndexed() const {
1438 if (!isSingleSpacedVectorIndexed()) return false;
1439 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1442 bool isVecListOneDWordIndexed() const {
1443 if (!isSingleSpacedVectorIndexed()) return false;
1444 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1447 bool isVecListTwoDByteIndexed() const {
1448 if (!isSingleSpacedVectorIndexed()) return false;
1449 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1452 bool isVecListTwoDHWordIndexed() const {
1453 if (!isSingleSpacedVectorIndexed()) return false;
1454 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1457 bool isVecListTwoQWordIndexed() const {
1458 if (!isDoubleSpacedVectorIndexed()) return false;
1459 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1462 bool isVecListTwoQHWordIndexed() const {
1463 if (!isDoubleSpacedVectorIndexed()) return false;
1464 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1467 bool isVecListTwoDWordIndexed() const {
1468 if (!isSingleSpacedVectorIndexed()) return false;
1469 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1472 bool isVecListThreeDByteIndexed() const {
1473 if (!isSingleSpacedVectorIndexed()) return false;
1474 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1477 bool isVecListThreeDHWordIndexed() const {
1478 if (!isSingleSpacedVectorIndexed()) return false;
1479 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1482 bool isVecListThreeQWordIndexed() const {
1483 if (!isDoubleSpacedVectorIndexed()) return false;
1484 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1487 bool isVecListThreeQHWordIndexed() const {
1488 if (!isDoubleSpacedVectorIndexed()) return false;
1489 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1492 bool isVecListThreeDWordIndexed() const {
1493 if (!isSingleSpacedVectorIndexed()) return false;
1494 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1497 bool isVecListFourDByteIndexed() const {
1498 if (!isSingleSpacedVectorIndexed()) return false;
1499 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1502 bool isVecListFourDHWordIndexed() const {
1503 if (!isSingleSpacedVectorIndexed()) return false;
1504 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1507 bool isVecListFourQWordIndexed() const {
1508 if (!isDoubleSpacedVectorIndexed()) return false;
1509 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1512 bool isVecListFourQHWordIndexed() const {
1513 if (!isDoubleSpacedVectorIndexed()) return false;
1514 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1517 bool isVecListFourDWordIndexed() const {
1518 if (!isSingleSpacedVectorIndexed()) return false;
1519 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1522 bool isVectorIndex8() const {
1523 if (Kind != k_VectorIndex) return false;
1524 return VectorIndex.Val < 8;
1526 bool isVectorIndex16() const {
1527 if (Kind != k_VectorIndex) return false;
1528 return VectorIndex.Val < 4;
1530 bool isVectorIndex32() const {
1531 if (Kind != k_VectorIndex) return false;
1532 return VectorIndex.Val < 2;
1535 bool isNEONi8splat() const {
1536 if (!isImm()) return false;
1537 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1538 // Must be a constant.
1539 if (!CE) return false;
1540 int64_t Value = CE->getValue();
1541 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1543 return Value >= 0 && Value < 256;
1546 bool isNEONi16splat() const {
1547 if (!isImm()) return false;
1548 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1549 // Must be a constant.
1550 if (!CE) return false;
1551 int64_t Value = CE->getValue();
1552 // i16 value in the range [0,255] or [0x0100, 0xff00]
1553 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1556 bool isNEONi32splat() const {
1557 if (!isImm()) return false;
1558 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1559 // Must be a constant.
1560 if (!CE) return false;
1561 int64_t Value = CE->getValue();
1562 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1563 return (Value >= 0 && Value < 256) ||
1564 (Value >= 0x0100 && Value <= 0xff00) ||
1565 (Value >= 0x010000 && Value <= 0xff0000) ||
1566 (Value >= 0x01000000 && Value <= 0xff000000);
1569 bool isNEONi32vmov() const {
1570 if (!isImm()) return false;
1571 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1572 // Must be a constant.
1573 if (!CE) return false;
1574 int64_t Value = CE->getValue();
1575 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1576 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1577 return (Value >= 0 && Value < 256) ||
1578 (Value >= 0x0100 && Value <= 0xff00) ||
1579 (Value >= 0x010000 && Value <= 0xff0000) ||
1580 (Value >= 0x01000000 && Value <= 0xff000000) ||
1581 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1582 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1584 bool isNEONi32vmovNeg() const {
1585 if (!isImm()) return false;
1586 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1587 // Must be a constant.
1588 if (!CE) return false;
1589 int64_t Value = ~CE->getValue();
1590 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1591 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1592 return (Value >= 0 && Value < 256) ||
1593 (Value >= 0x0100 && Value <= 0xff00) ||
1594 (Value >= 0x010000 && Value <= 0xff0000) ||
1595 (Value >= 0x01000000 && Value <= 0xff000000) ||
1596 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1597 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1600 bool isNEONi64splat() const {
1601 if (!isImm()) return false;
1602 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603 // Must be a constant.
1604 if (!CE) return false;
1605 uint64_t Value = CE->getValue();
1606 // i64 value with each byte being either 0 or 0xff.
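    // For example, 0x00ff00ff00ff00ff is accepted (every byte is 0x00 or 0xff),
    // while 0x0102030405060708 is rejected.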
1607     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1608 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1612 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1613 // Add as immediates when possible. Null MCExpr = 0.
1615 Inst.addOperand(MCOperand::CreateImm(0));
1616 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1617 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1619 Inst.addOperand(MCOperand::CreateExpr(Expr));
1622 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1623 assert(N == 2 && "Invalid number of operands!");
1624 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1625 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1626 Inst.addOperand(MCOperand::CreateReg(RegNum));
1629 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1630 assert(N == 1 && "Invalid number of operands!");
1631 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1634 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1635 assert(N == 1 && "Invalid number of operands!");
1636 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1639 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1640 assert(N == 1 && "Invalid number of operands!");
1641 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1644 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1645 assert(N == 1 && "Invalid number of operands!");
1646 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1649 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1650 assert(N == 1 && "Invalid number of operands!");
1651 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1654 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1655 assert(N == 1 && "Invalid number of operands!");
1656 Inst.addOperand(MCOperand::CreateReg(getReg()));
1659 void addRegOperands(MCInst &Inst, unsigned N) const {
1660 assert(N == 1 && "Invalid number of operands!");
1661 Inst.addOperand(MCOperand::CreateReg(getReg()));
1664 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1665 assert(N == 3 && "Invalid number of operands!");
1666 assert(isRegShiftedReg() &&
1667 "addRegShiftedRegOperands() on non-RegShiftedReg!");
1668 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1669 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1670 Inst.addOperand(MCOperand::CreateImm(
1671 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1674 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1675 assert(N == 2 && "Invalid number of operands!");
1676 assert(isRegShiftedImm() &&
1677 "addRegShiftedImmOperands() on non-RegShiftedImm!");
1678 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1679 // Shift of #32 is encoded as 0 where permitted
1680 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1681 Inst.addOperand(MCOperand::CreateImm(
1682 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1685 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1686 assert(N == 1 && "Invalid number of operands!");
1687 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1691 void addRegListOperands(MCInst &Inst, unsigned N) const {
1692 assert(N == 1 && "Invalid number of operands!");
1693 const SmallVectorImpl<unsigned> &RegList = getRegList();
1694 for (SmallVectorImpl<unsigned>::const_iterator
1695 I = RegList.begin(), E = RegList.end(); I != E; ++I)
1696 Inst.addOperand(MCOperand::CreateReg(*I));
1699 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1700 addRegListOperands(Inst, N);
1703 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1704 addRegListOperands(Inst, N);
1707 void addRotImmOperands(MCInst &Inst, unsigned N) const {
1708 assert(N == 1 && "Invalid number of operands!");
1709 // Encoded as val>>3. The printer handles display as 8, 16, 24.
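    // For example, a rotation of 16 is emitted as 16 >> 3 == 2.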
1710 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1713 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1714 assert(N == 1 && "Invalid number of operands!");
1715 // Munge the lsb/width into a bitfield mask.
1716 unsigned lsb = Bitfield.LSB;
1717 unsigned width = Bitfield.Width;
1718 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1719 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1720 (32 - (lsb + width)));
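    // For example, lsb = 8 and width = 4 select bits [11:8], giving
    // Mask = ~0x00000f00 = 0xfffff0ff.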
1721 Inst.addOperand(MCOperand::CreateImm(Mask));
1724 void addImmOperands(MCInst &Inst, unsigned N) const {
1725 assert(N == 1 && "Invalid number of operands!");
1726 addExpr(Inst, getImm());
1729 void addFBits16Operands(MCInst &Inst, unsigned N) const {
1730 assert(N == 1 && "Invalid number of operands!");
1731 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1732 Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1735 void addFBits32Operands(MCInst &Inst, unsigned N) const {
1736 assert(N == 1 && "Invalid number of operands!");
1737 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1738 Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1741 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1742 assert(N == 1 && "Invalid number of operands!");
1743 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1744 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1745 Inst.addOperand(MCOperand::CreateImm(Val));
1748 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1749 assert(N == 1 && "Invalid number of operands!");
1750 // FIXME: We really want to scale the value here, but the LDRD/STRD
1751     // instructions don't encode operands that way yet.
1752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1753 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1756 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1757 assert(N == 1 && "Invalid number of operands!");
1758 // The immediate is scaled by four in the encoding and is stored
1759 // in the MCInst as such. Lop off the low two bits here.
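    // For example, an immediate of 1020 is stored as 1020 / 4 == 255.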
1760 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1761 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1764 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1765 assert(N == 1 && "Invalid number of operands!");
1766 // The immediate is scaled by four in the encoding and is stored
1767 // in the MCInst as such. Lop off the low two bits here.
1768 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1769 Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1772 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1773 assert(N == 1 && "Invalid number of operands!");
1774 // The immediate is scaled by four in the encoding and is stored
1775 // in the MCInst as such. Lop off the low two bits here.
1776 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1777 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1780 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 // The constant encodes as the immediate-1, and we store in the instruction
1783 // the bits as encoded, so subtract off one here.
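// e.g., #1 is stored as 0 and #16 as 15.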
1784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1785 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1788 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1789 assert(N == 1 && "Invalid number of operands!");
1790 // The constant encodes as the immediate-1, and we store in the instruction
1791 // the bits as encoded, so subtract off one here.
1792 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1793 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1796 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1797 assert(N == 1 && "Invalid number of operands!");
1798 // The constant encodes as the immediate, except for 32, which encodes as 0.
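// e.g., #32 is stored as 0; any other value is stored unchanged.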
1800 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1801 unsigned Imm = CE->getValue();
1802 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1805 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1806 assert(N == 1 && "Invalid number of operands!");
1807 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1808 // the instruction as well.
1809 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1810 int Val = CE->getValue();
1811 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1814 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1815 assert(N == 1 && "Invalid number of operands!");
1816 // The operand is actually a t2_so_imm, but we have its bitwise
1817 // negation in the assembly source, so twiddle it here.
1818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1819 Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1822 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1823 assert(N == 1 && "Invalid number of operands!");
1824 // The operand is actually a t2_so_imm, but we have its
1825 // negation in the assembly source, so twiddle it here.
1826 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1827 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1830 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1831 assert(N == 1 && "Invalid number of operands!");
1832 // The operand is actually an imm0_4095, but we have its
1833 // negation in the assembly source, so twiddle it here.
1834 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1835 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1838 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
1839 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1840 Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
1844 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1845 assert(SR && "Unknown value type!");
1846 Inst.addOperand(MCOperand::CreateExpr(SR));
1849 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
1850 assert(N == 1 && "Invalid number of operands!");
1852 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1854 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1858 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1859 assert(SR && "Unknown value type!");
1860 Inst.addOperand(MCOperand::CreateExpr(SR));
1864 assert(isMem() && "Unknown value type!");
1865 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
1866 Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
1869 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1870 assert(N == 1 && "Invalid number of operands!");
1871 // The operand is actually a so_imm, but we have its bitwise
1872 // negation in the assembly source, so twiddle it here.
1873 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1874 Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1877 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1878 assert(N == 1 && "Invalid number of operands!");
1879 // The operand is actually a so_imm, but we have its
1880 // negation in the assembly source, so twiddle it here.
1881 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1882 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1885 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1886 assert(N == 1 && "Invalid number of operands!");
1887 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1890 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
1891 assert(N == 1 && "Invalid number of operands!");
1892 Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
1895 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1896 assert(N == 1 && "Invalid number of operands!");
1897 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1900 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1901 assert(N == 1 && "Invalid number of operands!");
1902 int32_t Imm = Memory.OffsetImm->getValue();
1903 Inst.addOperand(MCOperand::CreateImm(Imm));
1906 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1907 assert(N == 1 && "Invalid number of operands!");
1908 assert(isImm() && "Not an immediate!");
1910 // If we have an immediate that's not a constant, treat it as a label
1911 // reference needing a fixup.
1912 if (!isa<MCConstantExpr>(getImm())) {
1913 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1917 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1918 int Val = CE->getValue();
1919 Inst.addOperand(MCOperand::CreateImm(Val));
1922 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1923 assert(N == 2 && "Invalid number of operands!");
1924 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1925 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1928 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1929 assert(N == 3 && "Invalid number of operands!");
1930 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1931 if (!Memory.OffsetRegNum) {
1932 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1933 // Special case for #-0
1934 if (Val == INT32_MIN) Val = 0;
1935 if (Val < 0) Val = -Val;
1936 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1938 // For register offset, we encode the shift type and negation flag here.
1940 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1941 Memory.ShiftImm, Memory.ShiftType);
1943 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1944 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1945 Inst.addOperand(MCOperand::CreateImm(Val));
1948 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1949 assert(N == 2 && "Invalid number of operands!");
1950 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1951 assert(CE && "non-constant AM2OffsetImm operand!");
1952 int32_t Val = CE->getValue();
1953 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1954 // Special case for #-0
1955 if (Val == INT32_MIN) Val = 0;
1956 if (Val < 0) Val = -Val;
1957 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1958 Inst.addOperand(MCOperand::CreateReg(0));
1959 Inst.addOperand(MCOperand::CreateImm(Val));
1962 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1963 assert(N == 3 && "Invalid number of operands!");
1964 // If we have an immediate that's not a constant, treat it as a label
1965 // reference needing a fixup. If it is a constant, it's something else
1966 // and we reject it.
1968 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1969 Inst.addOperand(MCOperand::CreateReg(0));
1970 Inst.addOperand(MCOperand::CreateImm(0));
1974 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1975 if (!Memory.OffsetRegNum) {
1976 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1977 // Special case for #-0
1978 if (Val == INT32_MIN) Val = 0;
1979 if (Val < 0) Val = -Val;
1980 Val = ARM_AM::getAM3Opc(AddSub, Val);
1982 // For register offset, we encode the shift type and negation flag here.
1984 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1986 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1987 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1988 Inst.addOperand(MCOperand::CreateImm(Val));
1991 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 2 && "Invalid number of operands!");
1993 if (Kind == k_PostIndexRegister) {
1995 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1996 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1997 Inst.addOperand(MCOperand::CreateImm(Val));
2002 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2003 int32_t Val = CE->getValue();
2004 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2005 // Special case for #-0
2006 if (Val == INT32_MIN) Val = 0;
2007 if (Val < 0) Val = -Val;
2008 Val = ARM_AM::getAM3Opc(AddSub, Val);
2009 Inst.addOperand(MCOperand::CreateReg(0));
2010 Inst.addOperand(MCOperand::CreateImm(Val));
2013 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2014 assert(N == 2 && "Invalid number of operands!");
2015 // If we have an immediate that's not a constant, treat it as a label
2016 // reference needing a fixup. If it is a constant, it's something else
2017 // and we reject it.
2019 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2020 Inst.addOperand(MCOperand::CreateImm(0));
2024 // The lower two bits are always zero and as such are not encoded.
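// e.g., an offset of #16 is stored as 4 (with the add flag).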
2025 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2026 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2027 // Special case for #-0
2028 if (Val == INT32_MIN) Val = 0;
2029 if (Val < 0) Val = -Val;
2030 Val = ARM_AM::getAM5Opc(AddSub, Val);
2031 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2032 Inst.addOperand(MCOperand::CreateImm(Val));
2035 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 2 && "Invalid number of operands!");
2037 // If we have an immediate that's not a constant, treat it as a label
2038 // reference needing a fixup. If it is a constant, it's something else
2039 // and we reject it.
2041 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2042 Inst.addOperand(MCOperand::CreateImm(0));
2046 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2047 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2048 Inst.addOperand(MCOperand::CreateImm(Val));
2051 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2052 assert(N == 2 && "Invalid number of operands!");
2053 // The lower two bits are always zero and as such are not encoded.
2054 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2055 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2056 Inst.addOperand(MCOperand::CreateImm(Val));
2059 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2060 assert(N == 2 && "Invalid number of operands!");
2061 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2062 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2063 Inst.addOperand(MCOperand::CreateImm(Val));
2066 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2067 addMemImm8OffsetOperands(Inst, N);
2070 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2071 addMemImm8OffsetOperands(Inst, N);
2074 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2075 assert(N == 2 && "Invalid number of operands!");
2076 // If this is an immediate, it's a label reference.
2078 addExpr(Inst, getImm());
2079 Inst.addOperand(MCOperand::CreateImm(0));
2083 // Otherwise, it's a normal memory reg+offset.
2084 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2085 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2086 Inst.addOperand(MCOperand::CreateImm(Val));
2089 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 2 && "Invalid number of operands!");
2091 // If this is an immediate, it's a label reference.
2093 addExpr(Inst, getImm());
2094 Inst.addOperand(MCOperand::CreateImm(0));
2098 // Otherwise, it's a normal memory reg+offset.
2099 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2100 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2101 Inst.addOperand(MCOperand::CreateImm(Val));
2104 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2105 assert(N == 2 && "Invalid number of operands!");
2106 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2107 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2110 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2111 assert(N == 2 && "Invalid number of operands!");
2112 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2113 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2116 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2117 assert(N == 3 && "Invalid number of operands!");
2119 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2120 Memory.ShiftImm, Memory.ShiftType);
2121 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2122 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2123 Inst.addOperand(MCOperand::CreateImm(Val));
2126 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2127 assert(N == 3 && "Invalid number of operands!");
2128 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2129 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2130 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
2133 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2134 assert(N == 2 && "Invalid number of operands!");
2135 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2136 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2139 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2140 assert(N == 2 && "Invalid number of operands!");
2141 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2142 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2143 Inst.addOperand(MCOperand::CreateImm(Val));
2146 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2147 assert(N == 2 && "Invalid number of operands!");
2148 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2149 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2150 Inst.addOperand(MCOperand::CreateImm(Val));
2153 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2154 assert(N == 2 && "Invalid number of operands!");
2155 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2156 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2157 Inst.addOperand(MCOperand::CreateImm(Val));
2160 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2161 assert(N == 2 && "Invalid number of operands!");
2162 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2163 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2164 Inst.addOperand(MCOperand::CreateImm(Val));
2167 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2168 assert(N == 1 && "Invalid number of operands!");
2169 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2170 assert(CE && "non-constant post-idx-imm8 operand!");
2171 int Imm = CE->getValue();
2172 bool isAdd = Imm >= 0;
2173 if (Imm == INT32_MIN) Imm = 0;
2174 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
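// e.g., #4 becomes 0x104 (add bit set) and #-4 becomes 0x004.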
2175 Inst.addOperand(MCOperand::CreateImm(Imm));
2178 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2179 assert(N == 1 && "Invalid number of operands!");
2180 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2181 assert(CE && "non-constant post-idx-imm8s4 operand!");
2182 int Imm = CE->getValue();
2183 bool isAdd = Imm >= 0;
2184 if (Imm == INT32_MIN) Imm = 0;
2185 // Immediate is scaled by 4.
2186 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2187 Inst.addOperand(MCOperand::CreateImm(Imm));
2190 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 2 && "Invalid number of operands!");
2192 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2193 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2196 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 2 && "Invalid number of operands!");
2198 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2199 // The sign, shift type, and shift amount are encoded in a single operand
2200 // using the AM2 encoding helpers.
2201 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2202 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2203 PostIdxReg.ShiftTy);
2204 Inst.addOperand(MCOperand::CreateImm(Imm));
2207 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2212 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2213 assert(N == 1 && "Invalid number of operands!");
2214 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2217 void addVecListOperands(MCInst &Inst, unsigned N) const {
2218 assert(N == 1 && "Invalid number of operands!");
2219 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2222 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2223 assert(N == 2 && "Invalid number of operands!");
2224 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2225 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2228 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2229 assert(N == 1 && "Invalid number of operands!");
2230 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2233 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2234 assert(N == 1 && "Invalid number of operands!");
2235 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2238 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2239 assert(N == 1 && "Invalid number of operands!");
2240 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2243 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 1 && "Invalid number of operands!");
2245 // The immediate encodes the type of constant as well as the value.
2246 // Mask in that this is an i8 splat.
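// e.g., a byte value of 0x42 is emitted as 0xe42.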
2247 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2248 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2251 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2252 assert(N == 1 && "Invalid number of operands!");
2253 // The immediate encodes the type of constant as well as the value.
2254 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2255 unsigned Value = CE->getValue();
2257 Value = (Value >> 8) | 0xa00;
2260 Inst.addOperand(MCOperand::CreateImm(Value));
2263 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2264 assert(N == 1 && "Invalid number of operands!");
2265 // The immediate encodes the type of constant as well as the value.
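// e.g., 0xab0000 (non-zero byte in position 2) is emitted as 0x4ab.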
2266 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2267 unsigned Value = CE->getValue();
2268 if (Value >= 256 && Value <= 0xff00)
2269 Value = (Value >> 8) | 0x200;
2270 else if (Value > 0xffff && Value <= 0xff0000)
2271 Value = (Value >> 16) | 0x400;
2272 else if (Value > 0xffffff)
2273 Value = (Value >> 24) | 0x600;
2274 Inst.addOperand(MCOperand::CreateImm(Value));
2277 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2278 assert(N == 1 && "Invalid number of operands!");
2279 // The immediate encodes the type of constant as well as the value.
2280 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2281 unsigned Value = CE->getValue();
2282 if (Value >= 256 && Value <= 0xffff)
2283 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2284 else if (Value > 0xffff && Value <= 0xffffff)
2285 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2286 else if (Value > 0xffffff)
2287 Value = (Value >> 24) | 0x600;
2288 Inst.addOperand(MCOperand::CreateImm(Value));
2291 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 // The immediate encodes the type of constant as well as the value.
2294 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2295 unsigned Value = ~CE->getValue();
2296 if (Value >= 256 && Value <= 0xffff)
2297 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2298 else if (Value > 0xffff && Value <= 0xffffff)
2299 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2300 else if (Value > 0xffffff)
2301 Value = (Value >> 24) | 0x600;
2302 Inst.addOperand(MCOperand::CreateImm(Value));
2305 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2306 assert(N == 1 && "Invalid number of operands!");
2307 // The immediate encodes the type of constant as well as the value.
2308 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2309 uint64_t Value = CE->getValue();
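// Each byte of the value contributes one bit of the mask; e.g. 0x00ff00ff00ff00ff yields 0x55, emitted as 0x1e55.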
2311 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2312 Imm |= (Value & 1) << i;
2314 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2317 void print(raw_ostream &OS) const override;
2319 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2320 ARMOperand *Op = new ARMOperand(k_ITCondMask);
2321 Op->ITMask.Mask = Mask;
2327 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2328 ARMOperand *Op = new ARMOperand(k_CondCode);
2335 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2336 ARMOperand *Op = new ARMOperand(k_CoprocNum);
2337 Op->Cop.Val = CopVal;
2343 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2344 ARMOperand *Op = new ARMOperand(k_CoprocReg);
2345 Op->Cop.Val = CopVal;
2351 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2352 ARMOperand *Op = new ARMOperand(k_CoprocOption);
2359 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2360 ARMOperand *Op = new ARMOperand(k_CCOut);
2361 Op->Reg.RegNum = RegNum;
2367 static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2368 ARMOperand *Op = new ARMOperand(k_Token);
2369 Op->Tok.Data = Str.data();
2370 Op->Tok.Length = Str.size();
2376 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2377 ARMOperand *Op = new ARMOperand(k_Register);
2378 Op->Reg.RegNum = RegNum;
2384 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2389 ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2390 Op->RegShiftedReg.ShiftTy = ShTy;
2391 Op->RegShiftedReg.SrcReg = SrcReg;
2392 Op->RegShiftedReg.ShiftReg = ShiftReg;
2393 Op->RegShiftedReg.ShiftImm = ShiftImm;
2399 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2403 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2404 Op->RegShiftedImm.ShiftTy = ShTy;
2405 Op->RegShiftedImm.SrcReg = SrcReg;
2406 Op->RegShiftedImm.ShiftImm = ShiftImm;
2412 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2414 ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2415 Op->ShifterImm.isASR = isASR;
2416 Op->ShifterImm.Imm = Imm;
2422 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2423 ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2424 Op->RotImm.Imm = Imm;
2430 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2432 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2433 Op->Bitfield.LSB = LSB;
2434 Op->Bitfield.Width = Width;
2441 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
2442 SMLoc StartLoc, SMLoc EndLoc) {
2443 assert (Regs.size() > 0 && "RegList contains no registers?");
2444 KindTy Kind = k_RegisterList;
2446 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2447 Kind = k_DPRRegisterList;
2448 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2449 contains(Regs.front().second))
2450 Kind = k_SPRRegisterList;
2452 // Sort based on the register encoding values.
2453 array_pod_sort(Regs.begin(), Regs.end());
2455 ARMOperand *Op = new ARMOperand(Kind);
2456 for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2457 I = Regs.begin(), E = Regs.end(); I != E; ++I)
2458 Op->Registers.push_back(I->second);
2459 Op->StartLoc = StartLoc;
2460 Op->EndLoc = EndLoc;
2464 static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2465 bool isDoubleSpaced, SMLoc S, SMLoc E) {
2466 ARMOperand *Op = new ARMOperand(k_VectorList);
2467 Op->VectorList.RegNum = RegNum;
2468 Op->VectorList.Count = Count;
2469 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2475 static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2476 bool isDoubleSpaced,
2478 ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2479 Op->VectorList.RegNum = RegNum;
2480 Op->VectorList.Count = Count;
2481 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2487 static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2489 bool isDoubleSpaced,
2491 ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2492 Op->VectorList.RegNum = RegNum;
2493 Op->VectorList.Count = Count;
2494 Op->VectorList.LaneIndex = Index;
2495 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2501 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2503 ARMOperand *Op = new ARMOperand(k_VectorIndex);
2504 Op->VectorIndex.Val = Idx;
2510 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2511 ARMOperand *Op = new ARMOperand(k_Immediate);
2518 static ARMOperand *CreateMem(unsigned BaseRegNum,
2519 const MCConstantExpr *OffsetImm,
2520 unsigned OffsetRegNum,
2521 ARM_AM::ShiftOpc ShiftType,
2526 ARMOperand *Op = new ARMOperand(k_Memory);
2527 Op->Memory.BaseRegNum = BaseRegNum;
2528 Op->Memory.OffsetImm = OffsetImm;
2529 Op->Memory.OffsetRegNum = OffsetRegNum;
2530 Op->Memory.ShiftType = ShiftType;
2531 Op->Memory.ShiftImm = ShiftImm;
2532 Op->Memory.Alignment = Alignment;
2533 Op->Memory.isNegative = isNegative;
2539 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2540 ARM_AM::ShiftOpc ShiftTy,
2543 ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2544 Op->PostIdxReg.RegNum = RegNum;
2545 Op->PostIdxReg.isAdd = isAdd;
2546 Op->PostIdxReg.ShiftTy = ShiftTy;
2547 Op->PostIdxReg.ShiftImm = ShiftImm;
2553 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2554 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2555 Op->MBOpt.Val = Opt;
2561 static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
2563 ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
2564 Op->ISBOpt.Val = Opt;
2570 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2571 ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2572 Op->IFlags.Val = IFlags;
2578 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2579 ARMOperand *Op = new ARMOperand(k_MSRMask);
2580 Op->MMask.Val = MMask;
2587 } // end anonymous namespace.
2589 void ARMOperand::print(raw_ostream &OS) const {
2592 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2595 OS << "<ccout " << getReg() << ">";
2597 case k_ITCondMask: {
2598 static const char *const MaskStr[] = {
2599 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2600 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2602 assert((ITMask.Mask & 0xf) == ITMask.Mask);
2603 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2607 OS << "<coprocessor number: " << getCoproc() << ">";
2610 OS << "<coprocessor register: " << getCoproc() << ">";
2612 case k_CoprocOption:
2613 OS << "<coprocessor option: " << CoprocOption.Val << ">";
2616 OS << "<mask: " << getMSRMask() << ">";
2619 getImm()->print(OS);
2621 case k_MemBarrierOpt:
2622 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2624 case k_InstSyncBarrierOpt:
2625 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2629 << " base:" << Memory.BaseRegNum;
2632 case k_PostIndexRegister:
2633 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2634 << PostIdxReg.RegNum;
2635 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2636 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2637 << PostIdxReg.ShiftImm;
2640 case k_ProcIFlags: {
2641 OS << "<ARM_PROC::";
2642 unsigned IFlags = getProcIFlags();
2643 for (int i=2; i >= 0; --i)
2644 if (IFlags & (1 << i))
2645 OS << ARM_PROC::IFlagsToString(1 << i);
2650 OS << "<register " << getReg() << ">";
2652 case k_ShifterImmediate:
2653 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2654 << " #" << ShifterImm.Imm << ">";
2656 case k_ShiftedRegister:
2657 OS << "<so_reg_reg "
2658 << RegShiftedReg.SrcReg << " "
2659 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2660 << " " << RegShiftedReg.ShiftReg << ">";
2662 case k_ShiftedImmediate:
2663 OS << "<so_reg_imm "
2664 << RegShiftedImm.SrcReg << " "
2665 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2666 << " #" << RegShiftedImm.ShiftImm << ">";
2668 case k_RotateImmediate:
2669 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2671 case k_BitfieldDescriptor:
2672 OS << "<bitfield " << "lsb: " << Bitfield.LSB
2673 << ", width: " << Bitfield.Width << ">";
2675 case k_RegisterList:
2676 case k_DPRRegisterList:
2677 case k_SPRRegisterList: {
2678 OS << "<register_list ";
2680 const SmallVectorImpl<unsigned> &RegList = getRegList();
2681 for (SmallVectorImpl<unsigned>::const_iterator
2682 I = RegList.begin(), E = RegList.end(); I != E; ) {
2684 if (++I < E) OS << ", ";
2691 OS << "<vector_list " << VectorList.Count << " * "
2692 << VectorList.RegNum << ">";
2694 case k_VectorListAllLanes:
2695 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2696 << VectorList.RegNum << ">";
2698 case k_VectorListIndexed:
2699 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2700 << VectorList.Count << " * " << VectorList.RegNum << ">";
2703 OS << "'" << getToken() << "'";
2706 OS << "<vectorindex " << getVectorIndex() << ">";
2711 /// @name Auto-generated Match Functions
2714 static unsigned MatchRegisterName(StringRef Name);
2718 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2719 SMLoc &StartLoc, SMLoc &EndLoc) {
2720 StartLoc = Parser.getTok().getLoc();
2721 EndLoc = Parser.getTok().getEndLoc();
2722 RegNo = tryParseRegister();
2724 return (RegNo == (unsigned)-1);
2727 /// Try to parse a register name. The token must be an Identifier when called,
2728 /// and if it is a register name the token is eaten and the register number is
2729 /// returned. Otherwise return -1.
2731 int ARMAsmParser::tryParseRegister() {
2732 const AsmToken &Tok = Parser.getTok();
2733 if (Tok.isNot(AsmToken::Identifier)) return -1;
2735 std::string lowerCase = Tok.getString().lower();
2736 unsigned RegNum = MatchRegisterName(lowerCase);
2738 RegNum = StringSwitch<unsigned>(lowerCase)
2739 .Case("r13", ARM::SP)
2740 .Case("r14", ARM::LR)
2741 .Case("r15", ARM::PC)
2742 .Case("ip", ARM::R12)
2743 // Additional register name aliases for 'gas' compatibility.
2744 .Case("a1", ARM::R0)
2745 .Case("a2", ARM::R1)
2746 .Case("a3", ARM::R2)
2747 .Case("a4", ARM::R3)
2748 .Case("v1", ARM::R4)
2749 .Case("v2", ARM::R5)
2750 .Case("v3", ARM::R6)
2751 .Case("v4", ARM::R7)
2752 .Case("v5", ARM::R8)
2753 .Case("v6", ARM::R9)
2754 .Case("v7", ARM::R10)
2755 .Case("v8", ARM::R11)
2756 .Case("sb", ARM::R9)
2757 .Case("sl", ARM::R10)
2758 .Case("fp", ARM::R11)
2762 // Check for aliases registered via .req. Canonicalize to lower case.
2763 // That's more consistent since register names are case insensitive, and
2764 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2765 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2766 // If no match, return failure.
2767 if (Entry == RegisterReqs.end())
2769 Parser.Lex(); // Eat identifier token.
2770 return Entry->getValue();
2773 Parser.Lex(); // Eat identifier token.
2778 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2779 // If a recoverable error occurs, return 1. If an irrecoverable error
2780 // occurs, return -1. An irrecoverable error is one where tokens have been
2781 // consumed in the process of trying to parse the shifter (i.e., when it is
2782 // indeed a shifter operand, but malformed).
2783 int ARMAsmParser::tryParseShiftRegister(
2784 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2785 SMLoc S = Parser.getTok().getLoc();
2786 const AsmToken &Tok = Parser.getTok();
2787 if (Tok.isNot(AsmToken::Identifier))
2790 std::string lowerCase = Tok.getString().lower();
2791 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2792 .Case("asl", ARM_AM::lsl)
2793 .Case("lsl", ARM_AM::lsl)
2794 .Case("lsr", ARM_AM::lsr)
2795 .Case("asr", ARM_AM::asr)
2796 .Case("ror", ARM_AM::ror)
2797 .Case("rrx", ARM_AM::rrx)
2798 .Default(ARM_AM::no_shift);
2800 if (ShiftTy == ARM_AM::no_shift)
2803 Parser.Lex(); // Eat the operator.
2805 // The source register for the shift has already been added to the
2806 // operand list, so we need to pop it off and combine it into the shifted
2807 // register operand instead.
2808 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2809 if (!PrevOp->isReg())
2810 return Error(PrevOp->getStartLoc(), "shift must be of a register");
2811 int SrcReg = PrevOp->getReg();
2816 if (ShiftTy == ARM_AM::rrx) {
2817 // RRX doesn't have an explicit shift amount. The encoder expects
2818 // the shift register to be the same as the source register. Seems odd, but OK.
2822 // Figure out if this is shifted by a constant or a register (for non-RRX).
2823 if (Parser.getTok().is(AsmToken::Hash) ||
2824 Parser.getTok().is(AsmToken::Dollar)) {
2825 Parser.Lex(); // Eat hash.
2826 SMLoc ImmLoc = Parser.getTok().getLoc();
2827 const MCExpr *ShiftExpr = 0;
2828 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2829 Error(ImmLoc, "invalid immediate shift value");
2832 // The expression must be evaluatable as an immediate.
2833 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2835 Error(ImmLoc, "invalid immediate shift value");
2838 // Range check the immediate.
2839 // lsl, ror: 0 <= imm <= 31
2840 // lsr, asr: 0 <= imm <= 32
2841 Imm = CE->getValue();
2843 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2844 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2845 Error(ImmLoc, "immediate shift value out of range");
2848 // Shift by zero is a nop. Always send it through as lsl.
2849 // ('as' compatibility)
2851 ShiftTy = ARM_AM::lsl;
2852 } else if (Parser.getTok().is(AsmToken::Identifier)) {
2853 SMLoc L = Parser.getTok().getLoc();
2854 EndLoc = Parser.getTok().getEndLoc();
2855 ShiftReg = tryParseRegister();
2856 if (ShiftReg == -1) {
2857 Error (L, "expected immediate or register in shift operand");
2861 Error (Parser.getTok().getLoc(),
2862 "expected immediate or register in shift operand");
2867 if (ShiftReg && ShiftTy != ARM_AM::rrx)
2868 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2872 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2879 /// Try to parse a register name. The token must be an Identifier when called.
2880 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
2881 /// if there is a "writeback". Returns 'true' if it's not a register.
2883 /// TODO: this is likely to change to allow different register types and/or to
2884 /// parse for a specific register type.
2886 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2887 const AsmToken &RegTok = Parser.getTok();
2888 int RegNo = tryParseRegister();
2892 Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2893 RegTok.getEndLoc()));
2895 const AsmToken &ExclaimTok = Parser.getTok();
2896 if (ExclaimTok.is(AsmToken::Exclaim)) {
2897 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2898 ExclaimTok.getLoc()));
2899 Parser.Lex(); // Eat exclaim token
2903 // Also check for an index operand. This is only legal for vector registers,
2904 // but that'll get caught OK in operand matching, so we don't need to
2905 // explicitly filter everything else out here.
2906 if (Parser.getTok().is(AsmToken::LBrac)) {
2907 SMLoc SIdx = Parser.getTok().getLoc();
2908 Parser.Lex(); // Eat left bracket token.
2910 const MCExpr *ImmVal;
2911 if (getParser().parseExpression(ImmVal))
2913 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2915 return TokError("immediate value expected for vector index");
2917 if (Parser.getTok().isNot(AsmToken::RBrac))
2918 return Error(Parser.getTok().getLoc(), "']' expected");
2920 SMLoc E = Parser.getTok().getEndLoc();
2921 Parser.Lex(); // Eat right bracket token.
2923 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2931 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2932 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2934 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2935 // Use the same layout as the tablegen'erated register name matcher. Ugly, but efficient.
2937 switch (Name.size()) {
2940 if (Name[0] != CoprocOp)
2956 if (Name[0] != CoprocOp || Name[1] != '1')
2960 // p10 and p11 are invalid for coproc instructions (reserved for FP/NEON)
2961 case '0': return CoprocOp == 'p'? -1: 10;
2962 case '1': return CoprocOp == 'p'? -1: 11;
2963 case '2': return 12;
2964 case '3': return 13;
2965 case '4': return 14;
2966 case '5': return 15;
2971 /// parseITCondCode - Try to parse a condition code for an IT instruction.
2972 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2973 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2974 SMLoc S = Parser.getTok().getLoc();
2975 const AsmToken &Tok = Parser.getTok();
2976 if (!Tok.is(AsmToken::Identifier))
2977 return MatchOperand_NoMatch;
2978 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2979 .Case("eq", ARMCC::EQ)
2980 .Case("ne", ARMCC::NE)
2981 .Case("hs", ARMCC::HS)
2982 .Case("cs", ARMCC::HS)
2983 .Case("lo", ARMCC::LO)
2984 .Case("cc", ARMCC::LO)
2985 .Case("mi", ARMCC::MI)
2986 .Case("pl", ARMCC::PL)
2987 .Case("vs", ARMCC::VS)
2988 .Case("vc", ARMCC::VC)
2989 .Case("hi", ARMCC::HI)
2990 .Case("ls", ARMCC::LS)
2991 .Case("ge", ARMCC::GE)
2992 .Case("lt", ARMCC::LT)
2993 .Case("gt", ARMCC::GT)
2994 .Case("le", ARMCC::LE)
2995 .Case("al", ARMCC::AL)
2998 return MatchOperand_NoMatch;
2999 Parser.Lex(); // Eat the token.
3001 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3003 return MatchOperand_Success;
3006 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3007 /// token must be an Identifier when called, and if it is a coprocessor
3008 /// number, the token is eaten and the operand is added to the operand list.
3009 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3010 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3011 SMLoc S = Parser.getTok().getLoc();
3012 const AsmToken &Tok = Parser.getTok();
3013 if (Tok.isNot(AsmToken::Identifier))
3014 return MatchOperand_NoMatch;
3016 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3018 return MatchOperand_NoMatch;
3020 Parser.Lex(); // Eat identifier token.
3021 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3022 return MatchOperand_Success;
3025 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3026 /// token must be an Identifier when called, and if it is a coprocessor
3027 /// register, the token is eaten and the operand is added to the operand list.
3028 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3029 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3030 SMLoc S = Parser.getTok().getLoc();
3031 const AsmToken &Tok = Parser.getTok();
3032 if (Tok.isNot(AsmToken::Identifier))
3033 return MatchOperand_NoMatch;
3035 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3037 return MatchOperand_NoMatch;
3039 Parser.Lex(); // Eat identifier token.
3040 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3041 return MatchOperand_Success;
3044 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3045 /// coproc_option : '{' imm0_255 '}'
3046 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3047 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3048 SMLoc S = Parser.getTok().getLoc();
3050 // If this isn't a '{', this isn't a coprocessor immediate operand.
3051 if (Parser.getTok().isNot(AsmToken::LCurly))
3052 return MatchOperand_NoMatch;
3053 Parser.Lex(); // Eat the '{'
3056 SMLoc Loc = Parser.getTok().getLoc();
3057 if (getParser().parseExpression(Expr)) {
3058 Error(Loc, "illegal expression");
3059 return MatchOperand_ParseFail;
3061 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3062 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3063 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3064 return MatchOperand_ParseFail;
3066 int Val = CE->getValue();
3068 // Check for and consume the closing '}'
3069 if (Parser.getTok().isNot(AsmToken::RCurly))
3070 return MatchOperand_ParseFail;
3071 SMLoc E = Parser.getTok().getEndLoc();
3072 Parser.Lex(); // Eat the '}'
3074 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3075 return MatchOperand_Success;
3078 // For register list parsing, we need to map from raw GPR register numbering
3079 // to the enumeration values. The enumeration values aren't sorted by
3080 // register number due to our using "sp", "lr" and "pc" as canonical names.
3081 static unsigned getNextRegister(unsigned Reg) {
3082 // If this is a GPR, we need to do it manually, otherwise we can rely
3083 // on the sort ordering of the enumeration since the other reg-classes are sane.
3085 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3088 default: llvm_unreachable("Invalid GPR number!");
3089 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3090 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3091 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3092 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3093 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3094 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3095 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3096 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3100 // Return the low-subreg of a given Q register.
3101 static unsigned getDRegFromQReg(unsigned QReg) {
3103 default: llvm_unreachable("expected a Q register!");
3104 case ARM::Q0: return ARM::D0;
3105 case ARM::Q1: return ARM::D2;
3106 case ARM::Q2: return ARM::D4;
3107 case ARM::Q3: return ARM::D6;
3108 case ARM::Q4: return ARM::D8;
3109 case ARM::Q5: return ARM::D10;
3110 case ARM::Q6: return ARM::D12;
3111 case ARM::Q7: return ARM::D14;
3112 case ARM::Q8: return ARM::D16;
3113 case ARM::Q9: return ARM::D18;
3114 case ARM::Q10: return ARM::D20;
3115 case ARM::Q11: return ARM::D22;
3116 case ARM::Q12: return ARM::D24;
3117 case ARM::Q13: return ARM::D26;
3118 case ARM::Q14: return ARM::D28;
3119 case ARM::Q15: return ARM::D30;
3123 /// Parse a register list.
3125 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3126 assert(Parser.getTok().is(AsmToken::LCurly) &&
3127 "Token is not a Left Curly Brace");
3128 SMLoc S = Parser.getTok().getLoc();
3129 Parser.Lex(); // Eat '{' token.
3130 SMLoc RegLoc = Parser.getTok().getLoc();
3132 // Check the first register in the list to see what register class
3133 // this is a list of.
3134 int Reg = tryParseRegister();
3136 return Error(RegLoc, "register expected");
3138 // The reglist instructions have at most 16 registers, so reserve
3139 // space for that many.
3141 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3143 // Allow Q regs and just interpret them as the two D sub-registers.
3144 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3145 Reg = getDRegFromQReg(Reg);
3146 EReg = MRI->getEncodingValue(Reg);
3147 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3150 const MCRegisterClass *RC;
3151 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3152 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3153 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3154 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3155 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3156 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3158 return Error(RegLoc, "invalid register in register list");
3160 // Store the register.
3161 EReg = MRI->getEncodingValue(Reg);
3162 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3164 // This starts immediately after the first register token in the list,
3165 // so we can see either a comma or a minus (range separator) as a legal next token.
3167 while (Parser.getTok().is(AsmToken::Comma) ||
3168 Parser.getTok().is(AsmToken::Minus)) {
3169 if (Parser.getTok().is(AsmToken::Minus)) {
3170 Parser.Lex(); // Eat the minus.
3171 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3172 int EndReg = tryParseRegister();
3174 return Error(AfterMinusLoc, "register expected");
3175 // Allow Q regs and just interpret them as the two D sub-registers.
3176 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3177 EndReg = getDRegFromQReg(EndReg) + 1;
3178 // If the register is the same as the start reg, there's nothing to do.
3182 // The register must be in the same register class as the first.
3183 if (!RC->contains(EndReg))
3184 return Error(AfterMinusLoc, "invalid register in register list");
3185 // Ranges must go from low to high.
3186 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3187 return Error(AfterMinusLoc, "bad range in register list");
3189 // Add all the registers in the range to the register list.
3190 while (Reg != EndReg) {
3191 Reg = getNextRegister(Reg);
3192 EReg = MRI->getEncodingValue(Reg);
3193 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3197 Parser.Lex(); // Eat the comma.
3198 RegLoc = Parser.getTok().getLoc();
3200 const AsmToken RegTok = Parser.getTok();
3201 Reg = tryParseRegister();
3203 return Error(RegLoc, "register expected");
3204 // Allow Q regs and just interpret them as the two D sub-registers.
3205 bool isQReg = false;
3206 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3207 Reg = getDRegFromQReg(Reg);
3210 // The register must be in the same register class as the first.
3211 if (!RC->contains(Reg))
3212 return Error(RegLoc, "invalid register in register list");
3213 // List must be monotonically increasing.
3214 if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3215 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3216 Warning(RegLoc, "register list not in ascending order");
3218 return Error(RegLoc, "register list not in ascending order");
3220 if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3221 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3222 ") in register list");
3225 // VFP register lists must also be contiguous.
3226 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3228 return Error(RegLoc, "non-contiguous register range");
3229 EReg = MRI->getEncodingValue(Reg);
3230 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3232 EReg = MRI->getEncodingValue(++Reg);
3233 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3237 if (Parser.getTok().isNot(AsmToken::RCurly))
3238 return Error(Parser.getTok().getLoc(), "'}' expected");
3239 SMLoc E = Parser.getTok().getEndLoc();
3240 Parser.Lex(); // Eat '}' token.
3242 // Push the register list operand.
3243 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3245 // The ARM system instruction variants for LDM/STM have a '^' token here.
3246 if (Parser.getTok().is(AsmToken::Caret)) {
3247 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3248 Parser.Lex(); // Eat '^' token.
3254 // Helper function to parse the lane index for vector lists.
3255 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3256 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3257 Index = 0; // Always return a defined index value.
3258 if (Parser.getTok().is(AsmToken::LBrac)) {
3259 Parser.Lex(); // Eat the '['.
3260 if (Parser.getTok().is(AsmToken::RBrac)) {
3261 // "Dn[]" is the 'all lanes' syntax.
3262 LaneKind = AllLanes;
3263 EndLoc = Parser.getTok().getEndLoc();
3264 Parser.Lex(); // Eat the ']'.
3265 return MatchOperand_Success;
3268 // There's an optional '#' token here. Normally there wouldn't be, but
3269 // inline assembly puts one in, and it's friendly to accept that.
3270 if (Parser.getTok().is(AsmToken::Hash))
3271 Parser.Lex(); // Eat '#' or '$'.
3273 const MCExpr *LaneIndex;
3274 SMLoc Loc = Parser.getTok().getLoc();
3275 if (getParser().parseExpression(LaneIndex)) {
3276 Error(Loc, "illegal expression");
3277 return MatchOperand_ParseFail;
3279 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3281 Error(Loc, "lane index must be empty or an integer");
3282 return MatchOperand_ParseFail;
3284 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3285 Error(Parser.getTok().getLoc(), "']' expected");
3286 return MatchOperand_ParseFail;
3288 EndLoc = Parser.getTok().getEndLoc();
3289 Parser.Lex(); // Eat the ']'.
3290 int64_t Val = CE->getValue();
3292 // FIXME: Make this range check context sensitive for .8, .16, .32.
3293 if (Val < 0 || Val > 7) {
3294 Error(Parser.getTok().getLoc(), "lane index out of range");
3295 return MatchOperand_ParseFail;
3298 LaneKind = IndexedLane;
3299 return MatchOperand_Success;
3302 return MatchOperand_Success;
3305 // Parse a vector register list.
3306 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3307 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3308 VectorLaneTy LaneKind;
3310 SMLoc S = Parser.getTok().getLoc();
3311 // As an extension (to match gas), support a plain D register or Q register
3312 // (without enclosing curly braces) as a single or double entry list, respectively.
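// e.g., a bare "d0" is accepted as "{d0}" and a bare "q1" as "{d2, d3}".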
3314 if (Parser.getTok().is(AsmToken::Identifier)) {
3315 SMLoc E = Parser.getTok().getEndLoc();
3316 int Reg = tryParseRegister();
3318 return MatchOperand_NoMatch;
3319 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3320 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3321 if (Res != MatchOperand_Success)
3325 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3328 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3332 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3337 return MatchOperand_Success;
3339 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3340 Reg = getDRegFromQReg(Reg);
3341 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3342 if (Res != MatchOperand_Success)
3346 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3347 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3348 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3351 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3352 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3353 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3357 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3362 return MatchOperand_Success;
3364 Error(S, "vector register expected");
3365 return MatchOperand_ParseFail;
3368 if (Parser.getTok().isNot(AsmToken::LCurly))
3369 return MatchOperand_NoMatch;
3371 Parser.Lex(); // Eat '{' token.
3372 SMLoc RegLoc = Parser.getTok().getLoc();
3374 int Reg = tryParseRegister();
3376 Error(RegLoc, "register expected");
3377 return MatchOperand_ParseFail;
3381 unsigned FirstReg = Reg;
3382 // The list is of D registers, but we also allow Q regs and just interpret
3383 // them as the two D sub-registers.
3384 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3385 FirstReg = Reg = getDRegFromQReg(Reg);
3386 Spacing = 1; // double-spacing requires explicit D registers, otherwise
3387 // it's ambiguous with four-register single spaced.
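// e.g., "{q0, q1}" always parses as the single-spaced "{d0, d1, d2, d3}"; a double-spaced list must be written with D registers, e.g. "{d0, d2, d4, d6}".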
3393 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3394 return MatchOperand_ParseFail;
3396 while (Parser.getTok().is(AsmToken::Comma) ||
3397 Parser.getTok().is(AsmToken::Minus)) {
3398 if (Parser.getTok().is(AsmToken::Minus)) {
3400 Spacing = 1; // Register range implies a single spaced list.
3401 else if (Spacing == 2) {
3402 Error(Parser.getTok().getLoc(),
3403 "sequential registers in double spaced list");
3404 return MatchOperand_ParseFail;
3406 Parser.Lex(); // Eat the minus.
3407 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3408 int EndReg = tryParseRegister();
3410 Error(AfterMinusLoc, "register expected");
3411 return MatchOperand_ParseFail;
3413 // Allow Q regs and just interpret them as the two D sub-registers.
3414 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3415 EndReg = getDRegFromQReg(EndReg) + 1;
3416 // If the register is the same as the start reg, there's nothing to do.
3420 // The register must be in the same register class as the first.
3421 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3422 Error(AfterMinusLoc, "invalid register in register list");
3423 return MatchOperand_ParseFail;
3425 // Ranges must go from low to high.
3427 Error(AfterMinusLoc, "bad range in register list");
3428 return MatchOperand_ParseFail;
3430 // Parse the lane specifier if present.
3431 VectorLaneTy NextLaneKind;
3432 unsigned NextLaneIndex;
3433 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3434 MatchOperand_Success)
3435 return MatchOperand_ParseFail;
3436 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3437 Error(AfterMinusLoc, "mismatched lane index in register list");
3438 return MatchOperand_ParseFail;
3441 // Add all the registers in the range to the register list.
3442 Count += EndReg - Reg;
3446 Parser.Lex(); // Eat the comma.
3447 RegLoc = Parser.getTok().getLoc();
3449 Reg = tryParseRegister();
3451 Error(RegLoc, "register expected");
3452 return MatchOperand_ParseFail;
3454 // Vector register lists must be contiguous.
3455 // It's OK to use the enumeration values directly here, as the
3456 // VFP register classes have the enum sorted properly.
3458 // The list is of D registers, but we also allow Q regs and just interpret
3459 // them as the two D sub-registers.
3460 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3462 Spacing = 1; // Register range implies a single spaced list.
3463 else if (Spacing == 2) {
3465 "invalid register in double-spaced list (must be 'D' register')");
3466 return MatchOperand_ParseFail;
3468 Reg = getDRegFromQReg(Reg);
3469 if (Reg != OldReg + 1) {
3470 Error(RegLoc, "non-contiguous register range");
3471 return MatchOperand_ParseFail;
3475 // Parse the lane specifier if present.
3476 VectorLaneTy NextLaneKind;
3477 unsigned NextLaneIndex;
3478 SMLoc LaneLoc = Parser.getTok().getLoc();
3479 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3480 MatchOperand_Success)
3481 return MatchOperand_ParseFail;
3482 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3483 Error(LaneLoc, "mismatched lane index in register list");
3484 return MatchOperand_ParseFail;
3488 // Normal D register.
3489 // Figure out the register spacing (single or double) of the list if
3490 // we don't know it already.
3492 Spacing = 1 + (Reg == OldReg + 2);
3494 // Just check that it's contiguous and keep going.
3495 if (Reg != OldReg + Spacing) {
3496 Error(RegLoc, "non-contiguous register range");
3497 return MatchOperand_ParseFail;
3500 // Parse the lane specifier if present.
3501 VectorLaneTy NextLaneKind;
3502 unsigned NextLaneIndex;
3503 SMLoc EndLoc = Parser.getTok().getLoc();
3504 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3505 return MatchOperand_ParseFail;
3506 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3507 Error(EndLoc, "mismatched lane index in register list");
3508 return MatchOperand_ParseFail;
3512 if (Parser.getTok().isNot(AsmToken::RCurly)) {
3513 Error(Parser.getTok().getLoc(), "'}' expected");
3514 return MatchOperand_ParseFail;
3516 E = Parser.getTok().getEndLoc();
3517 Parser.Lex(); // Eat '}' token.
3521 // Two-register operands have been converted to the
3522 // composite register classes.
3524 const MCRegisterClass *RC = (Spacing == 1) ?
3525 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3526 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3527 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3530 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3531 (Spacing == 2), S, E));
3534 // Two-register operands have been converted to the
3535 // composite register classes.
3537 const MCRegisterClass *RC = (Spacing == 1) ?
3538 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3539 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3540 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3542 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3547 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3553 return MatchOperand_Success;
3556 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
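/// Illustrative forms this is expected to accept (not exhaustive; the
/// immediate form assumes an option value in [0,15]):
///   dmb ish
///   dsb #10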
3557 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3558 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3559 SMLoc S = Parser.getTok().getLoc();
3560 const AsmToken &Tok = Parser.getTok();
3563 if (Tok.is(AsmToken::Identifier)) {
3564 StringRef OptStr = Tok.getString();
3566 Opt = StringSwitch<unsigned>(OptStr.lower())
3567 .Case("sy", ARM_MB::SY)
3568 .Case("st", ARM_MB::ST)
3569 .Case("ld", ARM_MB::LD)
3570 .Case("sh", ARM_MB::ISH)
3571 .Case("ish", ARM_MB::ISH)
3572 .Case("shst", ARM_MB::ISHST)
3573 .Case("ishst", ARM_MB::ISHST)
3574 .Case("ishld", ARM_MB::ISHLD)
3575 .Case("nsh", ARM_MB::NSH)
3576 .Case("un", ARM_MB::NSH)
3577 .Case("nshst", ARM_MB::NSHST)
3578 .Case("nshld", ARM_MB::NSHLD)
3579 .Case("unst", ARM_MB::NSHST)
3580 .Case("osh", ARM_MB::OSH)
3581 .Case("oshst", ARM_MB::OSHST)
3582 .Case("oshld", ARM_MB::OSHLD)
3585 // ishld, oshld, nshld and ld are only available from ARMv8.
3586 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3587 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3591 return MatchOperand_NoMatch;
3593 Parser.Lex(); // Eat identifier token.
3594 } else if (Tok.is(AsmToken::Hash) ||
3595 Tok.is(AsmToken::Dollar) ||
3596 Tok.is(AsmToken::Integer)) {
3597 if (Parser.getTok().isNot(AsmToken::Integer))
3598 Parser.Lex(); // Eat '#' or '$'.
3599 SMLoc Loc = Parser.getTok().getLoc();
3601 const MCExpr *MemBarrierID;
3602 if (getParser().parseExpression(MemBarrierID)) {
3603 Error(Loc, "illegal expression");
3604 return MatchOperand_ParseFail;
3607 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3609 Error(Loc, "constant expression expected");
3610 return MatchOperand_ParseFail;
3613 int Val = CE->getValue();
3615 Error(Loc, "immediate value out of range");
3616 return MatchOperand_ParseFail;
3619 Opt = ARM_MB::RESERVED_0 + Val;
3621 return MatchOperand_ParseFail;
3623 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3624 return MatchOperand_Success;
3627 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
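/// Illustrative forms (not exhaustive): "isb sy" and "isb #15", where the
/// immediate form assumes a raw option value in [0,15].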
3628 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3629 parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3630 SMLoc S = Parser.getTok().getLoc();
3631 const AsmToken &Tok = Parser.getTok();
3634 if (Tok.is(AsmToken::Identifier)) {
3635 StringRef OptStr = Tok.getString();
3637 if (OptStr.equals_lower("sy"))
3640 return MatchOperand_NoMatch;
3642 Parser.Lex(); // Eat identifier token.
3643 } else if (Tok.is(AsmToken::Hash) ||
3644 Tok.is(AsmToken::Dollar) ||
3645 Tok.is(AsmToken::Integer)) {
3646 if (Parser.getTok().isNot(AsmToken::Integer))
3647 Parser.Lex(); // Eat '#' or '$'.
3648 SMLoc Loc = Parser.getTok().getLoc();
3650 const MCExpr *ISBarrierID;
3651 if (getParser().parseExpression(ISBarrierID)) {
3652 Error(Loc, "illegal expression");
3653 return MatchOperand_ParseFail;
3656 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3658 Error(Loc, "constant expression expected");
3659 return MatchOperand_ParseFail;
3662 int Val = CE->getValue();
3664 Error(Loc, "immediate value out of range");
3665 return MatchOperand_ParseFail;
3668 Opt = ARM_ISB::RESERVED_0 + Val;
3670 return MatchOperand_ParseFail;
3672 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3673 (ARM_ISB::InstSyncBOpt)Opt, S));
3674 return MatchOperand_Success;
3678 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
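/// Illustrative forms: "cpsie if", "cpsid aif", and "cpsie none" (no AIF bits
/// set); each of the 'a', 'i', 'f' letters may appear at most once.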
3679 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3680 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3681 SMLoc S = Parser.getTok().getLoc();
3682 const AsmToken &Tok = Parser.getTok();
3683 if (!Tok.is(AsmToken::Identifier))
3684 return MatchOperand_NoMatch;
3685 StringRef IFlagsStr = Tok.getString();
3687 // An iflags string of "none" is interpreted to mean that none of the AIF
3688 // bits are set. Not a terribly useful instruction, but a valid encoding.
3689 unsigned IFlags = 0;
3690 if (IFlagsStr != "none") {
3691 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3692 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3693 .Case("a", ARM_PROC::A)
3694 .Case("i", ARM_PROC::I)
3695 .Case("f", ARM_PROC::F)
3698 // If some specific iflag is already set, it means that some letter is
3699 // present more than once, which is not acceptable.
3700 if (Flag == ~0U || (IFlags & Flag))
3701 return MatchOperand_NoMatch;
3707 Parser.Lex(); // Eat identifier token.
3708 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3709 return MatchOperand_Success;
3712 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
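/// Illustrative forms (register choices are arbitrary): "msr apsr_nzcvq, r0"
/// using the M-class style names below, or "msr cpsr_fc, r1" and
/// "msr spsr_fsxc, r2" using a spec_reg plus field-flag suffix.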
3713 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3714 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3715 SMLoc S = Parser.getTok().getLoc();
3716 const AsmToken &Tok = Parser.getTok();
3717 if (!Tok.is(AsmToken::Identifier))
3718 return MatchOperand_NoMatch;
3719 StringRef Mask = Tok.getString();
3722 // See ARMv6-M 10.1.1
3723 std::string Name = Mask.lower();
3724 unsigned FlagsVal = StringSwitch<unsigned>(Name)
3725 // Note: in the documentation:
3726 // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3727 // for MSR APSR_nzcvq.
3728 // but we do make it an alias here, so as to get the "mask encoding"
3729 // bits correct on MSR APSR writes.
3731 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3732 // should really only be allowed when writing a special register. Note
3733 // they get dropped in the MRS instruction reading a special register as
3734 // the SYSm field is only 8 bits.
3736 // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3737 // includes the DSP extension but that is not checked.
3738 .Case("apsr", 0x800)
3739 .Case("apsr_nzcvq", 0x800)
3740 .Case("apsr_g", 0x400)
3741 .Case("apsr_nzcvqg", 0xc00)
3742 .Case("iapsr", 0x801)
3743 .Case("iapsr_nzcvq", 0x801)
3744 .Case("iapsr_g", 0x401)
3745 .Case("iapsr_nzcvqg", 0xc01)
3746 .Case("eapsr", 0x802)
3747 .Case("eapsr_nzcvq", 0x802)
3748 .Case("eapsr_g", 0x402)
3749 .Case("eapsr_nzcvqg", 0xc02)
3750 .Case("xpsr", 0x803)
3751 .Case("xpsr_nzcvq", 0x803)
3752 .Case("xpsr_g", 0x403)
3753 .Case("xpsr_nzcvqg", 0xc03)
3754 .Case("ipsr", 0x805)
3755 .Case("epsr", 0x806)
3756 .Case("iepsr", 0x807)
3759 .Case("primask", 0x810)
3760 .Case("basepri", 0x811)
3761 .Case("basepri_max", 0x812)
3762 .Case("faultmask", 0x813)
3763 .Case("control", 0x814)
3766 if (FlagsVal == ~0U)
3767 return MatchOperand_NoMatch;
3769 if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3770 // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3771 return MatchOperand_NoMatch;
3773 Parser.Lex(); // Eat identifier token.
3774 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3775 return MatchOperand_Success;
3778 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3779 size_t Start = 0, Next = Mask.find('_');
3780 StringRef Flags = "";
3781 std::string SpecReg = Mask.slice(Start, Next).lower();
3782 if (Next != StringRef::npos)
3783 Flags = Mask.slice(Next+1, Mask.size());
3785 // FlagsVal contains the complete mask:
3786 // 3-0: Mask
3787 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3788 unsigned FlagsVal = 0;
3790 if (SpecReg == "apsr") {
3791 FlagsVal = StringSwitch<unsigned>(Flags)
3792 .Case("nzcvq", 0x8) // same as CPSR_f
3793 .Case("g", 0x4) // same as CPSR_s
3794 .Case("nzcvqg", 0xc) // same as CPSR_fs
3797 if (FlagsVal == ~0U) {
3799 return MatchOperand_NoMatch;
3801 FlagsVal = 8; // No flag
3803 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3804 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3805 if (Flags == "all" || Flags == "")
3807 for (int i = 0, e = Flags.size(); i != e; ++i) {
3808 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3815 // If some specific flag is already set, it means that some letter is
3816 // present more than once, which is not acceptable.
3817 if (FlagsVal == ~0U || (FlagsVal & Flag))
3818 return MatchOperand_NoMatch;
3821 } else // No match for special register.
3822 return MatchOperand_NoMatch;
3824 // Special register without flags is NOT equivalent to "fc" flags.
3825 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
3826 // two lines would enable gas compatibility at the expense of breaking
3832 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3833 if (SpecReg == "spsr")
3836 Parser.Lex(); // Eat identifier token.
3837 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3838 return MatchOperand_Success;
3841 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3842 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3843 int Low, int High) {
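// Illustrative uses (registers arbitrary): "pkhbt r0, r1, r2, lsl #8" is
// expected to reach here with Op == "lsl" and [Low,High] == [0,31], and
// "pkhtb r0, r1, r2, asr #16" with Op == "asr" and [Low,High] == [1,32].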
3844 const AsmToken &Tok = Parser.getTok();
3845 if (Tok.isNot(AsmToken::Identifier)) {
3846 Error(Parser.getTok().getLoc(), Op + " operand expected.");
3847 return MatchOperand_ParseFail;
3849 StringRef ShiftName = Tok.getString();
3850 std::string LowerOp = Op.lower();
3851 std::string UpperOp = Op.upper();
3852 if (ShiftName != LowerOp && ShiftName != UpperOp) {
3853 Error(Parser.getTok().getLoc(), Op + " operand expected.");
3854 return MatchOperand_ParseFail;
3856 Parser.Lex(); // Eat shift type token.
3858 // There must be a '#' and a shift amount.
3859 if (Parser.getTok().isNot(AsmToken::Hash) &&
3860 Parser.getTok().isNot(AsmToken::Dollar)) {
3861 Error(Parser.getTok().getLoc(), "'#' expected");
3862 return MatchOperand_ParseFail;
3864 Parser.Lex(); // Eat hash token.
3866 const MCExpr *ShiftAmount;
3867 SMLoc Loc = Parser.getTok().getLoc();
3869 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3870 Error(Loc, "illegal expression");
3871 return MatchOperand_ParseFail;
3873 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3875 Error(Loc, "constant expression expected");
3876 return MatchOperand_ParseFail;
3878 int Val = CE->getValue();
3879 if (Val < Low || Val > High) {
3880 Error(Loc, "immediate value out of range");
3881 return MatchOperand_ParseFail;
3884 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3886 return MatchOperand_Success;
3889 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3890 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3891 const AsmToken &Tok = Parser.getTok();
3892 SMLoc S = Tok.getLoc();
3893 if (Tok.isNot(AsmToken::Identifier)) {
3894 Error(S, "'be' or 'le' operand expected");
3895 return MatchOperand_ParseFail;
3897 int Val = StringSwitch<int>(Tok.getString().lower())
3901 Parser.Lex(); // Eat the token.
3904 Error(S, "'be' or 'le' operand expected");
3905 return MatchOperand_ParseFail;
3907 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3909 S, Tok.getEndLoc()));
3910 return MatchOperand_Success;
3913 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3914 /// instructions. Legal values are:
3915 /// lsl #n 'n' in [0,31]
3916 /// asr #n 'n' in [1,32]
3917 /// n == 32 encoded as n == 0.
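/// Illustrative uses (registers arbitrary): "ssat r0, #8, r1, lsl #4" and
/// "usat r0, #8, r1, asr #32" (ARM mode; the latter is encoded with a shift
/// field of 0).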
3918 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3919 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3920 const AsmToken &Tok = Parser.getTok();
3921 SMLoc S = Tok.getLoc();
3922 if (Tok.isNot(AsmToken::Identifier)) {
3923 Error(S, "shift operator 'asr' or 'lsl' expected");
3924 return MatchOperand_ParseFail;
3926 StringRef ShiftName = Tok.getString();
3928 if (ShiftName == "lsl" || ShiftName == "LSL")
3930 else if (ShiftName == "asr" || ShiftName == "ASR")
3933 Error(S, "shift operator 'asr' or 'lsl' expected");
3934 return MatchOperand_ParseFail;
3936 Parser.Lex(); // Eat the operator.
3938 // A '#' and a shift amount.
3939 if (Parser.getTok().isNot(AsmToken::Hash) &&
3940 Parser.getTok().isNot(AsmToken::Dollar)) {
3941 Error(Parser.getTok().getLoc(), "'#' expected");
3942 return MatchOperand_ParseFail;
3944 Parser.Lex(); // Eat hash token.
3945 SMLoc ExLoc = Parser.getTok().getLoc();
3947 const MCExpr *ShiftAmount;
3949 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3950 Error(ExLoc, "malformed shift expression");
3951 return MatchOperand_ParseFail;
3953 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3955 Error(ExLoc, "shift amount must be an immediate");
3956 return MatchOperand_ParseFail;
3959 int64_t Val = CE->getValue();
3961 // Shift amount must be in [1,32]
3962 if (Val < 1 || Val > 32) {
3963 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3964 return MatchOperand_ParseFail;
3966 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3967 if (isThumb() && Val == 32) {
3968 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3969 return MatchOperand_ParseFail;
3971 if (Val == 32) Val = 0;
3973 // Shift amount must be in [0,31]
3974 if (Val < 0 || Val > 31) {
3975 Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3976 return MatchOperand_ParseFail;
3980 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3982 return MatchOperand_Success;
3985 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3986 /// of instructions. Legal values are:
3987 /// ror #n 'n' in {0, 8, 16, 24}
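/// e.g. "sxtb r0, r1, ror #16" (registers arbitrary); a rotation of 0 is
/// normally written by omitting the rotate operand altogether.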
3988 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3989 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3990 const AsmToken &Tok = Parser.getTok();
3991 SMLoc S = Tok.getLoc();
3992 if (Tok.isNot(AsmToken::Identifier))
3993 return MatchOperand_NoMatch;
3994 StringRef ShiftName = Tok.getString();
3995 if (ShiftName != "ror" && ShiftName != "ROR")
3996 return MatchOperand_NoMatch;
3997 Parser.Lex(); // Eat the operator.
3999 // A '#' and a rotate amount.
4000 if (Parser.getTok().isNot(AsmToken::Hash) &&
4001 Parser.getTok().isNot(AsmToken::Dollar)) {
4002 Error(Parser.getTok().getLoc(), "'#' expected");
4003 return MatchOperand_ParseFail;
4005 Parser.Lex(); // Eat hash token.
4006 SMLoc ExLoc = Parser.getTok().getLoc();
4008 const MCExpr *ShiftAmount;
4010 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4011 Error(ExLoc, "malformed rotate expression");
4012 return MatchOperand_ParseFail;
4014 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4016 Error(ExLoc, "rotate amount must be an immediate");
4017 return MatchOperand_ParseFail;
4020 int64_t Val = CE->getValue();
4021 // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension
4022 // (normally a rotation of zero is written by omitting the rotate operand entirely).
4024 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4025 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4026 return MatchOperand_ParseFail;
4029 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4031 return MatchOperand_Success;
4034 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4035 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4036 SMLoc S = Parser.getTok().getLoc();
4037 // The bitfield descriptor is really two operands, the LSB and the width.
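// Illustrative uses (registers arbitrary): the "#8, #4" in
// "bfi r0, r1, #8, #4" or the "#0, #16" in "sbfx r2, r3, #0, #16".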
4038 if (Parser.getTok().isNot(AsmToken::Hash) &&
4039 Parser.getTok().isNot(AsmToken::Dollar)) {
4040 Error(Parser.getTok().getLoc(), "'#' expected");
4041 return MatchOperand_ParseFail;
4043 Parser.Lex(); // Eat hash token.
4045 const MCExpr *LSBExpr;
4046 SMLoc E = Parser.getTok().getLoc();
4047 if (getParser().parseExpression(LSBExpr)) {
4048 Error(E, "malformed immediate expression");
4049 return MatchOperand_ParseFail;
4051 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4053 Error(E, "'lsb' operand must be an immediate");
4054 return MatchOperand_ParseFail;
4057 int64_t LSB = CE->getValue();
4058 // The LSB must be in the range [0,31]
4059 if (LSB < 0 || LSB > 31) {
4060 Error(E, "'lsb' operand must be in the range [0,31]");
4061 return MatchOperand_ParseFail;
4063 E = Parser.getTok().getLoc();
4065 // Expect another immediate operand.
4066 if (Parser.getTok().isNot(AsmToken::Comma)) {
4067 Error(Parser.getTok().getLoc(), "too few operands");
4068 return MatchOperand_ParseFail;
4070 Parser.Lex(); // Eat comma token.
4071 if (Parser.getTok().isNot(AsmToken::Hash) &&
4072 Parser.getTok().isNot(AsmToken::Dollar)) {
4073 Error(Parser.getTok().getLoc(), "'#' expected");
4074 return MatchOperand_ParseFail;
4076 Parser.Lex(); // Eat hash token.
4078 const MCExpr *WidthExpr;
4080 if (getParser().parseExpression(WidthExpr, EndLoc)) {
4081 Error(E, "malformed immediate expression");
4082 return MatchOperand_ParseFail;
4084 CE = dyn_cast<MCConstantExpr>(WidthExpr);
4086 Error(E, "'width' operand must be an immediate");
4087 return MatchOperand_ParseFail;
4090 int64_t Width = CE->getValue();
4091 // The width must be in the range [1,32-lsb]
4092 if (Width < 1 || Width > 32 - LSB) {
4093 Error(E, "'width' operand must be in the range [1,32-lsb]");
4094 return MatchOperand_ParseFail;
4097 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4099 return MatchOperand_Success;
4102 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4103 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4104 // Check for a post-index addressing register operand. Specifically:
4105 // postidx_reg := '+' register {, shift}
4106 // | '-' register {, shift}
4107 // | register {, shift}
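//
// Illustrative uses (registers arbitrary): the ", r2" in "ldr r0, [r1], r2"
// or the ", -r2, lsl #2" in "str r0, [r1], -r2, lsl #2".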
4109 // This method must return MatchOperand_NoMatch without consuming any tokens
4110 // in the case where there is no match, as other alternatives take other
4112 AsmToken Tok = Parser.getTok();
4113 SMLoc S = Tok.getLoc();
4114 bool haveEaten = false;
4116 if (Tok.is(AsmToken::Plus)) {
4117 Parser.Lex(); // Eat the '+' token.
4119 } else if (Tok.is(AsmToken::Minus)) {
4120 Parser.Lex(); // Eat the '-' token.
4125 SMLoc E = Parser.getTok().getEndLoc();
4126 int Reg = tryParseRegister();
4129 return MatchOperand_NoMatch;
4130 Error(Parser.getTok().getLoc(), "register expected");
4131 return MatchOperand_ParseFail;
4134 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4135 unsigned ShiftImm = 0;
4136 if (Parser.getTok().is(AsmToken::Comma)) {
4137 Parser.Lex(); // Eat the ','.
4138 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4139 return MatchOperand_ParseFail;
4141 // FIXME: Only approximates end...may include intervening whitespace.
4142 E = Parser.getTok().getLoc();
4145 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4148 return MatchOperand_Success;
4151 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4152 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4153 // Check for a post-index addressing register operand. Specifically:
4154 // am3offset := '+' register
4161 // This method must return MatchOperand_NoMatch without consuming any tokens
4162 // in the case where there is no match, as other alternatives take other
4164 AsmToken Tok = Parser.getTok();
4165 SMLoc S = Tok.getLoc();
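// Illustrative uses (registers arbitrary): the ", #4" in "ldrh r0, [r1], #4",
// the ", -r2" in "ldrsb r0, [r1], -r2", or the ", #-0" in
// "ldrd r2, r3, [r4], #-0".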
4167 // Do immediates first, as we always parse those if we have a '#'.
4168 if (Parser.getTok().is(AsmToken::Hash) ||
4169 Parser.getTok().is(AsmToken::Dollar)) {
4170 Parser.Lex(); // Eat '#' or '$'.
4171 // Explicitly look for a '-', as we need to encode negative zero differently.
4173 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4174 const MCExpr *Offset;
4176 if (getParser().parseExpression(Offset, E))
4177 return MatchOperand_ParseFail;
4178 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4180 Error(S, "constant expression expected");
4181 return MatchOperand_ParseFail;
4183 // Negative zero is encoded as the flag value INT32_MIN.
4184 int32_t Val = CE->getValue();
4185 if (isNegative && Val == 0)
4189 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
4191 return MatchOperand_Success;
4195 bool haveEaten = false;
4197 if (Tok.is(AsmToken::Plus)) {
4198 Parser.Lex(); // Eat the '+' token.
4200 } else if (Tok.is(AsmToken::Minus)) {
4201 Parser.Lex(); // Eat the '-' token.
4206 Tok = Parser.getTok();
4207 int Reg = tryParseRegister();
4210 return MatchOperand_NoMatch;
4211 Error(Tok.getLoc(), "register expected");
4212 return MatchOperand_ParseFail;
4215 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4216 0, S, Tok.getEndLoc()));
4218 return MatchOperand_Success;
4221 /// Convert parsed operands to MCInst. Needed here because this instruction
4222 /// only has two register operands, but multiplication is commutative so
4223 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
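/// For "muls r0, r1, r0" (registers arbitrary), Operands is expected to hold
/// the mnemonic, cc_out, condition code, then r0, r1, r0; Rn is taken from
/// index 4 unless that register equals Rd, in which case index 5 is used.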
4225 cvtThumbMultiply(MCInst &Inst,
4226 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4227 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4228 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4229 // If we have a three-operand form, make sure to set Rn to be the operand
4230 // that isn't the same as Rd.
4232 if (Operands.size() == 6 &&
4233 ((ARMOperand*)Operands[4])->getReg() ==
4234 ((ARMOperand*)Operands[3])->getReg())
4236 ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4237 Inst.addOperand(Inst.getOperand(0));
4238 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4242 cvtThumbBranches(MCInst &Inst,
4243 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4244 int CondOp = -1, ImmOp = -1;
4245 switch(Inst.getOpcode()) {
4247 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4250 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4252 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4254 // first decide whether or not the branch should be conditional
4255 // by looking at its location relative to an IT block
4257 // inside an IT block we cannot have any conditional branches; any
4258 // such instruction needs to be converted to unconditional form
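// (e.g. a "beq label" written inside an IT block is emitted as an
// unconditional branch; the enclosing IT block supplies the condition)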
4259 switch(Inst.getOpcode()) {
4260 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4261 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4264 // outside IT blocks we can only have unconditional branches with AL
4265 // condition code or conditional branches with non-AL condition code
4266 unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
4267 switch(Inst.getOpcode()) {
4270 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4274 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4279 // now decide on encoding size based on branch target range
4280 switch(Inst.getOpcode()) {
4281 // classify tB as either t2B or t1B based on range of immediate operand
4283 ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4284 if(!op->isSignedOffset<11, 1>() && isThumbTwo())
4285 Inst.setOpcode(ARM::t2B);
4288 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4290 ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4291 if(!op->isSignedOffset<8, 1>() && isThumbTwo())
4292 Inst.setOpcode(ARM::t2Bcc);
4296 ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
4297 ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
4300 /// Parse an ARM memory expression. Return false on success; otherwise return
4301 /// true (an error has been emitted). The first token must be a '[' when called.
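/// Illustrative forms (registers arbitrary): "[r0]", "[r0, #-4]!",
/// "[r0, r1, lsl #2]", and "[r0, :128]" (an alignment specifier in bits).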
4303 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4305 assert(Parser.getTok().is(AsmToken::LBrac) &&
4306 "Token is not a Left Bracket");
4307 S = Parser.getTok().getLoc();
4308 Parser.Lex(); // Eat left bracket token.
4310 const AsmToken &BaseRegTok = Parser.getTok();
4311 int BaseRegNum = tryParseRegister();
4312 if (BaseRegNum == -1)
4313 return Error(BaseRegTok.getLoc(), "register expected");
4315 // The next token must either be a comma, a colon or a closing bracket.
4316 const AsmToken &Tok = Parser.getTok();
4317 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4318 !Tok.is(AsmToken::RBrac))
4319 return Error(Tok.getLoc(), "malformed memory operand");
4321 if (Tok.is(AsmToken::RBrac)) {
4322 E = Tok.getEndLoc();
4323 Parser.Lex(); // Eat right bracket token.
4325 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4326 0, 0, false, S, E));
4328 // If there's a pre-indexing writeback marker, '!', just add it as a token
4329 // operand. It's rather odd, but syntactically valid.
4330 if (Parser.getTok().is(AsmToken::Exclaim)) {
4331 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4332 Parser.Lex(); // Eat the '!'.
4338 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4339 "Lost colon or comma in memory operand?!");
4340 if (Tok.is(AsmToken::Comma)) {
4341 Parser.Lex(); // Eat the comma.
4344 // If we have a ':', it's an alignment specifier.
4345 if (Parser.getTok().is(AsmToken::Colon)) {
4346 Parser.Lex(); // Eat the ':'.
4347 E = Parser.getTok().getLoc();
4350 if (getParser().parseExpression(Expr))
4353 // The expression has to be a constant. Memory references with relocations
4354 // don't come through here, as they use the <label> forms of the relevant
4356 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4358 return Error (E, "constant expression expected");
4361 switch (CE->getValue()) {
4364 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4365 case 16: Align = 2; break;
4366 case 32: Align = 4; break;
4367 case 64: Align = 8; break;
4368 case 128: Align = 16; break;
4369 case 256: Align = 32; break;
4372 // Now we should have the closing ']'
4373 if (Parser.getTok().isNot(AsmToken::RBrac))
4374 return Error(Parser.getTok().getLoc(), "']' expected");
4375 E = Parser.getTok().getEndLoc();
4376 Parser.Lex(); // Eat right bracket token.
4378 // Don't worry about range checking the value here. That's handled by
4379 // the is*() predicates.
4380 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4381 ARM_AM::no_shift, 0, Align,
4384 // If there's a pre-indexing writeback marker, '!', just add it as a token
4386 if (Parser.getTok().is(AsmToken::Exclaim)) {
4387 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4388 Parser.Lex(); // Eat the '!'.
4394 // If we have a '#', it's an immediate offset, else assume it's a register
4395 // offset. Be friendly and also accept a plain integer (without a leading
4396 // hash) for gas compatibility.
4397 if (Parser.getTok().is(AsmToken::Hash) ||
4398 Parser.getTok().is(AsmToken::Dollar) ||
4399 Parser.getTok().is(AsmToken::Integer)) {
4400 if (Parser.getTok().isNot(AsmToken::Integer))
4401 Parser.Lex(); // Eat '#' or '$'.
4402 E = Parser.getTok().getLoc();
4404 bool isNegative = getParser().getTok().is(AsmToken::Minus);
4405 const MCExpr *Offset;
4406 if (getParser().parseExpression(Offset))
4409 // The expression has to be a constant. Memory references with relocations
4410 // don't come through here, as they use the <label> forms of the relevant
4412 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4414 return Error (E, "constant expression expected");
4416 // If the constant was #-0, represent it as INT32_MIN.
4417 int32_t Val = CE->getValue();
4418 if (isNegative && Val == 0)
4419 CE = MCConstantExpr::Create(INT32_MIN, getContext());
4421 // Now we should have the closing ']'
4422 if (Parser.getTok().isNot(AsmToken::RBrac))
4423 return Error(Parser.getTok().getLoc(), "']' expected");
4424 E = Parser.getTok().getEndLoc();
4425 Parser.Lex(); // Eat right bracket token.
4427 // Don't worry about range checking the value here. That's handled by
4428 // the is*() predicates.
4429 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4430 ARM_AM::no_shift, 0, 0,
4433 // If there's a pre-indexing writeback marker, '!', just add it as a token
4435 if (Parser.getTok().is(AsmToken::Exclaim)) {
4436 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4437 Parser.Lex(); // Eat the '!'.
4443 // The register offset is optionally preceded by a '+' or '-'
4444 bool isNegative = false;
4445 if (Parser.getTok().is(AsmToken::Minus)) {
4447 Parser.Lex(); // Eat the '-'.
4448 } else if (Parser.getTok().is(AsmToken::Plus)) {
4450 Parser.Lex(); // Eat the '+'.
4453 E = Parser.getTok().getLoc();
4454 int OffsetRegNum = tryParseRegister();
4455 if (OffsetRegNum == -1)
4456 return Error(E, "register expected");
4458 // If there's a shift operator, handle it.
4459 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4460 unsigned ShiftImm = 0;
4461 if (Parser.getTok().is(AsmToken::Comma)) {
4462 Parser.Lex(); // Eat the ','.
4463 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4467 // Now we should have the closing ']'
4468 if (Parser.getTok().isNot(AsmToken::RBrac))
4469 return Error(Parser.getTok().getLoc(), "']' expected");
4470 E = Parser.getTok().getEndLoc();
4471 Parser.Lex(); // Eat right bracket token.
4473 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4474 ShiftType, ShiftImm, 0, isNegative,
4477 // If there's a pre-indexing writeback marker, '!', just add it as a token
4479 if (Parser.getTok().is(AsmToken::Exclaim)) {
4480 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4481 Parser.Lex(); // Eat the '!'.
4487 /// parseMemRegOffsetShift - one of these two:
4488 /// ( lsl | lsr | asr | ror ) , # shift_amount
4489 /// rrx
4490 /// Returns true on error, false otherwise.
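/// e.g. the ", lsl #2" in "ldr r0, [r1, r2, lsl #2]" or the ", rrx" in
/// "ldr r0, [r1, r2, rrx]" (registers arbitrary).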
4491 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4493 SMLoc Loc = Parser.getTok().getLoc();
4494 const AsmToken &Tok = Parser.getTok();
4495 if (Tok.isNot(AsmToken::Identifier))
4497 StringRef ShiftName = Tok.getString();
4498 if (ShiftName == "lsl" || ShiftName == "LSL" ||
4499 ShiftName == "asl" || ShiftName == "ASL")
4501 else if (ShiftName == "lsr" || ShiftName == "LSR")
4503 else if (ShiftName == "asr" || ShiftName == "ASR")
4505 else if (ShiftName == "ror" || ShiftName == "ROR")
4507 else if (ShiftName == "rrx" || ShiftName == "RRX")
4510 return Error(Loc, "illegal shift operator");
4511 Parser.Lex(); // Eat shift type token.
4513 // rrx stands alone.
4515 if (St != ARM_AM::rrx) {
4516 Loc = Parser.getTok().getLoc();
4517 // A '#' and a shift amount.
4518 const AsmToken &HashTok = Parser.getTok();
4519 if (HashTok.isNot(AsmToken::Hash) &&
4520 HashTok.isNot(AsmToken::Dollar))
4521 return Error(HashTok.getLoc(), "'#' expected");
4522 Parser.Lex(); // Eat hash token.
4525 if (getParser().parseExpression(Expr))
4527 // Range check the immediate.
4528 // lsl, ror: 0 <= imm <= 31
4529 // lsr, asr: 0 <= imm <= 32
4530 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4532 return Error(Loc, "shift amount must be an immediate");
4533 int64_t Imm = CE->getValue();
4535 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4536 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4537 return Error(Loc, "immediate shift value out of range");
4538 // If <ShiftTy> #0, turn it into a no_shift.
4541 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4550 /// parseFPImm - A floating point immediate expression operand.
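/// Illustrative forms (registers arbitrary): "vmov.f32 s0, #1.0",
/// "vmov.f64 d0, #-2.0", and "fconsts s1, #112" (a raw encoded 8-bit value).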
4551 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4552 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4553 // Anything that can accept a floating point constant as an operand
4554 // needs to go through here, as the regular parseExpression is
4557 // This routine still creates a generic Immediate operand, containing
4558 // a bitcast of the 64-bit floating point value. The various operands
4559 // that accept floats can check whether the value is valid for them
4560 // via the standard is*() predicates.
4562 SMLoc S = Parser.getTok().getLoc();
4564 if (Parser.getTok().isNot(AsmToken::Hash) &&
4565 Parser.getTok().isNot(AsmToken::Dollar))
4566 return MatchOperand_NoMatch;
4568 // Disambiguate the VMOV forms that can accept an FP immediate.
4569 // vmov.f32 <sreg>, #imm
4570 // vmov.f64 <dreg>, #imm
4571 // vmov.f32 <dreg>, #imm @ vector f32x2
4572 // vmov.f32 <qreg>, #imm @ vector f32x4
4574 // There are also the NEON VMOV instructions which expect an
4575 // integer constant. Make sure we don't try to parse an FPImm
4577 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4578 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4579 bool isVmovf = TyOp->isToken() && (TyOp->getToken() == ".f32" ||
4580 TyOp->getToken() == ".f64");
4581 ARMOperand *Mnemonic = static_cast<ARMOperand*>(Operands[0]);
4582 bool isFconst = Mnemonic->isToken() && (Mnemonic->getToken() == "fconstd" ||
4583 Mnemonic->getToken() == "fconsts");
4584 if (!(isVmovf || isFconst))
4585 return MatchOperand_NoMatch;
4587 Parser.Lex(); // Eat '#' or '$'.
4589 // Handle negation, as that still comes through as a separate token.
4590 bool isNegative = false;
4591 if (Parser.getTok().is(AsmToken::Minus)) {
4595 const AsmToken &Tok = Parser.getTok();
4596 SMLoc Loc = Tok.getLoc();
4597 if (Tok.is(AsmToken::Real) && isVmovf) {
4598 APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4599 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4600 // If we had a '-' in front, toggle the sign bit.
4601 IntVal ^= (uint64_t)isNegative << 31;
4602 Parser.Lex(); // Eat the token.
4603 Operands.push_back(ARMOperand::CreateImm(
4604 MCConstantExpr::Create(IntVal, getContext()),
4605 S, Parser.getTok().getLoc()));
4606 return MatchOperand_Success;
4608 // Also handle plain integers. Instructions which allow floating point
4609 // immediates also allow a raw encoded 8-bit value.
4610 if (Tok.is(AsmToken::Integer) && isFconst) {
4611 int64_t Val = Tok.getIntVal();
4612 Parser.Lex(); // Eat the token.
4613 if (Val > 255 || Val < 0) {
4614 Error(Loc, "encoded floating point value out of range");
4615 return MatchOperand_ParseFail;
4617 float RealVal = ARM_AM::getFPImmFloat(Val);
4618 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
4620 Operands.push_back(ARMOperand::CreateImm(
4621 MCConstantExpr::Create(Val, getContext()), S,
4622 Parser.getTok().getLoc()));
4623 return MatchOperand_Success;
4626 Error(Loc, "invalid floating point immediate");
4627 return MatchOperand_ParseFail;
4630 /// Parse an ARM instruction operand. For now this parses the operand regardless
4631 /// of the mnemonic.
4632 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4633 StringRef Mnemonic) {
4636 // Check if the current operand has a custom associated parser, if so, try to
4637 // custom parse the operand, or fallback to the general approach.
4638 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4639 if (ResTy == MatchOperand_Success)
4641 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4642 // there was a match, but an error occurred, in which case, just return that
4643 // the operand parsing failed.
4644 if (ResTy == MatchOperand_ParseFail)
4647 switch (getLexer().getKind()) {
4649 Error(Parser.getTok().getLoc(), "unexpected token in operand");
4651 case AsmToken::Identifier: {
4652 // If we've seen a branch mnemonic, the next operand must be a label. This
4653 // is true even if the label is a register name. So "br r1" means branch to the label "r1".
4655 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4657 if (!tryParseRegisterWithWriteBack(Operands))
4659 int Res = tryParseShiftRegister(Operands);
4660 if (Res == 0) // success
4662 else if (Res == -1) // irrecoverable error
4664 // If this is VMRS, check for the apsr_nzcv operand.
4665 if (Mnemonic == "vmrs" &&
4666 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4667 S = Parser.getTok().getLoc();
4669 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4674 // Fall through for the Identifier case that is not a register or a special token.
4677 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
4678 case AsmToken::Integer: // things like 1f and 2b as a branch targets
4679 case AsmToken::String: // quoted label names.
4680 case AsmToken::Dot: { // . as a branch target
4681 // This was not a register so parse other operands that start with an
4682 // identifier (like labels) as expressions and create them as immediates.
4683 const MCExpr *IdVal;
4684 S = Parser.getTok().getLoc();
4685 if (getParser().parseExpression(IdVal))
4687 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4688 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4691 case AsmToken::LBrac:
4692 return parseMemory(Operands);
4693 case AsmToken::LCurly:
4694 return parseRegisterList(Operands);
4695 case AsmToken::Dollar:
4696 case AsmToken::Hash: {
4697 // #42 -> immediate.
4698 S = Parser.getTok().getLoc();
4701 if (Parser.getTok().isNot(AsmToken::Colon)) {
4702 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4703 const MCExpr *ImmVal;
4704 if (getParser().parseExpression(ImmVal))
4706 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4708 int32_t Val = CE->getValue();
4709 if (isNegative && Val == 0)
4710 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4712 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4713 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4715 // There can be a trailing '!' on operands that we want as a separate
4716 // '!' Token operand. Handle that here. For example, the compatibility
4717 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4718 if (Parser.getTok().is(AsmToken::Exclaim)) {
4719 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4720 Parser.getTok().getLoc()));
4721 Parser.Lex(); // Eat exclaim token
4725 // w/ a ':' after the '#', it's just like a plain ':'.
4728 case AsmToken::Colon: {
4729 // ":lower16:" and ":upper16:" expression prefixes
4730 // FIXME: Check it's an expression prefix,
4731 // e.g. (FOO - :lower16:BAR) isn't legal.
4732 ARMMCExpr::VariantKind RefKind;
4733 if (parsePrefix(RefKind))
4736 const MCExpr *SubExprVal;
4737 if (getParser().parseExpression(SubExprVal))
4740 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4742 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4743 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4746 case AsmToken::Equal: {
4747 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4748 return Error(Parser.getTok().getLoc(), "unexpected token in operand");
4750 Parser.Lex(); // Eat '='
4751 const MCExpr *SubExprVal;
4752 if (getParser().parseExpression(SubExprVal))
4754 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4756 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
4757 Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
4763 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4764 // :lower16: and :upper16:.
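// e.g. "movw r0, #:lower16:sym" and "movt r0, #:upper16:sym", where the
// register and symbol name are arbitrary examples.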
4765 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4766 RefKind = ARMMCExpr::VK_ARM_None;
4768 // consume an optional '#' (GNU compatibility)
4769 if (getLexer().is(AsmToken::Hash))
4772 // :lower16: and :upper16: modifiers
4773 assert(getLexer().is(AsmToken::Colon) && "expected a :");
4774 Parser.Lex(); // Eat ':'
4776 if (getLexer().isNot(AsmToken::Identifier)) {
4777 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4781 StringRef IDVal = Parser.getTok().getIdentifier();
4782 if (IDVal == "lower16") {
4783 RefKind = ARMMCExpr::VK_ARM_LO16;
4784 } else if (IDVal == "upper16") {
4785 RefKind = ARMMCExpr::VK_ARM_HI16;
4787 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4792 if (getLexer().isNot(AsmToken::Colon)) {
4793 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4796 Parser.Lex(); // Eat the last ':'
4800 /// \brief Given a mnemonic, split out possible predication code and carry
4801 /// setting letters to form a canonical mnemonic and flags.
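/// For example (illustrative): "addseq" splits into "add" with the 's'
/// carry-setting suffix and "eq" predication, "cpsie" into "cps" plus the
/// "ie" imod, and "ittet" into "it" plus the "tet" condition mask.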
4803 // FIXME: Would be nice to autogen this.
4804 // FIXME: This is a bit of a maze of special cases.
4805 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4806 unsigned &PredicationCode,
4808 unsigned &ProcessorIMod,
4809 StringRef &ITMask) {
4810 PredicationCode = ARMCC::AL;
4811 CarrySetting = false;
4814 // Ignore some mnemonics we know aren't predicated forms.
4816 // FIXME: Would be nice to autogen this.
4817 if ((Mnemonic == "movs" && isThumb()) ||
4818 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
4819 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
4820 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
4821 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
4822 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
4823 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
4824 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
4825 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4826 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
4827 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
4828 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
4829 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
4832 // First, split out any predication code. Ignore mnemonics we know aren't
4833 // predicated but do have a carry-set and so weren't caught above.
4834 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4835 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4836 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4837 Mnemonic != "sbcs" && Mnemonic != "rscs") {
4838 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4839 .Case("eq", ARMCC::EQ)
4840 .Case("ne", ARMCC::NE)
4841 .Case("hs", ARMCC::HS)
4842 .Case("cs", ARMCC::HS)
4843 .Case("lo", ARMCC::LO)
4844 .Case("cc", ARMCC::LO)
4845 .Case("mi", ARMCC::MI)
4846 .Case("pl", ARMCC::PL)
4847 .Case("vs", ARMCC::VS)
4848 .Case("vc", ARMCC::VC)
4849 .Case("hi", ARMCC::HI)
4850 .Case("ls", ARMCC::LS)
4851 .Case("ge", ARMCC::GE)
4852 .Case("lt", ARMCC::LT)
4853 .Case("gt", ARMCC::GT)
4854 .Case("le", ARMCC::LE)
4855 .Case("al", ARMCC::AL)
4858 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4859 PredicationCode = CC;
4863 // Next, determine if we have a carry setting bit. We explicitly ignore all
4864 // the instructions we know end in 's'.
4865 if (Mnemonic.endswith("s") &&
4866 !(Mnemonic == "cps" || Mnemonic == "mls" ||
4867 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4868 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4869 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4870 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4871 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4872 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4873 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4874 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
4875 (Mnemonic == "movs" && isThumb()))) {
4876 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4877 CarrySetting = true;
4880 // The "cps" instruction can have a interrupt mode operand which is glued into
4881 // the mnemonic. Check if this is the case, split it and parse the imod op
4882 if (Mnemonic.startswith("cps")) {
4883 // Split out any imod code.
4885 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4886 .Case("ie", ARM_PROC::IE)
4887 .Case("id", ARM_PROC::ID)
4890 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4891 ProcessorIMod = IMod;
4895 // The "it" instruction has the condition mask on the end of the mnemonic.
4896 if (Mnemonic.startswith("it")) {
4897 ITMask = Mnemonic.slice(2, Mnemonic.size());
4898 Mnemonic = Mnemonic.slice(0, 2);
4904 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
4905 /// inclusion of carry set or predication code operands.
4907 // FIXME: It would be nice to autogen this.
4909 getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
4910 bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
4911 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4912 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4913 Mnemonic == "add" || Mnemonic == "adc" ||
4914 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4915 Mnemonic == "orr" || Mnemonic == "mvn" ||
4916 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4917 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4918 Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4919 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4920 Mnemonic == "mla" || Mnemonic == "smlal" ||
4921 Mnemonic == "umlal" || Mnemonic == "umull"))) {
4922 CanAcceptCarrySet = true;
4924 CanAcceptCarrySet = false;
4926 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
4927 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
4928 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
4929 Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
4930 Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
4931 Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
4932 Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
4933 Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
4934 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
4935 (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
4936 // These mnemonics are never predicable
4937 CanAcceptPredicationCode = false;
4938 } else if (!isThumb()) {
4939 // Some instructions are only predicable in Thumb mode
4940 CanAcceptPredicationCode
4941 = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
4942 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
4943 Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
4944 Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
4945 Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
4946 Mnemonic != "stc2" && Mnemonic != "stc2l" &&
4947 !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
4948 } else if (isThumbOne()) {
4950 CanAcceptPredicationCode = Mnemonic != "movs";
4952 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
4954 CanAcceptPredicationCode = true;
4957 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4958 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4959 // FIXME: This is all horribly hacky. We really need a better way to deal
4960 // with optional operands like this in the matcher table.
4962 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4963 // another does not. Specifically, the MOVW instruction does not. So we
4964 // special case it here and remove the defaulted (non-setting) cc_out
4965 // operand if that's the instruction we're trying to match.
4967 // We do this as post-processing of the explicit operands rather than just
4968 // conditionally adding the cc_out in the first place because we need
4969 // to check the type of the parsed immediate operand.
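// For example (illustrative), ARM-mode "mov r9, #0x1234" can only be MOVW
// (0x1234 is not a valid so_imm but fits in 16 bits), which has no cc_out
// operand, whereas "mov r9, #4" may still match the flag-setting-capable MOV.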
4970 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4971 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4972 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4973 static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4976 // Register-register 'add' for thumb does not have a cc_out operand
4977 // when there are only two register operands.
4978 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4979 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4980 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4981 static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4983 // Register-register 'add' for thumb does not have a cc_out operand
4984 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4985 // have to check the immediate range here since Thumb2 has a variant
4986 // that can handle a different range and has a cc_out operand.
4987 if (((isThumb() && Mnemonic == "add") ||
4988 (isThumbTwo() && Mnemonic == "sub")) &&
4989 Operands.size() == 6 &&
4990 static_cast<ARMOperand*>(Operands[3])->isReg() &&
4991 static_cast<ARMOperand*>(Operands[4])->isReg() &&
4992 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4993 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4994 ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4995 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4997 // For Thumb2, add/sub immediate does not have a cc_out operand for the
4998 // imm0_4095 variant. That's the least-preferred variant when
4999 // selecting via the generic "add" mnemonic, so to know that we
5000 // should remove the cc_out operand, we have to explicitly check that
5001 // it's not one of the other variants. Ugh.
5002 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5003 Operands.size() == 6 &&
5004 static_cast<ARMOperand*>(Operands[3])->isReg() &&
5005 static_cast<ARMOperand*>(Operands[4])->isReg() &&
5006 static_cast<ARMOperand*>(Operands[5])->isImm()) {
5007 // Nest conditions rather than one big 'if' statement for readability.
5009 // If both registers are low, we're in an IT block, and the immediate is
5010 // in range, we should use encoding T1 instead, which has a cc_out.
5012 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5013 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
5014 static_cast<ARMOperand*>(Operands[5])->isImm0_7())
5016 // Check against T3. If the second register is the PC, this is an
5017 // alternate form of ADR, which uses encoding T4, so check for that too.
5018 if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
5019 static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
5022 // Otherwise, we use encoding T4, which does not have a cc_out
5027 // The thumb2 multiply instruction doesn't have a CCOut register, so
5028 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5029 // use the 16-bit encoding or not.
5030 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5031 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5032 static_cast<ARMOperand*>(Operands[3])->isReg() &&
5033 static_cast<ARMOperand*>(Operands[4])->isReg() &&
5034 static_cast<ARMOperand*>(Operands[5])->isReg() &&
5035 // If the registers aren't low regs, the destination reg isn't the
5036 // same as one of the source regs, or the cc_out operand is zero
5037 // outside of an IT block, we have to use the 32-bit encoding, so
5038 // remove the cc_out operand.
5039 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
5040 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
5041 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
5043 (static_cast<ARMOperand*>(Operands[3])->getReg() !=
5044 static_cast<ARMOperand*>(Operands[5])->getReg() &&
5045 static_cast<ARMOperand*>(Operands[3])->getReg() !=
5046 static_cast<ARMOperand*>(Operands[4])->getReg())))
5049 // Also check the 'mul' syntax variant that doesn't specify an explicit
5050 // destination register.
5051 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5052 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5053 static_cast<ARMOperand*>(Operands[3])->isReg() &&
5054 static_cast<ARMOperand*>(Operands[4])->isReg() &&
5055 // If the registers aren't low regs or the cc_out operand is zero
5056 // outside of an IT block, we have to use the 32-bit encoding, so
5057 // remove the cc_out operand.
5058 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
5059 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
5065 // Register-register 'add/sub' for thumb does not have a cc_out operand
5066 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5067 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5068 // right, this will result in better diagnostics (which operand is off)
5070 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5071 (Operands.size() == 5 || Operands.size() == 6) &&
5072 static_cast<ARMOperand*>(Operands[3])->isReg() &&
5073 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
5074 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5075 (static_cast<ARMOperand*>(Operands[4])->isImm() ||
5076 (Operands.size() == 6 &&
5077 static_cast<ARMOperand*>(Operands[5])->isImm())))
5083 bool ARMAsmParser::shouldOmitPredicateOperand(
5084 StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
5085 // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
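// e.g. "vrintz.f32 s0, s1" is the predicable VFP form, while
// "vrintz.f32 d0, d1" or "vrintz.f32 q0, q1" select the NEON form, which
// takes no predicate (registers arbitrary).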
5086 unsigned RegIdx = 3;
5087 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5088 static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
5089 if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
5090 static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
5093 if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
5094 (ARMMCRegisterClasses[ARM::DPRRegClassID]
5095 .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
5096 ARMMCRegisterClasses[ARM::QPRRegClassID]
5097 .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
5103 static bool isDataTypeToken(StringRef Tok) {
5104 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5105 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5106 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5107 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5108 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5109 Tok == ".f" || Tok == ".d";
5112 // FIXME: This bit should probably be handled via an explicit match class
5113 // in the .td files that matches the suffix instead of having it be
5114 // a literal string token the way it is now.
5115 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5116 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5118 static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5119 unsigned VariantID);
5121 static bool RequiresVFPRegListValidation(StringRef Inst,
5122 bool &AcceptSinglePrecisionOnly,
5123 bool &AcceptDoublePrecisionOnly) {
5124 if (Inst.size() < 7)
5127 if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5128 StringRef AddressingMode = Inst.substr(4, 2);
5129 if (AddressingMode == "ia" || AddressingMode == "db" ||
5130 AddressingMode == "ea" || AddressingMode == "fd") {
5131 AcceptSinglePrecisionOnly = Inst[6] == 's';
5132 AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5140 /// Parse an arm instruction mnemonic followed by its operands.
5141 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5143 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5144 // FIXME: Can this be done via tablegen in some fashion?
5145 bool RequireVFPRegisterListCheck;
5146 bool AcceptSinglePrecisionOnly;
5147 bool AcceptDoublePrecisionOnly;
5148 RequireVFPRegisterListCheck =
5149 RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5150 AcceptDoublePrecisionOnly);
5152 // Apply mnemonic aliases before doing anything else, as the destination
5153 // mnemonic may include suffixes and we want to handle them normally.
5154 // The generic tblgen'erated code does this later, at the start of
5155 // MatchInstructionImpl(), but that's too late for aliases that include
5156 // any sort of suffix.
5157 unsigned AvailableFeatures = getAvailableFeatures();
5158 unsigned AssemblerDialect = getParser().getAssemblerDialect();
5159 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5161 // First check for the ARM-specific .req directive.
5162 if (Parser.getTok().is(AsmToken::Identifier) &&
5163 Parser.getTok().getIdentifier() == ".req") {
5164 parseDirectiveReq(Name, NameLoc);
5165 // We always return 'error' for this, as we're done with this
5166 // statement and don't need to match the instruction.
5170 // Create the leading tokens for the mnemonic, split by '.' characters.
5171 size_t Start = 0, Next = Name.find('.');
5172 StringRef Mnemonic = Name.slice(Start, Next);
5174 // Split out the predication code and carry setting flag from the mnemonic.
5175 unsigned PredicationCode;
5176 unsigned ProcessorIMod;
5179 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5180 ProcessorIMod, ITMask);
5182 // In Thumb1, only the branch (B) instruction can be predicated.
5183 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5184 Parser.eatToEndOfStatement();
5185 return Error(NameLoc, "conditional execution not supported in Thumb1");
5188 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5190 // Handle the IT instruction ITMask. Convert it to a bitmask. This
5191 // is the mask as it will be for the IT encoding if the conditional
5192 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5193 // where the conditional bit0 is zero, the instruction post-processing
5194 // will adjust the mask accordingly.
5195 if (Mnemonic == "it") {
5196 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5197 if (ITMask.size() > 3) {
5198 Parser.eatToEndOfStatement();
5199 return Error(Loc, "too many conditions on IT instruction");
5202 for (unsigned i = ITMask.size(); i != 0; --i) {
5203 char pos = ITMask[i - 1];
5204 if (pos != 't' && pos != 'e') {
5205 Parser.eatToEndOfStatement();
5206 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5209 if (ITMask[i - 1] == 't')
5212 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
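// For example, "itte eq" carries ITMask "te": the instruction right after the IT
// uses EQ implicitly, the 't' slot also uses EQ, and the 'e' slot uses NE.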
5215 // FIXME: This is all a pretty gross hack. We should automatically handle
5216 // optional operands like this via tblgen.
5218 // Next, add the CCOut and ConditionCode operands, if needed.
5220 // For mnemonics which can ever incorporate a carry setting bit or predication
5221 // code, our matching model involves us always generating CCOut and
5222 // ConditionCode operands to match the mnemonic "as written" and then we let
5223 // the matcher deal with finding the right instruction or generating an
5224 // appropriate error.
5225 bool CanAcceptCarrySet, CanAcceptPredicationCode;
5226 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5228 // If we had a carry-set on an instruction that can't do that, issue an error.
5230 if (!CanAcceptCarrySet && CarrySetting) {
5231 Parser.eatToEndOfStatement();
5232 return Error(NameLoc, "instruction '" + Mnemonic +
5233 "' can not set flags, but 's' suffix specified");
5235 // If we had a predication code on an instruction that can't do that, issue an error.
5237 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5238 Parser.eatToEndOfStatement();
5239 return Error(NameLoc, "instruction '" + Mnemonic +
5240 "' is not predicable, but condition code specified");
5243 // Add the carry setting operand, if necessary.
5244 if (CanAcceptCarrySet) {
5245 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5246 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5250 // Add the predication code operand, if necessary.
5251 if (CanAcceptPredicationCode) {
5252 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5254 Operands.push_back(ARMOperand::CreateCondCode(
5255 ARMCC::CondCodes(PredicationCode), Loc));
5258 // Add the processor imod operand, if necessary.
5259 if (ProcessorIMod) {
5260 Operands.push_back(ARMOperand::CreateImm(
5261 MCConstantExpr::Create(ProcessorIMod, getContext()),
5265 // Add the remaining tokens in the mnemonic.
5266 while (Next != StringRef::npos) {
5268 Next = Name.find('.', Start + 1);
5269 StringRef ExtraToken = Name.slice(Start, Next);
5271 // Some NEON instructions have an optional datatype suffix that is
5272 // completely ignored. Check for that.
5273 if (isDataTypeToken(ExtraToken) &&
5274 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5277 // For ARM mode, generate an error if the .n qualifier is used.
5278 if (ExtraToken == ".n" && !isThumb()) {
5279 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5280 Parser.eatToEndOfStatement();
5281 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5285 // The .n qualifier is always discarded as that is what the tables
5286 // and matcher expect. In ARM mode the .w qualifier has no effect,
5287 // so discard it to avoid errors that can be caused by the matcher.
5288 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5289 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5290 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5294 // Read the remaining operands.
5295 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5296 // Read the first operand.
5297 if (parseOperand(Operands, Mnemonic)) {
5298 Parser.eatToEndOfStatement();
5302 while (getLexer().is(AsmToken::Comma)) {
5303 Parser.Lex(); // Eat the comma.
5305 // Parse and remember the operand.
5306 if (parseOperand(Operands, Mnemonic)) {
5307 Parser.eatToEndOfStatement();
5313 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5314 SMLoc Loc = getLexer().getLoc();
5315 Parser.eatToEndOfStatement();
5316 return Error(Loc, "unexpected token in argument list");
5319 Parser.Lex(); // Consume the EndOfStatement
5321 if (RequireVFPRegisterListCheck) {
5322 ARMOperand *Op = static_cast<ARMOperand*>(Operands.back());
5323 if (AcceptSinglePrecisionOnly && !Op->isSPRRegList())
5324 return Error(Op->getStartLoc(),
5325 "VFP/Neon single precision register expected");
5326 if (AcceptDoublePrecisionOnly && !Op->isDPRRegList())
5327 return Error(Op->getStartLoc(),
5328 "VFP/Neon double precision register expected");
5331 // Some instructions, mostly Thumb, have forms for the same mnemonic that
5332 // do and don't have a cc_out optional-def operand. With some spot-checks
5333 // of the operand list, we can figure out which variant we're trying to
5334 // parse and adjust accordingly before actually matching. We shouldn't ever
5335 // try to remove a cc_out operand that was explicitly set on the
5336 // mnemonic, of course (CarrySetting == true). Reason #317 that the
5337 // table driven matcher doesn't fit well with the ARM instruction set.
5338 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5339 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5340 Operands.erase(Operands.begin() + 1);
5344 // Some instructions have the same mnemonic, but don't always
5345 // have a predicate. Distinguish them here and delete the
5346 // predicate if needed.
5347 if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
5348 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5349 Operands.erase(Operands.begin() + 1);
5353 // ARM mode 'blx' needs special handling, as the register operand version
5354 // is predicable, but the label operand version is not. So, we can't rely
5355 // on the Mnemonic based checking to correctly figure out when to put
5356 // a k_CondCode operand in the list. If we're trying to match the label
5357 // version, remove the k_CondCode operand here.
5358 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5359 static_cast<ARMOperand*>(Operands[2])->isImm()) {
5360 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5361 Operands.erase(Operands.begin() + 1);
5365 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5366 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5367 // a single GPRPair reg operand is used in the .td file to replace the two
5368 // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5369 // expressed as a GPRPair, so we have to manually merge them.
5370 // FIXME: We would really like to be able to tablegen'erate this.
5371 if (!isThumb() && Operands.size() > 4 &&
5372 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5373 Mnemonic == "stlexd")) {
5374 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5375 unsigned Idx = isLoad ? 2 : 3;
5376 ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5377 ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5379 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5380 // Adjust only if Op1 and Op2 are GPRs.
5381 if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5382 MRC.contains(Op2->getReg())) {
5383 unsigned Reg1 = Op1->getReg();
5384 unsigned Reg2 = Op2->getReg();
5385 unsigned Rt = MRI->getEncodingValue(Reg1);
5386 unsigned Rt2 = MRI->getEncodingValue(Reg2);
5388 // Rt2 must be Rt + 1 and Rt must be even.
5389 if (Rt + 1 != Rt2 || (Rt & 1)) {
5390 Error(Op2->getStartLoc(), isLoad ?
5391 "destination operands must be sequential" :
5392 "source operands must be sequential");
5395 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5396 &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5397 Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5398 Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5399 NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
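// For example, "ldrexd r0, r1, [r2]" folds R0/R1 into the single GPRPair
// super-register (R0_R1) the .td pattern expects, while "ldrexd r1, r2, [r3]" is
// rejected above because the pair must start at an even-numbered register.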
5405 // GNU Assembler extension (compatibility)
5406 if ((Mnemonic == "ldrd" || Mnemonic == "strd") && !isThumb() &&
5407 Operands.size() == 4) {
5408 ARMOperand *Op = static_cast<ARMOperand *>(Operands[2]);
5409 assert(Op->isReg() && "expected register argument");
5410 assert(MRI->getMatchingSuperReg(Op->getReg(), ARM::gsub_0,
5411 &MRI->getRegClass(ARM::GPRPairRegClassID))
5412 && "expected register pair");
5413 Operands.insert(Operands.begin() + 3,
5414 ARMOperand::CreateReg(Op->getReg() + 1, Op->getStartLoc(),
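// For example, the GAS-style "ldrd r0, [r2]" is completed here to the UAL form
// "ldrd r0, r1, [r2]" by inserting the implied second transfer register.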
5418 // FIXME: As said above, this is all a pretty gross hack. This instruction
5419 // does not fit with other "subs" and tblgen.
5420 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5421 // so the Mnemonic is the original name "subs" and delete the predicate
5422 // operand so it will match the table entry.
5423 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5424 static_cast<ARMOperand*>(Operands[3])->isReg() &&
5425 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
5426 static_cast<ARMOperand*>(Operands[4])->isReg() &&
5427 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
5428 static_cast<ARMOperand*>(Operands[5])->isImm()) {
5429 ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
5430 Operands.erase(Operands.begin());
5432 Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
5434 ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
5435 Operands.erase(Operands.begin() + 1);
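// For example, the exception-return idiom "subs pc, lr, #4" is re-tokenized here so
// that it matches the dedicated SUBS PC, LR, #imm entry rather than being treated
// as an ordinary flag-setting, predicable SUB.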
5441 // Validate context-sensitive operand constraints.
5443 // Return 'true' if the register list contains non-low GPR registers,
5444 // 'false' otherwise. If Reg is in the register list or is HiReg, set
5445 // 'containsReg' to true.
5446 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5447 unsigned HiReg, bool &containsReg) {
5448 containsReg = false;
5449 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5450 unsigned OpReg = Inst.getOperand(i).getReg();
5453 // Anything other than a low register isn't legal here.
5454 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5460 // Check if the specified register is in the register list of the inst,
5461 // starting at the indicated operand number.
5462 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5463 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5464 unsigned OpReg = Inst.getOperand(i).getReg();
5471 // Return true if instruction has the interesting property of being
5472 // allowed in IT blocks, but not being predicable.
5473 static bool instIsBreakpoint(const MCInst &Inst) {
5474 return Inst.getOpcode() == ARM::tBKPT ||
5475 Inst.getOpcode() == ARM::BKPT ||
5476 Inst.getOpcode() == ARM::tHLT ||
5477 Inst.getOpcode() == ARM::HLT;
5481 // FIXME: We would really like to be able to tablegen'erate this.
5483 validateInstruction(MCInst &Inst,
5484 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5485 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5486 SMLoc Loc = Operands[0]->getStartLoc();
5488 // Check the IT block state first.
5489 // NOTE: BKPT and HLT instructions have the interesting property of being
5490 // allowed in IT blocks, but not being predicable. They just always execute.
5491 if (inITBlock() && !instIsBreakpoint(Inst)) {
5493 if (ITState.FirstCond)
5494 ITState.FirstCond = false;
5496 Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5497 // The instruction must be predicable.
5498 if (!MCID.isPredicable())
5499 return Error(Loc, "instructions in IT block must be predicable");
5500 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5501 unsigned ITCond = Bit ? ITState.Cond :
5502 ARMCC::getOppositeCondition(ITState.Cond);
5503 if (Cond != ITCond) {
5504 // Find the condition code Operand to get its SMLoc information.
5506 for (unsigned I = 1; I < Operands.size(); ++I)
5507 if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
5508 CondLoc = Operands[I]->getStartLoc();
5509 return Error(CondLoc, "incorrect condition in IT block; got '" +
5510 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5511 "', but expected '" +
5512 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5514 // Check for non-'al' condition codes outside of the IT block.
5515 } else if (isThumbTwo() && MCID.isPredicable() &&
5516 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5517 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
5518 Inst.getOpcode() != ARM::t2Bcc)
5519 return Error(Loc, "predicated instructions must be in IT block");
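// For example, in Thumb2 "addeq r0, r0, r1" outside an IT block is rejected here,
// while inside "it eq" the same instruction is accepted and "addne r0, r0, r1"
// draws the "incorrect condition" diagnostic above.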
5521 const unsigned Opcode = Inst.getOpcode();
5525 case ARM::LDRD_POST: {
5526 const unsigned RtReg = Inst.getOperand(0).getReg();
5529 if (RtReg == ARM::LR)
5530 return Error(Operands[3]->getStartLoc(),
5533 const unsigned Rt = MRI->getEncodingValue(RtReg);
5534 // Rt must be even-numbered.
5536 return Error(Operands[3]->getStartLoc(),
5537 "Rt must be even-numbered");
5539 // Rt2 must be Rt + 1.
5540 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5542 return Error(Operands[3]->getStartLoc(),
5543 "destination operands must be sequential");
5545 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
5546 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
5547 // For addressing modes with writeback, the base register needs to be
5548 // different from the destination registers.
5549 if (Rn == Rt || Rn == Rt2)
5550 return Error(Operands[3]->getStartLoc(),
5551 "base register needs to be different from destination "
5558 case ARM::t2LDRD_PRE:
5559 case ARM::t2LDRD_POST: {
5560 // Rt2 must be different from Rt.
5561 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5562 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5564 return Error(Operands[3]->getStartLoc(),
5565 "destination operands can't be identical");
5569 // Rt2 must be Rt + 1.
5570 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5571 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5573 return Error(Operands[3]->getStartLoc(),
5574 "source operands must be sequential");
5578 case ARM::STRD_POST: {
5579 // Rt2 must be Rt + 1.
5580 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5581 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5583 return Error(Operands[3]->getStartLoc(),
5584 "source operands must be sequential");
5589 // Width must be in range [1, 32-lsb].
5590 unsigned LSB = Inst.getOperand(2).getImm();
5591 unsigned Widthm1 = Inst.getOperand(3).getImm();
5592 if (Widthm1 >= 32 - LSB)
5593 return Error(Operands[5]->getStartLoc(),
5594 "bitfield width must be in range [1,32-lsb]");
5597 // Notionally handles ARM::tLDMIA_UPD too.
5599 // If we're parsing Thumb2, the .w variant is available and handles
5600 // most cases that are normally illegal for a Thumb1 LDM instruction.
5601 // We'll make the transformation in processInstruction() if necessary.
5603 // Thumb LDM instructions are writeback iff the base register is not
5604 // in the register list.
5605 unsigned Rn = Inst.getOperand(0).getReg();
5606 bool HasWritebackToken =
5607 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5608 static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5609 bool ListContainsBase;
5610 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
5611 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
5612 "registers must be in range r0-r7");
5613 // If we should have writeback, then there should be a '!' token.
5614 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
5615 return Error(Operands[2]->getStartLoc(),
5616 "writeback operator '!' expected");
5617 // If we should not have writeback, there must not be a '!'. This is
5618 // true even for the 32-bit wide encodings.
5619 if (ListContainsBase && HasWritebackToken)
5620 return Error(Operands[3]->getStartLoc(),
5621 "writeback operator '!' not allowed when base register "
5622 "in register list");
5626 case ARM::LDMIA_UPD:
5627 case ARM::LDMDB_UPD:
5628 case ARM::LDMIB_UPD:
5629 case ARM::LDMDA_UPD:
5630 // ARM variants loading and updating the same register are only officially
5631 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
5635 case ARM::t2LDMIA_UPD:
5636 case ARM::t2LDMDB_UPD:
5637 case ARM::t2STMIA_UPD:
5638 case ARM::t2STMDB_UPD: {
5639 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5640 return Error(Operands.back()->getStartLoc(),
5641 "writeback register not allowed in register list");
5644 case ARM::sysLDMIA_UPD:
5645 case ARM::sysLDMDA_UPD:
5646 case ARM::sysLDMDB_UPD:
5647 case ARM::sysLDMIB_UPD:
5648 if (!listContainsReg(Inst, 3, ARM::PC))
5649 return Error(Operands[4]->getStartLoc(),
5650 "writeback register only allowed on system LDM "
5651 "if PC in register-list");
5653 case ARM::sysSTMIA_UPD:
5654 case ARM::sysSTMDA_UPD:
5655 case ARM::sysSTMDB_UPD:
5656 case ARM::sysSTMIB_UPD:
5657 return Error(Operands[2]->getStartLoc(),
5658 "system STM cannot have writeback register");
5660 // The second source operand must be the same register as the destination operand.
5663 // In this case, we must directly check the parsed operands because the
5664 // cvtThumbMultiply() function is written in such a way that it guarantees
5665 // this first statement is always true for the new Inst. Essentially, the
5666 // destination is unconditionally copied into the second source operand
5667 // without checking to see if it matches what we actually parsed.
5668 if (Operands.size() == 6 &&
5669 (((ARMOperand*)Operands[3])->getReg() !=
5670 ((ARMOperand*)Operands[5])->getReg()) &&
5671 (((ARMOperand*)Operands[3])->getReg() !=
5672 ((ARMOperand*)Operands[4])->getReg())) {
5673 return Error(Operands[3]->getStartLoc(),
5674 "destination register must match source register");
5678 // Like ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5679 // so only issue a diagnostic for Thumb1. The instructions will be
5680 // switched to the t2 encodings in processInstruction() if necessary.
5682 bool ListContainsBase;
5683 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
5685 return Error(Operands[2]->getStartLoc(),
5686 "registers must be in range r0-r7 or pc");
5690 bool ListContainsBase;
5691 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
5693 return Error(Operands[2]->getStartLoc(),
5694 "registers must be in range r0-r7 or lr");
5697 case ARM::tSTMIA_UPD: {
5698 bool ListContainsBase, InvalidLowList;
5699 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
5700 0, ListContainsBase);
5701 if (InvalidLowList && !isThumbTwo())
5702 return Error(Operands[4]->getStartLoc(),
5703 "registers must be in range r0-r7");
5705 // This would be converted to a 32-bit stm, but that's not valid if the
5706 // writeback register is in the list.
5707 if (InvalidLowList && ListContainsBase)
5708 return Error(Operands[4]->getStartLoc(),
5709 "writeback operator '!' not allowed when base register "
5710 "in register list");
5713 case ARM::tADDrSP: {
5714 // If the non-SP source operand and the destination operand are not the
5715 // same, we need thumb2 (for the wide encoding), or we have an error.
5716 if (!isThumbTwo() &&
5717 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5718 return Error(Operands[4]->getStartLoc(),
5719 "source register must be the same as destination");
5723 // Final range checking for Thumb unconditional branch instructions.
5725 if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>())
5726 return Error(Operands[2]->getStartLoc(), "branch target out of range");
5729 int op = (Operands[2]->isImm()) ? 2 : 3;
5730 if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>())
5731 return Error(Operands[op]->getStartLoc(), "branch target out of range");
5734 // Final range checking for Thumb conditional branch instructions.
5736 if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>())
5737 return Error(Operands[2]->getStartLoc(), "branch target out of range");
5740 int Op = (Operands[2]->isImm()) ? 2 : 3;
5741 if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>())
5742 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
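// The helpers below map the parser-only *_Asm_* pseudo-opcodes for NEON
// element/structure stores and loads onto the real instructions. The Spacing
// out-parameter is the register-list stride: generally 1 for the consecutive-D
// ('d') forms and 2 for the even/odd-spaced ('q') forms.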
5750 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5752 default: llvm_unreachable("unexpected opcode!");
5754 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
5755 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5756 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5757 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
5758 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5759 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5760 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
5761 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5762 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5765 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
5766 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5767 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5768 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5769 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5771 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
5772 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5773 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5774 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5775 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5777 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
5778 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5779 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5780 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5781 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5784 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
5785 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5786 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5787 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5788 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5789 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
5790 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5791 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5792 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5793 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5794 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
5795 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5796 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5797 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5798 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5801 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
5802 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5803 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5804 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
5805 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5806 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5807 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
5808 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5809 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5810 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
5811 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5812 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5813 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
5814 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5815 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5816 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
5817 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5818 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5821 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
5822 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5823 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5824 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5825 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5826 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
5827 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5828 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5829 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5830 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5831 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
5832 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5833 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5834 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5835 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5838 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
5839 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5840 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5841 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
5842 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5843 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5844 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
5845 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5846 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5847 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
5848 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5849 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5850 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
5851 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5852 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5853 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
5854 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5855 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5859 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5861 default: llvm_unreachable("unexpected opcode!");
5863 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
5864 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5865 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5866 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
5867 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5868 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5869 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
5870 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5871 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5874 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
5875 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5876 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5877 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5878 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5879 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
5880 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5881 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5882 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5883 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5884 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
5885 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5886 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5887 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5888 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5891 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
5892 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5893 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5894 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5895 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5896 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5897 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
5898 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5899 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5900 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5901 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5902 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5903 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
5904 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5905 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5906 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5907 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5908 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5911 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
5912 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5913 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5914 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5915 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5916 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
5917 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5918 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5919 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5920 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5921 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
5922 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5923 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5924 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5925 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5928 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
5929 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5930 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5931 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
5932 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5933 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5934 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
5935 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5936 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5937 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
5938 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5939 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5940 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
5941 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5942 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5943 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
5944 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5945 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5948 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
5949 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5950 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5951 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5952 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5953 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
5954 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5955 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5956 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5957 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5958 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
5959 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5960 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5961 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5962 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5965 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
5966 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5967 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5968 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
5969 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
5970 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5971 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
5972 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5973 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5974 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5975 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5976 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5977 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
5978 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5979 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5980 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5981 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5982 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5985 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
5986 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5987 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5988 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
5989 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5990 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5991 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
5992 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5993 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5994 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
5995 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5996 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5997 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
5998 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5999 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6000 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
6001 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6002 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6007 processInstruction(MCInst &Inst,
6008 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
6009 switch (Inst.getOpcode()) {
6010 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6011 case ARM::LDRT_POST:
6012 case ARM::LDRBT_POST: {
6013 const unsigned Opcode =
6014 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6015 : ARM::LDRBT_POST_IMM;
6017 TmpInst.setOpcode(Opcode);
6018 TmpInst.addOperand(Inst.getOperand(0));
6019 TmpInst.addOperand(Inst.getOperand(1));
6020 TmpInst.addOperand(Inst.getOperand(1));
6021 TmpInst.addOperand(MCOperand::CreateReg(0));
6022 TmpInst.addOperand(MCOperand::CreateImm(0));
6023 TmpInst.addOperand(Inst.getOperand(2));
6024 TmpInst.addOperand(Inst.getOperand(3));
6028 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6029 case ARM::STRT_POST:
6030 case ARM::STRBT_POST: {
6031 const unsigned Opcode =
6032 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6033 : ARM::STRBT_POST_IMM;
6035 TmpInst.setOpcode(Opcode);
6036 TmpInst.addOperand(Inst.getOperand(1));
6037 TmpInst.addOperand(Inst.getOperand(0));
6038 TmpInst.addOperand(Inst.getOperand(1));
6039 TmpInst.addOperand(MCOperand::CreateReg(0));
6040 TmpInst.addOperand(MCOperand::CreateImm(0));
6041 TmpInst.addOperand(Inst.getOperand(2));
6042 TmpInst.addOperand(Inst.getOperand(3));
6046 // Alias for alternate form of 'ADR Rd, #imm' instruction.
6048 if (Inst.getOperand(1).getReg() != ARM::PC ||
6049 Inst.getOperand(5).getReg() != 0)
6052 TmpInst.setOpcode(ARM::ADR);
6053 TmpInst.addOperand(Inst.getOperand(0));
6054 TmpInst.addOperand(Inst.getOperand(2));
6055 TmpInst.addOperand(Inst.getOperand(3));
6056 TmpInst.addOperand(Inst.getOperand(4));
6060 // Aliases for alternate PC+imm syntax of LDR instructions.
6061 case ARM::t2LDRpcrel:
6062 // Select the narrow version if the immediate will fit.
6063 if (Inst.getOperand(1).getImm() > 0 &&
6064 Inst.getOperand(1).getImm() <= 0xff &&
6065 !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
6066 static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
6067 Inst.setOpcode(ARM::tLDRpci);
6069 Inst.setOpcode(ARM::t2LDRpci);
6071 case ARM::t2LDRBpcrel:
6072 Inst.setOpcode(ARM::t2LDRBpci);
6074 case ARM::t2LDRHpcrel:
6075 Inst.setOpcode(ARM::t2LDRHpci);
6077 case ARM::t2LDRSBpcrel:
6078 Inst.setOpcode(ARM::t2LDRSBpci);
6080 case ARM::t2LDRSHpcrel:
6081 Inst.setOpcode(ARM::t2LDRSHpci);
6083 // Handle NEON VST complex aliases.
6084 case ARM::VST1LNdWB_register_Asm_8:
6085 case ARM::VST1LNdWB_register_Asm_16:
6086 case ARM::VST1LNdWB_register_Asm_32: {
6088 // Shuffle the operands around so the lane index operand is in the right place.
6091 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6092 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6093 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6094 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6095 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6096 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6097 TmpInst.addOperand(Inst.getOperand(1)); // lane
6098 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6099 TmpInst.addOperand(Inst.getOperand(6));
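// As an illustration, "vst1.32 {d4[1]}, [r3], r5" is matched as the
// VST1LNdWB_register_Asm_32 pseudo and re-emitted here as VST1LNd32_UPD with the
// operand order the real instruction expects.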
6104 case ARM::VST2LNdWB_register_Asm_8:
6105 case ARM::VST2LNdWB_register_Asm_16:
6106 case ARM::VST2LNdWB_register_Asm_32:
6107 case ARM::VST2LNqWB_register_Asm_16:
6108 case ARM::VST2LNqWB_register_Asm_32: {
6110 // Shuffle the operands around so the lane index operand is in the right place.
6113 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6114 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6115 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6116 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6117 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6118 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6119 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6121 TmpInst.addOperand(Inst.getOperand(1)); // lane
6122 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6123 TmpInst.addOperand(Inst.getOperand(6));
6128 case ARM::VST3LNdWB_register_Asm_8:
6129 case ARM::VST3LNdWB_register_Asm_16:
6130 case ARM::VST3LNdWB_register_Asm_32:
6131 case ARM::VST3LNqWB_register_Asm_16:
6132 case ARM::VST3LNqWB_register_Asm_32: {
6134 // Shuffle the operands around so the lane index operand is in the right place.
6137 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6138 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6139 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6140 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6141 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6142 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6143 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6145 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6147 TmpInst.addOperand(Inst.getOperand(1)); // lane
6148 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6149 TmpInst.addOperand(Inst.getOperand(6));
6154 case ARM::VST4LNdWB_register_Asm_8:
6155 case ARM::VST4LNdWB_register_Asm_16:
6156 case ARM::VST4LNdWB_register_Asm_32:
6157 case ARM::VST4LNqWB_register_Asm_16:
6158 case ARM::VST4LNqWB_register_Asm_32: {
6160 // Shuffle the operands around so the lane index operand is in the right place.
6163 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6164 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6165 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6166 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6167 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6168 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6169 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6171 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6173 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6175 TmpInst.addOperand(Inst.getOperand(1)); // lane
6176 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6177 TmpInst.addOperand(Inst.getOperand(6));
6182 case ARM::VST1LNdWB_fixed_Asm_8:
6183 case ARM::VST1LNdWB_fixed_Asm_16:
6184 case ARM::VST1LNdWB_fixed_Asm_32: {
6186 // Shuffle the operands around so the lane index operand is in the right place.
6189 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6190 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6191 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6192 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6193 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6194 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6195 TmpInst.addOperand(Inst.getOperand(1)); // lane
6196 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6197 TmpInst.addOperand(Inst.getOperand(5));
6202 case ARM::VST2LNdWB_fixed_Asm_8:
6203 case ARM::VST2LNdWB_fixed_Asm_16:
6204 case ARM::VST2LNdWB_fixed_Asm_32:
6205 case ARM::VST2LNqWB_fixed_Asm_16:
6206 case ARM::VST2LNqWB_fixed_Asm_32: {
6208 // Shuffle the operands around so the lane index operand is in the right place.
6211 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6212 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6213 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6214 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6215 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6216 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6217 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6219 TmpInst.addOperand(Inst.getOperand(1)); // lane
6220 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6221 TmpInst.addOperand(Inst.getOperand(5));
6226 case ARM::VST3LNdWB_fixed_Asm_8:
6227 case ARM::VST3LNdWB_fixed_Asm_16:
6228 case ARM::VST3LNdWB_fixed_Asm_32:
6229 case ARM::VST3LNqWB_fixed_Asm_16:
6230 case ARM::VST3LNqWB_fixed_Asm_32: {
6232 // Shuffle the operands around so the lane index operand is in the right place.
6235 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6236 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6237 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6238 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6239 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6240 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6241 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6243 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6245 TmpInst.addOperand(Inst.getOperand(1)); // lane
6246 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6247 TmpInst.addOperand(Inst.getOperand(5));
6252 case ARM::VST4LNdWB_fixed_Asm_8:
6253 case ARM::VST4LNdWB_fixed_Asm_16:
6254 case ARM::VST4LNdWB_fixed_Asm_32:
6255 case ARM::VST4LNqWB_fixed_Asm_16:
6256 case ARM::VST4LNqWB_fixed_Asm_32: {
6258 // Shuffle the operands around so the lane index operand is in the right place.
6261 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6262 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6263 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6264 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6265 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6266 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6267 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6269 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6271 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6273 TmpInst.addOperand(Inst.getOperand(1)); // lane
6274 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6275 TmpInst.addOperand(Inst.getOperand(5));
6280 case ARM::VST1LNdAsm_8:
6281 case ARM::VST1LNdAsm_16:
6282 case ARM::VST1LNdAsm_32: {
6284 // Shuffle the operands around so the lane index operand is in the right place.
6287 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6288 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6289 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6290 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6291 TmpInst.addOperand(Inst.getOperand(1)); // lane
6292 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6293 TmpInst.addOperand(Inst.getOperand(5));
6298 case ARM::VST2LNdAsm_8:
6299 case ARM::VST2LNdAsm_16:
6300 case ARM::VST2LNdAsm_32:
6301 case ARM::VST2LNqAsm_16:
6302 case ARM::VST2LNqAsm_32: {
6304 // Shuffle the operands around so the lane index operand is in the right place.
6307 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6308 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6309 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6310 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6311 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6313 TmpInst.addOperand(Inst.getOperand(1)); // lane
6314 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6315 TmpInst.addOperand(Inst.getOperand(5));
6320 case ARM::VST3LNdAsm_8:
6321 case ARM::VST3LNdAsm_16:
6322 case ARM::VST3LNdAsm_32:
6323 case ARM::VST3LNqAsm_16:
6324 case ARM::VST3LNqAsm_32: {
6326 // Shuffle the operands around so the lane index operand is in the right place.
6329 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6330 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6331 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6332 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6333 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6335 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6337 TmpInst.addOperand(Inst.getOperand(1)); // lane
6338 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6339 TmpInst.addOperand(Inst.getOperand(5));
6344 case ARM::VST4LNdAsm_8:
6345 case ARM::VST4LNdAsm_16:
6346 case ARM::VST4LNdAsm_32:
6347 case ARM::VST4LNqAsm_16:
6348 case ARM::VST4LNqAsm_32: {
6350 // Shuffle the operands around so the lane index operand is in the right place.
6353 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6354 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6355 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6356 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6357 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6359 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6361 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6363 TmpInst.addOperand(Inst.getOperand(1)); // lane
6364 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6365 TmpInst.addOperand(Inst.getOperand(5));
6370 // Handle NEON VLD complex aliases.
6371 case ARM::VLD1LNdWB_register_Asm_8:
6372 case ARM::VLD1LNdWB_register_Asm_16:
6373 case ARM::VLD1LNdWB_register_Asm_32: {
6375 // Shuffle the operands around so the lane index operand is in the right place.
6378 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6379 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6380 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6381 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6382 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6383 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6384 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6385 TmpInst.addOperand(Inst.getOperand(1)); // lane
6386 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6387 TmpInst.addOperand(Inst.getOperand(6));
6392 case ARM::VLD2LNdWB_register_Asm_8:
6393 case ARM::VLD2LNdWB_register_Asm_16:
6394 case ARM::VLD2LNdWB_register_Asm_32:
6395 case ARM::VLD2LNqWB_register_Asm_16:
6396 case ARM::VLD2LNqWB_register_Asm_32: {
6398 // Shuffle the operands around so the lane index operand is in the right place.
6401 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6402 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6403 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6405 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6406 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6407 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6408 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6409 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6410 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6412 TmpInst.addOperand(Inst.getOperand(1)); // lane
6413 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6414 TmpInst.addOperand(Inst.getOperand(6));
6419 case ARM::VLD3LNdWB_register_Asm_8:
6420 case ARM::VLD3LNdWB_register_Asm_16:
6421 case ARM::VLD3LNdWB_register_Asm_32:
6422 case ARM::VLD3LNqWB_register_Asm_16:
6423 case ARM::VLD3LNqWB_register_Asm_32: {
6425 // Shuffle the operands around so the lane index operand is in the right place.
6428 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6429 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6430 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6432 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6434 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6435 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6436 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6437 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6438 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6439 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6441 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6443 TmpInst.addOperand(Inst.getOperand(1)); // lane
6444 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6445 TmpInst.addOperand(Inst.getOperand(6));
6450 case ARM::VLD4LNdWB_register_Asm_8:
6451 case ARM::VLD4LNdWB_register_Asm_16:
6452 case ARM::VLD4LNdWB_register_Asm_32:
6453 case ARM::VLD4LNqWB_register_Asm_16:
6454 case ARM::VLD4LNqWB_register_Asm_32: {
6456 // Shuffle the operands around so the lane index operand is in the right place.
6459 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6460 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6461 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6463 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6465 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6467 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6468 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6469 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6470 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6471 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6472 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6474 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6476 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6478 TmpInst.addOperand(Inst.getOperand(1)); // lane
6479 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6480 TmpInst.addOperand(Inst.getOperand(6));
6485 case ARM::VLD1LNdWB_fixed_Asm_8:
6486 case ARM::VLD1LNdWB_fixed_Asm_16:
6487 case ARM::VLD1LNdWB_fixed_Asm_32: {
6489 // Shuffle the operands around so the lane index operand is in the right place.
6492 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6493 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6494 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6495 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6496 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6497 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6498 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6499 TmpInst.addOperand(Inst.getOperand(1)); // lane
6500 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6501 TmpInst.addOperand(Inst.getOperand(5));
6506 case ARM::VLD2LNdWB_fixed_Asm_8:
6507 case ARM::VLD2LNdWB_fixed_Asm_16:
6508 case ARM::VLD2LNdWB_fixed_Asm_32:
6509 case ARM::VLD2LNqWB_fixed_Asm_16:
6510 case ARM::VLD2LNqWB_fixed_Asm_32: {
6512 // Shuffle the operands around so the lane index operand is in the right place.
6515 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6516 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6517 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6519 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6520 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6521 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6522 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6523 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6524 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6526 TmpInst.addOperand(Inst.getOperand(1)); // lane
6527 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6528 TmpInst.addOperand(Inst.getOperand(5));
6533 case ARM::VLD3LNdWB_fixed_Asm_8:
6534 case ARM::VLD3LNdWB_fixed_Asm_16:
6535 case ARM::VLD3LNdWB_fixed_Asm_32:
6536 case ARM::VLD3LNqWB_fixed_Asm_16:
6537 case ARM::VLD3LNqWB_fixed_Asm_32: {
6539 // Shuffle the operands around so the lane index operand is in the right place.
6542 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6543 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6544 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6546 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6548 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6549 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6550 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6551 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6552 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6553 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6555 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6557 TmpInst.addOperand(Inst.getOperand(1)); // lane
6558 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6559 TmpInst.addOperand(Inst.getOperand(5));
6564 case ARM::VLD4LNdWB_fixed_Asm_8:
6565 case ARM::VLD4LNdWB_fixed_Asm_16:
6566 case ARM::VLD4LNdWB_fixed_Asm_32:
6567 case ARM::VLD4LNqWB_fixed_Asm_16:
6568 case ARM::VLD4LNqWB_fixed_Asm_32: {
6570 // Shuffle the operands around so the lane index operand is in the right place.
6573 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6574 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6575 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6577 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6579 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6581 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6582 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6583 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6584 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6585 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6586 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6588 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6590 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6592 TmpInst.addOperand(Inst.getOperand(1)); // lane
6593 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6594 TmpInst.addOperand(Inst.getOperand(5));
6599 case ARM::VLD1LNdAsm_8:
6600 case ARM::VLD1LNdAsm_16:
6601 case ARM::VLD1LNdAsm_32: {
6603 // Shuffle the operands around so the lane index operand is in the right place.
6606 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6607 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6608 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6609 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6610 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6611 TmpInst.addOperand(Inst.getOperand(1)); // lane
6612 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6613 TmpInst.addOperand(Inst.getOperand(5));
6618 case ARM::VLD2LNdAsm_8:
6619 case ARM::VLD2LNdAsm_16:
6620 case ARM::VLD2LNdAsm_32:
6621 case ARM::VLD2LNqAsm_16:
6622 case ARM::VLD2LNqAsm_32: {
6624 // Shuffle the operands around so the lane index operand is in the right place.
6627 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6628 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6629 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6631 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6632 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6633 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6634 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6636 TmpInst.addOperand(Inst.getOperand(1)); // lane
6637 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6638 TmpInst.addOperand(Inst.getOperand(5));
6643 case ARM::VLD3LNdAsm_8:
6644 case ARM::VLD3LNdAsm_16:
6645 case ARM::VLD3LNdAsm_32:
6646 case ARM::VLD3LNqAsm_16:
6647 case ARM::VLD3LNqAsm_32: {
6649 // Shuffle the operands around so the lane index operand is in the right place.
6652 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6653 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6654 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6656 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6658 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6659 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6660 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6661 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6663 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6665 TmpInst.addOperand(Inst.getOperand(1)); // lane
6666 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6667 TmpInst.addOperand(Inst.getOperand(5));
6672 case ARM::VLD4LNdAsm_8:
6673 case ARM::VLD4LNdAsm_16:
6674 case ARM::VLD4LNdAsm_32:
6675 case ARM::VLD4LNqAsm_16:
6676 case ARM::VLD4LNqAsm_32: {
6678 // Shuffle the operands around so the lane index operand is in the right place.
6681 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6682 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6683 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6685 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6687 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6689 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6690 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6691 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6692 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6694 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6696 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6698 TmpInst.addOperand(Inst.getOperand(1)); // lane
6699 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6700 TmpInst.addOperand(Inst.getOperand(5));
6705 // VLD3DUP single 3-element structure to all lanes instructions.
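// Hedged illustration (not in the original): these pseudos cover the
// "load one 3-element structure and replicate to all lanes" syntax, e.g.
//   vld3.8 {d0[], d1[], d2[]}, [r1]
// with Spacing (set by getRealVLDOpcode) choosing between single-spaced
// (d0,d1,d2) and double-spaced (d0,d2,d4) register lists.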
6706 case ARM::VLD3DUPdAsm_8:
6707 case ARM::VLD3DUPdAsm_16:
6708 case ARM::VLD3DUPdAsm_32:
6709 case ARM::VLD3DUPqAsm_8:
6710 case ARM::VLD3DUPqAsm_16:
6711 case ARM::VLD3DUPqAsm_32: {
6714 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6715 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6716 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6718 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6720 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6721 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6722 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6723 TmpInst.addOperand(Inst.getOperand(4));
6728 case ARM::VLD3DUPdWB_fixed_Asm_8:
6729 case ARM::VLD3DUPdWB_fixed_Asm_16:
6730 case ARM::VLD3DUPdWB_fixed_Asm_32:
6731 case ARM::VLD3DUPqWB_fixed_Asm_8:
6732 case ARM::VLD3DUPqWB_fixed_Asm_16:
6733 case ARM::VLD3DUPqWB_fixed_Asm_32: {
6736 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6737 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6738 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6740 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6742 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6743 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6744 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6745 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6746 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6747 TmpInst.addOperand(Inst.getOperand(4));
6752 case ARM::VLD3DUPdWB_register_Asm_8:
6753 case ARM::VLD3DUPdWB_register_Asm_16:
6754 case ARM::VLD3DUPdWB_register_Asm_32:
6755 case ARM::VLD3DUPqWB_register_Asm_8:
6756 case ARM::VLD3DUPqWB_register_Asm_16:
6757 case ARM::VLD3DUPqWB_register_Asm_32: {
6760 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6761 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6762 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6764 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6766 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6767 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6768 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6769 TmpInst.addOperand(Inst.getOperand(3)); // Rm
6770 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6771 TmpInst.addOperand(Inst.getOperand(5));
6776 // VLD3 multiple 3-element structure instructions.
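// Illustrative example (assumed): this covers list syntax such as
//   vld3.16 {d0, d1, d2}, [r1]
// where only the first register is carried as an MCInst operand; the other
// two are reconstructed below as Vd+Spacing and Vd+2*Spacing.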
6777 case ARM::VLD3dAsm_8:
6778 case ARM::VLD3dAsm_16:
6779 case ARM::VLD3dAsm_32:
6780 case ARM::VLD3qAsm_8:
6781 case ARM::VLD3qAsm_16:
6782 case ARM::VLD3qAsm_32: {
6785 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6786 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6787 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6789 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6791 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6792 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6793 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6794 TmpInst.addOperand(Inst.getOperand(4));
6799 case ARM::VLD3dWB_fixed_Asm_8:
6800 case ARM::VLD3dWB_fixed_Asm_16:
6801 case ARM::VLD3dWB_fixed_Asm_32:
6802 case ARM::VLD3qWB_fixed_Asm_8:
6803 case ARM::VLD3qWB_fixed_Asm_16:
6804 case ARM::VLD3qWB_fixed_Asm_32: {
6807 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6808 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6809 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6811 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6813 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6814 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6815 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6816 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6817 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6818 TmpInst.addOperand(Inst.getOperand(4));
6823 case ARM::VLD3dWB_register_Asm_8:
6824 case ARM::VLD3dWB_register_Asm_16:
6825 case ARM::VLD3dWB_register_Asm_32:
6826 case ARM::VLD3qWB_register_Asm_8:
6827 case ARM::VLD3qWB_register_Asm_16:
6828 case ARM::VLD3qWB_register_Asm_32: {
6831 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6832 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6833 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6835 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6837 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6838 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6839 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6840 TmpInst.addOperand(Inst.getOperand(3)); // Rm
6841 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6842 TmpInst.addOperand(Inst.getOperand(5));
6847 // VLD4DUP single 4-element structure to all lanes instructions.
6848 case ARM::VLD4DUPdAsm_8:
6849 case ARM::VLD4DUPdAsm_16:
6850 case ARM::VLD4DUPdAsm_32:
6851 case ARM::VLD4DUPqAsm_8:
6852 case ARM::VLD4DUPqAsm_16:
6853 case ARM::VLD4DUPqAsm_32: {
6856 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6857 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6858 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6860 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6862 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6864 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6865 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6866 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6867 TmpInst.addOperand(Inst.getOperand(4));
6872 case ARM::VLD4DUPdWB_fixed_Asm_8:
6873 case ARM::VLD4DUPdWB_fixed_Asm_16:
6874 case ARM::VLD4DUPdWB_fixed_Asm_32:
6875 case ARM::VLD4DUPqWB_fixed_Asm_8:
6876 case ARM::VLD4DUPqWB_fixed_Asm_16:
6877 case ARM::VLD4DUPqWB_fixed_Asm_32: {
6880 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6881 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6882 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6884 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6886 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6888 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6889 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6890 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6891 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6892 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6893 TmpInst.addOperand(Inst.getOperand(4));
6898 case ARM::VLD4DUPdWB_register_Asm_8:
6899 case ARM::VLD4DUPdWB_register_Asm_16:
6900 case ARM::VLD4DUPdWB_register_Asm_32:
6901 case ARM::VLD4DUPqWB_register_Asm_8:
6902 case ARM::VLD4DUPqWB_register_Asm_16:
6903 case ARM::VLD4DUPqWB_register_Asm_32: {
6906 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6907 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6908 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6910 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6912 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6914 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6915 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6916 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6917 TmpInst.addOperand(Inst.getOperand(3)); // Rm
6918 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6919 TmpInst.addOperand(Inst.getOperand(5));
6924 // VLD4 multiple 4-element structure instructions.
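// Illustrative example (assumed): the 4-register forms such as
//   vld4.32 {d0, d1, d2, d3}, [r1]
// expand the same way, deriving the extra registers from Vd and Spacing.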
6925 case ARM::VLD4dAsm_8:
6926 case ARM::VLD4dAsm_16:
6927 case ARM::VLD4dAsm_32:
6928 case ARM::VLD4qAsm_8:
6929 case ARM::VLD4qAsm_16:
6930 case ARM::VLD4qAsm_32: {
6933 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6934 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6935 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6937 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6939 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6941 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6942 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6943 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6944 TmpInst.addOperand(Inst.getOperand(4));
6949 case ARM::VLD4dWB_fixed_Asm_8:
6950 case ARM::VLD4dWB_fixed_Asm_16:
6951 case ARM::VLD4dWB_fixed_Asm_32:
6952 case ARM::VLD4qWB_fixed_Asm_8:
6953 case ARM::VLD4qWB_fixed_Asm_16:
6954 case ARM::VLD4qWB_fixed_Asm_32: {
6957 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6958 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6959 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6961 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6963 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6965 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6966 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6967 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6968 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6969 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6970 TmpInst.addOperand(Inst.getOperand(4));
6975 case ARM::VLD4dWB_register_Asm_8:
6976 case ARM::VLD4dWB_register_Asm_16:
6977 case ARM::VLD4dWB_register_Asm_32:
6978 case ARM::VLD4qWB_register_Asm_8:
6979 case ARM::VLD4qWB_register_Asm_16:
6980 case ARM::VLD4qWB_register_Asm_32: {
6983 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6984 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6985 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6987 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6989 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6991 TmpInst.addOperand(Inst.getOperand(1)); // Rn
6992 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6993 TmpInst.addOperand(Inst.getOperand(2)); // alignment
6994 TmpInst.addOperand(Inst.getOperand(3)); // Rm
6995 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6996 TmpInst.addOperand(Inst.getOperand(5));
7001 // VST3 multiple 3-element structure instructions.
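// Illustrative example (assumed): a store such as
//   vst3.8 {d0, d1, d2}, [r1]
// mirrors the VLD3 expansion above, except that the real VST instruction
// takes the address operands first, followed by the register list.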
7002 case ARM::VST3dAsm_8:
7003 case ARM::VST3dAsm_16:
7004 case ARM::VST3dAsm_32:
7005 case ARM::VST3qAsm_8:
7006 case ARM::VST3qAsm_16:
7007 case ARM::VST3qAsm_32: {
7010 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7011 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7012 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7013 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7014 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7016 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7018 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7019 TmpInst.addOperand(Inst.getOperand(4));
7024 case ARM::VST3dWB_fixed_Asm_8:
7025 case ARM::VST3dWB_fixed_Asm_16:
7026 case ARM::VST3dWB_fixed_Asm_32:
7027 case ARM::VST3qWB_fixed_Asm_8:
7028 case ARM::VST3qWB_fixed_Asm_16:
7029 case ARM::VST3qWB_fixed_Asm_32: {
7032 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7033 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7034 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7035 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7036 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7037 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7038 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7040 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7042 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7043 TmpInst.addOperand(Inst.getOperand(4));
7048 case ARM::VST3dWB_register_Asm_8:
7049 case ARM::VST3dWB_register_Asm_16:
7050 case ARM::VST3dWB_register_Asm_32:
7051 case ARM::VST3qWB_register_Asm_8:
7052 case ARM::VST3qWB_register_Asm_16:
7053 case ARM::VST3qWB_register_Asm_32: {
7056 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7057 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7058 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7059 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7060 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7061 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7062 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7064 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7066 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7067 TmpInst.addOperand(Inst.getOperand(5));
7072 // VST4 multiple 4-element structure instructions.
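// Illustrative example (assumed):
//   vst4.16 {d0, d1, d2, d3}, [r1]
// As with VST3, the base address and alignment are added before the four
// source registers.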
7073 case ARM::VST4dAsm_8:
7074 case ARM::VST4dAsm_16:
7075 case ARM::VST4dAsm_32:
7076 case ARM::VST4qAsm_8:
7077 case ARM::VST4qAsm_16:
7078 case ARM::VST4qAsm_32: {
7081 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7082 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7083 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7084 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7085 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7087 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7089 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7091 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7092 TmpInst.addOperand(Inst.getOperand(4));
7097 case ARM::VST4dWB_fixed_Asm_8:
7098 case ARM::VST4dWB_fixed_Asm_16:
7099 case ARM::VST4dWB_fixed_Asm_32:
7100 case ARM::VST4qWB_fixed_Asm_8:
7101 case ARM::VST4qWB_fixed_Asm_16:
7102 case ARM::VST4qWB_fixed_Asm_32: {
7105 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7106 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7107 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7108 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7109 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7110 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7111 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7113 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7115 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7117 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7118 TmpInst.addOperand(Inst.getOperand(4));
7123 case ARM::VST4dWB_register_Asm_8:
7124 case ARM::VST4dWB_register_Asm_16:
7125 case ARM::VST4dWB_register_Asm_32:
7126 case ARM::VST4qWB_register_Asm_8:
7127 case ARM::VST4qWB_register_Asm_16:
7128 case ARM::VST4qWB_register_Asm_32: {
7131 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7132 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7133 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7134 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7135 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7136 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7137 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7139 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7141 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7143 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7144 TmpInst.addOperand(Inst.getOperand(5));
7149 // Handle encoding choice for the shift-immediate instructions.
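// Hedged example (not in the original): outside an IT block
//   lsls r0, r0, #2
// can be narrowed to the 16-bit tLSLri encoding, whereas an explicit
//   lsls.w r0, r0, #2
// keeps the 32-bit t2LSLri form; the conditions below implement that choice.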
7152 case ARM::t2ASRri: {
7153 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7154 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7155 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
7156 !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
7157 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
7159 switch (Inst.getOpcode()) {
7160 default: llvm_unreachable("unexpected opcode");
7161 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
7162 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
7163 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
7165 // The Thumb1 operands aren't in the same order. Awesome, eh?
7167 TmpInst.setOpcode(NewOpc);
7168 TmpInst.addOperand(Inst.getOperand(0));
7169 TmpInst.addOperand(Inst.getOperand(5));
7170 TmpInst.addOperand(Inst.getOperand(1));
7171 TmpInst.addOperand(Inst.getOperand(2));
7172 TmpInst.addOperand(Inst.getOperand(3));
7173 TmpInst.addOperand(Inst.getOperand(4));
7180 // Handle the Thumb2 mode MOV complex aliases.
7182 case ARM::t2MOVSsr: {
7183 // Which instruction to expand to depends on the CCOut operand and
7184 // whether we're in an IT block if the register operands are low registers.
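// For instance (illustrative, not in the source): outside an IT block
//   movs r1, r1, lsr r2
// can be emitted as the 16-bit 'lsrs r1, r2' (tLSRrr); otherwise the wide
// t2LSRrr form is kept.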
7186 bool isNarrow = false;
7187 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7188 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7189 isARMLowRegister(Inst.getOperand(2).getReg()) &&
7190 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7191 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
7195 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
7196 default: llvm_unreachable("unexpected opcode!");
7197 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
7198 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
7199 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
7200 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
7202 TmpInst.setOpcode(newOpc);
7203 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7205 TmpInst.addOperand(MCOperand::CreateReg(
7206 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7207 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7208 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7209 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7210 TmpInst.addOperand(Inst.getOperand(5));
7212 TmpInst.addOperand(MCOperand::CreateReg(
7213 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7218 case ARM::t2MOVSsi: {
7219 // Which instruction to expand to depends on the CCOut operand and
7220 // whether we're in an IT block if the register operands are low registers.
7222 bool isNarrow = false;
7223 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7224 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7225 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7229 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7230 default: llvm_unreachable("unexpected opcode!");
7231 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7232 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7233 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7234 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7235 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7237 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7238 if (Amount == 32) Amount = 0;
7239 TmpInst.setOpcode(newOpc);
7240 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7242 TmpInst.addOperand(MCOperand::CreateReg(
7243 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7244 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7245 if (newOpc != ARM::t2RRX)
7246 TmpInst.addOperand(MCOperand::CreateImm(Amount));
7247 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7248 TmpInst.addOperand(Inst.getOperand(4));
7250 TmpInst.addOperand(MCOperand::CreateReg(
7251 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7255 // Handle the ARM mode MOV complex aliases.
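// Illustrative examples (assumed): in ARM mode the shift mnemonics are
// aliases for shifted MOVs, e.g.
//   asr r0, r1, r2   is encoded as   mov r0, r1, asr r2   (MOVsr)
//   lsl r0, r1, #3   is encoded as   mov r0, r1, lsl #3   (MOVsi)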
7260 ARM_AM::ShiftOpc ShiftTy;
7261 switch(Inst.getOpcode()) {
7262 default: llvm_unreachable("unexpected opcode!");
7263 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7264 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7265 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7266 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7268 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7270 TmpInst.setOpcode(ARM::MOVsr);
7271 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7272 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7273 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7274 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7275 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7276 TmpInst.addOperand(Inst.getOperand(4));
7277 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7285 ARM_AM::ShiftOpc ShiftTy;
7286 switch(Inst.getOpcode()) {
7287 default: llvm_unreachable("unexpected opcode!");
7288 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
7289 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
7290 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
7291 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7293 // A shift by zero is a plain MOVr, not a MOVsi.
7294 unsigned Amt = Inst.getOperand(2).getImm();
7295 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7296 // A shift by 32 should be encoded as 0 when permitted
7297 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7299 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7301 TmpInst.setOpcode(Opc);
7302 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7303 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7304 if (Opc == ARM::MOVsi)
7305 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7306 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7307 TmpInst.addOperand(Inst.getOperand(4));
7308 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7313 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7315 TmpInst.setOpcode(ARM::MOVsi);
7316 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7317 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7318 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7319 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7320 TmpInst.addOperand(Inst.getOperand(3));
7321 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7325 case ARM::t2LDMIA_UPD: {
7326 // If this is a load of a single register, then we should use
7327 // a post-indexed LDR instruction instead, per the ARM ARM.
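// E.g. (illustrative): a single-register 'ldmia r0!, {r1}' in Thumb2 is
// emitted as the equivalent post-indexed load 'ldr r1, [r0], #4'.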
7328 if (Inst.getNumOperands() != 5)
7331 TmpInst.setOpcode(ARM::t2LDR_POST);
7332 TmpInst.addOperand(Inst.getOperand(4)); // Rt
7333 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7334 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7335 TmpInst.addOperand(MCOperand::CreateImm(4));
7336 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7337 TmpInst.addOperand(Inst.getOperand(3));
7341 case ARM::t2STMDB_UPD: {
7342 // If this is a store of a single register, then we should use
7343 // a pre-indexed STR instruction instead, per the ARM ARM.
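// E.g. (illustrative): a single-register 'stmdb r0!, {r1}' becomes the
// pre-indexed store 'str r1, [r0, #-4]!'.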
7344 if (Inst.getNumOperands() != 5)
7347 TmpInst.setOpcode(ARM::t2STR_PRE);
7348 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7349 TmpInst.addOperand(Inst.getOperand(4)); // Rt
7350 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7351 TmpInst.addOperand(MCOperand::CreateImm(-4));
7352 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7353 TmpInst.addOperand(Inst.getOperand(3));
7357 case ARM::LDMIA_UPD:
7358 // If this is a load of a single register via a 'pop', then we should use
7359 // a post-indexed LDR instruction instead, per the ARM ARM.
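// E.g. (illustrative): 'pop {r3}' in ARM mode is emitted as
// 'ldr r3, [sp], #4'.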
7360 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7361 Inst.getNumOperands() == 5) {
7363 TmpInst.setOpcode(ARM::LDR_POST_IMM);
7364 TmpInst.addOperand(Inst.getOperand(4)); // Rt
7365 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7366 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7367 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
7368 TmpInst.addOperand(MCOperand::CreateImm(4));
7369 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7370 TmpInst.addOperand(Inst.getOperand(3));
7375 case ARM::STMDB_UPD:
7376 // If this is a store of a single register via a 'push', then we should use
7377 // a pre-indexed STR instruction instead, per the ARM ARM.
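// E.g. (illustrative): 'push {r3}' in ARM mode is emitted as
// 'str r3, [sp, #-4]!'.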
7378 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7379 Inst.getNumOperands() == 5) {
7381 TmpInst.setOpcode(ARM::STR_PRE_IMM);
7382 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7383 TmpInst.addOperand(Inst.getOperand(4)); // Rt
7384 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7385 TmpInst.addOperand(MCOperand::CreateImm(-4));
7386 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7387 TmpInst.addOperand(Inst.getOperand(3));
7391 case ARM::t2ADDri12:
7392 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7393 // mnemonic was used (not "addw"), encoding T3 is preferred.
7394 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7395 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7397 Inst.setOpcode(ARM::t2ADDri);
7398 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7400 case ARM::t2SUBri12:
7401 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7402 // mnemonic was used (not "subw"), encoding T3 is preferred.
7403 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7404 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7406 Inst.setOpcode(ARM::t2SUBri);
7407 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7410 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7411 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7412 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7413 // to encoding T1 if <Rd> is omitted."
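// E.g. (illustrative): 'adds r1, r1, #3', with Rd written explicitly,
// selects the T1 encoding (tADDi3), while 'adds r1, #3' keeps tADDi8.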
7414 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7415 Inst.setOpcode(ARM::tADDi3);
7420 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7421 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7422 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7423 // to encoding T1 if <Rd> is omitted."
7424 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7425 Inst.setOpcode(ARM::tSUBi3);
7430 case ARM::t2SUBri: {
7431 // If the destination and first source operand are the same, and
7432 // the flags are compatible with the current IT status, use encoding T2
7433 // instead of T3. For compatibility with the system 'as'. Make sure the
7434 // wide encoding wasn't explicit.
7435 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7436 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7437 (unsigned)Inst.getOperand(2).getImm() > 255 ||
7438 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7439 (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7440 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7441 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7444 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7445 ARM::tADDi8 : ARM::tSUBi8);
7446 TmpInst.addOperand(Inst.getOperand(0));
7447 TmpInst.addOperand(Inst.getOperand(5));
7448 TmpInst.addOperand(Inst.getOperand(0));
7449 TmpInst.addOperand(Inst.getOperand(2));
7450 TmpInst.addOperand(Inst.getOperand(3));
7451 TmpInst.addOperand(Inst.getOperand(4));
7455 case ARM::t2ADDrr: {
7456 // If the destination and first source operand are the same, and
7457 // there's no setting of the flags, use encoding T2 instead of T3.
7458 // Note that this is only for ADD, not SUB. This mirrors the system
7459 // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7460 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7461 Inst.getOperand(5).getReg() != 0 ||
7462 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7463 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7466 TmpInst.setOpcode(ARM::tADDhirr);
7467 TmpInst.addOperand(Inst.getOperand(0));
7468 TmpInst.addOperand(Inst.getOperand(0));
7469 TmpInst.addOperand(Inst.getOperand(2));
7470 TmpInst.addOperand(Inst.getOperand(3));
7471 TmpInst.addOperand(Inst.getOperand(4));
7475 case ARM::tADDrSP: {
7476 // If the non-SP source operand and the destination operand are not the
7477 // same, we need to use the 32-bit encoding if it's available.
7478 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7479 Inst.setOpcode(ARM::t2ADDrr);
7480 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7486 // A Thumb conditional branch outside of an IT block is a tBcc.
7487 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7488 Inst.setOpcode(ARM::tBcc);
7493 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7494 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7495 Inst.setOpcode(ARM::t2Bcc);
7500 // If the conditional is AL or we're in an IT block, we really want t2B.
7501 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7502 Inst.setOpcode(ARM::t2B);
7507 // If the conditional is AL, we really want tB.
7508 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7509 Inst.setOpcode(ARM::tB);
7514 // If the register list contains any high registers, or if the writeback
7515 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7516 // instead if we're in Thumb2. Otherwise, this should have generated
7517 // an error in validateInstruction().
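// E.g. (illustrative): 'ldmia r0!, {r1, r8}' cannot use the 16-bit tLDMIA
// because r8 is a high register, so in Thumb2 it is widened below to
// t2LDMIA_UPD.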
7518 unsigned Rn = Inst.getOperand(0).getReg();
7519 bool hasWritebackToken =
7520 (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7521 static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7522 bool listContainsBase;
7523 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7524 (!listContainsBase && !hasWritebackToken) ||
7525 (listContainsBase && hasWritebackToken)) {
7526 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7527 assert (isThumbTwo());
7528 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7529 // If we're switching to the updating version, we need to insert
7530 // the writeback tied operand.
7531 if (hasWritebackToken)
7532 Inst.insert(Inst.begin(),
7533 MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7538 case ARM::tSTMIA_UPD: {
7539 // If the register list contains any high registers, we need to use
7540 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7541 // should have generated an error in validateInstruction().
7542 unsigned Rn = Inst.getOperand(0).getReg();
7543 bool listContainsBase;
7544 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7545 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7546 assert (isThumbTwo());
7547 Inst.setOpcode(ARM::t2STMIA_UPD);
7553 bool listContainsBase;
7554 // If the register list contains any high registers, we need to use
7555 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7556 // should have generated an error in validateInstruction().
7557 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7559 assert (isThumbTwo());
7560 Inst.setOpcode(ARM::t2LDMIA_UPD);
7561 // Add the base register and writeback operands.
7562 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7563 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7567 bool listContainsBase;
7568 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7570 assert (isThumbTwo());
7571 Inst.setOpcode(ARM::t2STMDB_UPD);
7572 // Add the base register and writeback operands.
7573 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7574 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7578 // If we can use the 16-bit encoding and the user didn't explicitly
7579 // request the 32-bit variant, transform it here.
7580 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7581 (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7582 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7583 Inst.getOperand(4).getReg() == ARM::CPSR) ||
7584 (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7585 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7586 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7587 // The operands aren't in the same order for tMOVi8...
7589 TmpInst.setOpcode(ARM::tMOVi8);
7590 TmpInst.addOperand(Inst.getOperand(0));
7591 TmpInst.addOperand(Inst.getOperand(4));
7592 TmpInst.addOperand(Inst.getOperand(1));
7593 TmpInst.addOperand(Inst.getOperand(2));
7594 TmpInst.addOperand(Inst.getOperand(3));
7601 // If we can use the 16-bit encoding and the user didn't explicitly
7602 // request the 32-bit variant, transform it here.
7603 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7604 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7605 Inst.getOperand(2).getImm() == ARMCC::AL &&
7606 Inst.getOperand(4).getReg() == ARM::CPSR &&
7607 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7608 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7609 // The operands aren't the same for tMOV[S]r... (no cc_out)
7611 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7612 TmpInst.addOperand(Inst.getOperand(0));
7613 TmpInst.addOperand(Inst.getOperand(1));
7614 TmpInst.addOperand(Inst.getOperand(2));
7615 TmpInst.addOperand(Inst.getOperand(3));
7625 // If we can use the 16-bit encoding and the user didn't explicitly
7626 // request the 32-bit variant, transform it here.
7627 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7628 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7629 Inst.getOperand(2).getImm() == 0 &&
7630 (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7631 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7633 switch (Inst.getOpcode()) {
7634 default: llvm_unreachable("Illegal opcode!");
7635 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7636 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7637 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7638 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7640 // The operands aren't the same for Thumb1 (no rotate operand).
7642 TmpInst.setOpcode(NewOpc);
7643 TmpInst.addOperand(Inst.getOperand(0));
7644 TmpInst.addOperand(Inst.getOperand(1));
7645 TmpInst.addOperand(Inst.getOperand(3));
7646 TmpInst.addOperand(Inst.getOperand(4));
7653 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7654 // rrx shifts and asr/lsr of #32 are encoded as 0.
7655 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7657 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7658 // Shifting by zero is accepted as a vanilla 'MOVr'
7660 TmpInst.setOpcode(ARM::MOVr);
7661 TmpInst.addOperand(Inst.getOperand(0));
7662 TmpInst.addOperand(Inst.getOperand(1));
7663 TmpInst.addOperand(Inst.getOperand(3));
7664 TmpInst.addOperand(Inst.getOperand(4));
7665 TmpInst.addOperand(Inst.getOperand(5));
7678 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7679 if (SOpc == ARM_AM::rrx) return false;
7680 switch (Inst.getOpcode()) {
7681 default: llvm_unreachable("unexpected opcode!");
7682 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7683 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7684 case ARM::EORrsi: newOpc = ARM::EORrr; break;
7685 case ARM::BICrsi: newOpc = ARM::BICrr; break;
7686 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7687 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7689 // If the shift is by zero, use the non-shifted instruction definition.
7690 // The exception is for right shifts, where 0 == 32
7691 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7692 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7694 TmpInst.setOpcode(newOpc);
7695 TmpInst.addOperand(Inst.getOperand(0));
7696 TmpInst.addOperand(Inst.getOperand(1));
7697 TmpInst.addOperand(Inst.getOperand(2));
7698 TmpInst.addOperand(Inst.getOperand(4));
7699 TmpInst.addOperand(Inst.getOperand(5));
7700 TmpInst.addOperand(Inst.getOperand(6));
7708 // In the IT mask, the bits for all but the first condition represent 't'
7709 // when they equal the low bit of the condition code value. The parsed mask
7710 // always uses 1 to mean 't', so XOR-toggle the bits if the low bit of the
7711 // condition code is zero.
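// Hedged example (not from the source): for 'ite eq' the parsed mask marks
// the second slot as 'e' (0, since 1 means 't'); EQ has a zero low bit, so
// the bits are toggled to match the encoding convention where a mask bit
// equal to cond[0] means 'then'.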
7712 MCOperand &MO = Inst.getOperand(1);
7713 unsigned Mask = MO.getImm();
7714 unsigned OrigMask = Mask;
7715 unsigned TZ = countTrailingZeros(Mask);
7716 if ((Inst.getOperand(0).getImm() & 1) == 0) {
7717 assert(Mask && TZ <= 3 && "illegal IT mask value!");
7718 Mask ^= (0xE << TZ) & 0xF;
7722 // Set up the IT block state according to the IT instruction we just matched.
7724 assert(!inITBlock() && "nested IT blocks?!");
7725 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7726 ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7727 ITState.CurPosition = 0;
7728 ITState.FirstCond = true;
7738 // Assemblers should use the narrow encodings of these instructions when permissible.
7739 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7740 isARMLowRegister(Inst.getOperand(2).getReg())) &&
7741 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7742 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7743 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7744 (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7745 !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7747 switch (Inst.getOpcode()) {
7748 default: llvm_unreachable("unexpected opcode");
7749 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7750 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7751 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7752 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7753 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7754 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7757 TmpInst.setOpcode(NewOpc);
7758 TmpInst.addOperand(Inst.getOperand(0));
7759 TmpInst.addOperand(Inst.getOperand(5));
7760 TmpInst.addOperand(Inst.getOperand(1));
7761 TmpInst.addOperand(Inst.getOperand(2));
7762 TmpInst.addOperand(Inst.getOperand(3));
7763 TmpInst.addOperand(Inst.getOperand(4));
7774 // Assemblers should use the narrow encodings of these instructions when permissible.
7775 // These instructions are special in that they are commutable, so shorter encodings
7776 // are available more often.
7777 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7778 isARMLowRegister(Inst.getOperand(2).getReg())) &&
7779 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7780 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7781 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7782 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7783 (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7784 !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7786 switch (Inst.getOpcode()) {
7787 default: llvm_unreachable("unexpected opcode");
7788 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7789 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7790 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7791 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7794 TmpInst.setOpcode(NewOpc);
7795 TmpInst.addOperand(Inst.getOperand(0));
7796 TmpInst.addOperand(Inst.getOperand(5));
7797 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7798 TmpInst.addOperand(Inst.getOperand(1));
7799 TmpInst.addOperand(Inst.getOperand(2));
7801 TmpInst.addOperand(Inst.getOperand(2));
7802 TmpInst.addOperand(Inst.getOperand(1));
7804 TmpInst.addOperand(Inst.getOperand(3));
7805 TmpInst.addOperand(Inst.getOperand(4));
7815 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7816 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7817 // suffix depending on whether they're in an IT block or not.
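// Illustrative (assumed) example: outside an IT block the flag-setting
// 'adds r0, r1, r2' is the valid 16-bit form (cc_out == CPSR), while inside
// an IT block the non-flag-setting 'add r0, r1, r2' is required; the checks
// below report Match_RequiresITBlock / Match_RequiresNotITBlock accordingly.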
7818 unsigned Opc = Inst.getOpcode();
7819 const MCInstrDesc &MCID = MII.get(Opc);
7820 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7821 assert(MCID.hasOptionalDef() &&
7822 "optionally flag setting instruction missing optional def operand");
7823 assert(MCID.NumOperands == Inst.getNumOperands() &&
7824 "operand count mismatch!");
7825 // Find the optional-def operand (cc_out).
7828 OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7831 // If we're parsing Thumb1, reject it completely.
7832 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7833 return Match_MnemonicFail;
7834 // If we're parsing Thumb2, which form is legal depends on whether we're in an IT block.
7836 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7838 return Match_RequiresITBlock;
7839 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7841 return Match_RequiresNotITBlock;
7843 // Some high-register supporting Thumb1 encodings only allow both registers
7844 // to be from r0-r7 when in Thumb2.
7845 else if (Opc == ARM::tADDhirr && isThumbOne() &&
7846 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7847 isARMLowRegister(Inst.getOperand(2).getReg()))
7848 return Match_RequiresThumb2;
7849 // Others only require ARMv6 or later.
7850 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7851 isARMLowRegister(Inst.getOperand(0).getReg()) &&
7852 isARMLowRegister(Inst.getOperand(1).getReg()))
7853 return Match_RequiresV6;
7854 return Match_Success;
7857 template<> inline bool IsCPSRDead<MCInst>(MCInst* Instr) {
7858 return true; // In an assembly source, no need to second-guess
7861 static const char *getSubtargetFeatureName(unsigned Val);
7862 bool ARMAsmParser::
7863 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7864 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7865 MCStreamer &Out, unsigned &ErrorInfo,
7866 bool MatchingInlineAsm) {
7868 unsigned MatchResult;
7870 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7872 switch (MatchResult) {
7875 // Context sensitive operand constraints aren't handled by the matcher,
7876 // so check them here.
7877 if (validateInstruction(Inst, Operands)) {
7878 // Still progress the IT block, otherwise one wrong condition causes
7879 // nasty cascading errors.
7880 forwardITPosition();
7884 { // processInstruction() updates inITBlock state, we need to save it away
7885 bool wasInITBlock = inITBlock();
7887 // Some instructions need post-processing to, for example, tweak which
7888 // encoding is selected. Loop on it while changes happen so the
7889 // individual transformations can chain off each other. E.g.,
7890 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7891 while (processInstruction(Inst, Operands))
7894 // Only after the instruction is fully processed can we validate it.
7895 if (wasInITBlock && hasV8Ops() && isThumb() &&
7896 !isV8EligibleForIT(&Inst)) {
7897 Warning(IDLoc, "deprecated instruction in IT block");
7901 // Only move forward at the very end so that everything in validate
7902 // and process gets a consistent answer about whether we're in an IT block.
7904 forwardITPosition();
7906 // ITasm is an ARM mode pseudo-instruction that just sets the IT block state
7907 // and doesn't actually encode.
7908 if (Inst.getOpcode() == ARM::ITasm)
7912 Out.EmitInstruction(Inst, STI);
7914 case Match_MissingFeature: {
7915 assert(ErrorInfo && "Unknown missing feature!");
7916 // Special case the error message for the very common case where only
7917 // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7918 std::string Msg = "instruction requires:";
7920 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7921 if (ErrorInfo & Mask) {
7923 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7927 return Error(IDLoc, Msg);
7929 case Match_InvalidOperand: {
7930 SMLoc ErrorLoc = IDLoc;
7931 if (ErrorInfo != ~0U) {
7932 if (ErrorInfo >= Operands.size())
7933 return Error(IDLoc, "too few operands for instruction");
7935 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7936 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7939 return Error(ErrorLoc, "invalid operand for instruction");
7941 case Match_MnemonicFail:
7942 return Error(IDLoc, "invalid instruction",
7943 ((ARMOperand*)Operands[0])->getLocRange());
7944 case Match_RequiresNotITBlock:
7945 return Error(IDLoc, "flag setting instruction only valid outside IT block");
7946 case Match_RequiresITBlock:
7947 return Error(IDLoc, "instruction only valid inside IT block");
7948 case Match_RequiresV6:
7949 return Error(IDLoc, "instruction variant requires ARMv6 or later");
7950 case Match_RequiresThumb2:
7951 return Error(IDLoc, "instruction variant requires Thumb2");
7952 case Match_ImmRange0_15: {
7953 SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7954 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7955 return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7957 case Match_ImmRange0_239: {
7958 SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7959 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7960 return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
7964 llvm_unreachable("Implement any new match types added!");
7967 /// parseDirective parses the ARM-specific directives.
7968 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7969 StringRef IDVal = DirectiveID.getIdentifier();
7970 if (IDVal == ".word")
7971 return parseLiteralValues(4, DirectiveID.getLoc());
7972 else if (IDVal == ".short" || IDVal == ".hword")
7973 return parseLiteralValues(2, DirectiveID.getLoc());
7974 else if (IDVal == ".thumb")
7975 return parseDirectiveThumb(DirectiveID.getLoc());
7976 else if (IDVal == ".arm")
7977 return parseDirectiveARM(DirectiveID.getLoc());
7978 else if (IDVal == ".thumb_func")
7979 return parseDirectiveThumbFunc(DirectiveID.getLoc());
7980 else if (IDVal == ".code")
7981 return parseDirectiveCode(DirectiveID.getLoc());
7982 else if (IDVal == ".syntax")
7983 return parseDirectiveSyntax(DirectiveID.getLoc());
7984 else if (IDVal == ".unreq")
7985 return parseDirectiveUnreq(DirectiveID.getLoc());
7986 else if (IDVal == ".arch")
7987 return parseDirectiveArch(DirectiveID.getLoc());
7988 else if (IDVal == ".eabi_attribute")
7989 return parseDirectiveEabiAttr(DirectiveID.getLoc());
7990 else if (IDVal == ".cpu")
7991 return parseDirectiveCPU(DirectiveID.getLoc());
7992 else if (IDVal == ".fpu")
7993 return parseDirectiveFPU(DirectiveID.getLoc());
7994 else if (IDVal == ".fnstart")
7995 return parseDirectiveFnStart(DirectiveID.getLoc());
7996 else if (IDVal == ".fnend")
7997 return parseDirectiveFnEnd(DirectiveID.getLoc());
7998 else if (IDVal == ".cantunwind")
7999 return parseDirectiveCantUnwind(DirectiveID.getLoc());
8000 else if (IDVal == ".personality")
8001 return parseDirectivePersonality(DirectiveID.getLoc());
8002 else if (IDVal == ".handlerdata")
8003 return parseDirectiveHandlerData(DirectiveID.getLoc());
8004 else if (IDVal == ".setfp")
8005 return parseDirectiveSetFP(DirectiveID.getLoc());
8006 else if (IDVal == ".pad")
8007 return parseDirectivePad(DirectiveID.getLoc());
8008 else if (IDVal == ".save")
8009 return parseDirectiveRegSave(DirectiveID.getLoc(), false);
8010 else if (IDVal == ".vsave")
8011 return parseDirectiveRegSave(DirectiveID.getLoc(), true);
8012 else if (IDVal == ".inst")
8013 return parseDirectiveInst(DirectiveID.getLoc());
8014 else if (IDVal == ".inst.n")
8015 return parseDirectiveInst(DirectiveID.getLoc(), 'n');
8016 else if (IDVal == ".inst.w")
8017 return parseDirectiveInst(DirectiveID.getLoc(), 'w');
8018 else if (IDVal == ".ltorg" || IDVal == ".pool")
8019 return parseDirectiveLtorg(DirectiveID.getLoc());
8020 else if (IDVal == ".even")
8021 return parseDirectiveEven(DirectiveID.getLoc());
8022 else if (IDVal == ".personalityindex")
8023 return parseDirectivePersonalityIndex(DirectiveID.getLoc());
8024 else if (IDVal == ".unwind_raw")
8025 return parseDirectiveUnwindRaw(DirectiveID.getLoc());
8026 else if (IDVal == ".tlsdescseq")
8027 return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
8028 else if (IDVal == ".movsp")
8029 return parseDirectiveMovSP(DirectiveID.getLoc());
8030 else if (IDVal == ".object_arch")
8031 return parseDirectiveObjectArch(DirectiveID.getLoc());
8032 else if (IDVal == ".arch_extension")
8033 return parseDirectiveArchExtension(DirectiveID.getLoc());
8034 else if (IDVal == ".align")
8035 return parseDirectiveAlign(DirectiveID.getLoc());
8036 else if (IDVal == ".thumb_set")
8037 return parseDirectiveThumbSet(DirectiveID.getLoc());
8041 /// parseLiteralValues
8042 /// ::= .hword expression [, expression]*
8043 /// ::= .short expression [, expression]*
8044 /// ::= .word expression [, expression]*
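/// For example (illustrative): '.short 0x1234, 0x5678' emits two 2-byte
/// values, and '.word sym' emits a 4-byte value (with a fixup if 'sym' is
/// not yet known).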
8045 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
8046 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8048 const MCExpr *Value;
8049 if (getParser().parseExpression(Value)) {
8050 Parser.eatToEndOfStatement();
8054 getParser().getStreamer().EmitValue(Value, Size);
8056 if (getLexer().is(AsmToken::EndOfStatement))
8059 // FIXME: Improve diagnostic.
8060 if (getLexer().isNot(AsmToken::Comma)) {
8061 Error(L, "unexpected token in directive");
8072 /// parseDirectiveThumb
8073 ///  ::= .thumb
8074 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
8075 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8076 Error(L, "unexpected token in directive");
8082 Error(L, "target does not support Thumb mode");
8089 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8093 /// parseDirectiveARM
8094 ///  ::= .arm
8095 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
8096 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8097 Error(L, "unexpected token in directive");
8103 Error(L, "target does not support ARM mode");
8110 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8114 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
8115 if (NextSymbolIsThumb) {
8116 getParser().getStreamer().EmitThumbFunc(Symbol);
8117 NextSymbolIsThumb = false;
8124 const MCObjectFileInfo::Environment Format =
8125 getContext().getObjectFileInfo()->getObjectFileType();
8127 case MCObjectFileInfo::IsCOFF: {
8128 const MCSymbolData &SD =
8129 getParser().getStreamer().getOrCreateSymbolData(Symbol);
8130 char Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
8131 if (SD.getFlags() & (Type << COFF::SF_TypeShift))
8132 getParser().getStreamer().EmitThumbFunc(Symbol);
8135 case MCObjectFileInfo::IsELF: {
8136 const MCSymbolData &SD =
8137 getParser().getStreamer().getOrCreateSymbolData(Symbol);
8138 if (MCELF::GetType(SD) & (ELF::STT_FUNC << ELF_STT_Shift))
8139 getParser().getStreamer().EmitThumbFunc(Symbol);
8142 case MCObjectFileInfo::IsMachO:
8147 /// parseDirectiveThumbFunc
8148 /// ::= .thumb_func symbol_name
8149 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
8150 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8151 bool isMachO = MAI->hasSubsectionsViaSymbols();
8153 // Darwin asm optionally has the function name after the .thumb_func directive.
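// E.g. (illustrative): Darwin accepts '.thumb_func _foo', whereas the
// name-less form used elsewhere marks the next label parsed as a Thumb
// function.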
8156 const AsmToken &Tok = Parser.getTok();
8157 if (Tok.isNot(AsmToken::EndOfStatement)) {
8158 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
8159 Error(L, "unexpected token in .thumb_func directive");
8164 getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
8165 getParser().getStreamer().EmitThumbFunc(Func);
8166 Parser.Lex(); // Consume the identifier token.
8171 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8172 Error(L, "unexpected token in directive");
8176 NextSymbolIsThumb = true;
8180 /// parseDirectiveSyntax
8181 /// ::= .syntax unified | divided
8182 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
8183 const AsmToken &Tok = Parser.getTok();
8184 if (Tok.isNot(AsmToken::Identifier)) {
8185 Error(L, "unexpected token in .syntax directive");
8189 StringRef Mode = Tok.getString();
8190 if (Mode == "unified" || Mode == "UNIFIED") {
8192 } else if (Mode == "divided" || Mode == "DIVIDED") {
8193 Error(L, "'.syntax divided' arm assembly not supported");
8196 Error(L, "unrecognized syntax mode in .syntax directive");
8200 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8201 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8206 // TODO tell the MC streamer the mode
8207 // getParser().getStreamer().Emit???();
8211 /// parseDirectiveCode
8212 /// ::= .code 16 | 32
8213 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
8214 const AsmToken &Tok = Parser.getTok();
8215 if (Tok.isNot(AsmToken::Integer)) {
8216 Error(L, "unexpected token in .code directive");
8219 int64_t Val = Parser.getTok().getIntVal();
8220 if (Val != 16 && Val != 32) {
8221 Error(L, "invalid operand to .code directive");
8226 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8227 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8234 Error(L, "target does not support Thumb mode");
8240 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8243 Error(L, "target does not support ARM mode");
8249 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8255 /// parseDirectiveReq
8256 /// ::= name .req registername
8257 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
8258 Parser.Lex(); // Eat the '.req' token.
8260 SMLoc SRegLoc, ERegLoc;
8261 if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
8262 Parser.eatToEndOfStatement();
8263 Error(SRegLoc, "register name expected");
8267 // Shouldn't be anything else.
8268 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8269 Parser.eatToEndOfStatement();
8270 Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
8274 Parser.Lex(); // Consume the EndOfStatement
8276 if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) {
8277 Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
8284 /// parseDirectiveUnreq
8285 /// ::= .unreq registername
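/// Illustrative usage, removing a previously defined (hypothetical) alias:
///   .unreq acc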
8286 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
8287 if (Parser.getTok().isNot(AsmToken::Identifier)) {
8288 Parser.eatToEndOfStatement();
8289 Error(L, "unexpected input in .unreq directive.");
8292 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
8293 Parser.Lex(); // Eat the identifier.
8297 /// parseDirectiveArch
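/// Illustrative usage (architecture name chosen for the example):
///   .arch armv7-a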
8299 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
8300 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8301 bool isMachO = MAI->hasSubsectionsViaSymbols();
8303 Error(L, ".arch directive not valid for Mach-O");
8304 Parser.eatToEndOfStatement();
8308 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
8310 unsigned ID = StringSwitch<unsigned>(Arch)
8311 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
8312 .Case(NAME, ARM::ID)
8313 #define ARM_ARCH_ALIAS(NAME, ID) \
8314 .Case(NAME, ARM::ID)
8315 #include "MCTargetDesc/ARMArchName.def"
8316 .Default(ARM::INVALID_ARCH);
8318 if (ID == ARM::INVALID_ARCH) {
8319 Error(L, "Unknown arch name");
8323 getTargetStreamer().emitArch(ID);
8327 /// parseDirectiveEabiAttr
8328 /// ::= .eabi_attribute int, int [, "str"]
8329 /// ::= .eabi_attribute Tag_name, int [, "str"]
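/// Illustrative usage (tags and values chosen for the example):
///   .eabi_attribute 6, 10          @ Tag_CPU_arch, integer-valued
///   .eabi_attribute 5, "cortex-a9" @ Tag_CPU_name, string-valued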
8330 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
8331 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8332 bool isMachO = MAI->hasSubsectionsViaSymbols();
8334 Error(L, ".eabi_attribute directive not valid for Mach-O");
8335 Parser.eatToEndOfStatement();
8341 TagLoc = Parser.getTok().getLoc();
8342 if (Parser.getTok().is(AsmToken::Identifier)) {
8343 StringRef Name = Parser.getTok().getIdentifier();
8344 Tag = ARMBuildAttrs::AttrTypeFromString(Name);
8346 Error(TagLoc, "attribute name not recognised: " + Name);
8347 Parser.eatToEndOfStatement();
8352 const MCExpr *AttrExpr;
8354 TagLoc = Parser.getTok().getLoc();
8355 if (Parser.parseExpression(AttrExpr)) {
8356 Parser.eatToEndOfStatement();
8360 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
8362 Error(TagLoc, "expected numeric constant");
8363 Parser.eatToEndOfStatement();
8367 Tag = CE->getValue();
8370 if (Parser.getTok().isNot(AsmToken::Comma)) {
8371 Error(Parser.getTok().getLoc(), "comma expected");
8372 Parser.eatToEndOfStatement();
8375 Parser.Lex(); // skip comma
8377 StringRef StringValue = "";
8378 bool IsStringValue = false;
8380 int64_t IntegerValue = 0;
8381 bool IsIntegerValue = false;
8383 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
8384 IsStringValue = true;
8385 else if (Tag == ARMBuildAttrs::compatibility) {
8386 IsStringValue = true;
8387 IsIntegerValue = true;
8388 } else if (Tag < 32 || Tag % 2 == 0)
8389 IsIntegerValue = true;
8390 else if (Tag % 2 == 1)
8391 IsStringValue = true;
8393 llvm_unreachable("invalid tag type");
8395 if (IsIntegerValue) {
8396 const MCExpr *ValueExpr;
8397 SMLoc ValueExprLoc = Parser.getTok().getLoc();
8398 if (Parser.parseExpression(ValueExpr)) {
8399 Parser.eatToEndOfStatement();
8403 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
8405 Error(ValueExprLoc, "expected numeric constant");
8406 Parser.eatToEndOfStatement();
8410 IntegerValue = CE->getValue();
8413 if (Tag == ARMBuildAttrs::compatibility) {
8414 if (Parser.getTok().isNot(AsmToken::Comma))
8415 IsStringValue = false;
8420 if (IsStringValue) {
8421 if (Parser.getTok().isNot(AsmToken::String)) {
8422 Error(Parser.getTok().getLoc(), "bad string constant");
8423 Parser.eatToEndOfStatement();
8427 StringValue = Parser.getTok().getStringContents();
8431 if (IsIntegerValue && IsStringValue) {
8432 assert(Tag == ARMBuildAttrs::compatibility);
8433 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
8434 } else if (IsIntegerValue)
8435 getTargetStreamer().emitAttribute(Tag, IntegerValue);
8436 else if (IsStringValue)
8437 getTargetStreamer().emitTextAttribute(Tag, StringValue);
8441 /// parseDirectiveCPU
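/// Illustrative usage (CPU name chosen for the example):
///   .cpu cortex-a8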
8443 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
8444 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8445 bool isMachO = MAI->hasSubsectionsViaSymbols();
8447 Error(L, ".cpu directive not valid for Mach-O");
8448 Parser.eatToEndOfStatement();
8452 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
8453 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
8457 /// parseDirectiveFPU
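/// Illustrative usage (FPU name chosen for the example):
///   .fpu neon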
8459 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
8460 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8461 bool isMachO = MAI->hasSubsectionsViaSymbols();
8463 Error(L, ".fpu directive not valid for Mach-O");
8464 Parser.eatToEndOfStatement();
8468 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
8470 unsigned ID = StringSwitch<unsigned>(FPU)
8471 #define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
8472 #include "ARMFPUName.def"
8473 .Default(ARM::INVALID_FPU);
8475 if (ID == ARM::INVALID_FPU) {
8476 Error(L, "Unknown FPU name");
8480 getTargetStreamer().emitFPU(ID);
8484 /// parseDirectiveFnStart
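/// Illustrative usage: a minimal unwind-annotated function (register list
/// and pad size are hypothetical):
///   .fnstart
///   .save {r4, lr}
///   .pad #8
///   @ ... function body ...
///   .fnend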
8486 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
8487 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8488 bool isMachO = MAI->hasSubsectionsViaSymbols();
8490 Error(L, ".fnstart directive not valid for Mach-O");
8491 Parser.eatToEndOfStatement();
8495 if (UC.hasFnStart()) {
8496 Error(L, ".fnstart starts before the end of the previous one");
8497 UC.emitFnStartLocNotes();
8501 // Reset the unwind directives parser state
8504 getTargetStreamer().emitFnStart();
8506 UC.recordFnStart(L);
8510 /// parseDirectiveFnEnd
8512 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
8513 // Check the ordering of unwind directives
8514 if (!UC.hasFnStart()) {
8515 Error(L, ".fnstart must precede .fnend directive");
8519 // Reset the unwind directives parser state
8520 getTargetStreamer().emitFnEnd();
8526 /// parseDirectiveCantUnwind
8528 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
8529 UC.recordCantUnwind(L);
8531 // Check the ordering of unwind directives
8532 if (!UC.hasFnStart()) {
8533 Error(L, ".fnstart must precede .cantunwind directive");
8536 if (UC.hasHandlerData()) {
8537 Error(L, ".cantunwind can't be used with .handlerdata directive");
8538 UC.emitHandlerDataLocNotes();
8541 if (UC.hasPersonality()) {
8542 Error(L, ".cantunwind can't be used with .personality directive");
8543 UC.emitPersonalityLocNotes();
8547 getTargetStreamer().emitCantUnwind();
8551 /// parseDirectivePersonality
8552 /// ::= .personality name
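/// Illustrative usage (personality routine name chosen for the example):
///   .personality __gxx_personality_v0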
8553 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
8554 bool HasExistingPersonality = UC.hasPersonality();
8556 UC.recordPersonality(L);
8558 // Check the ordering of unwind directives
8559 if (!UC.hasFnStart()) {
8560 Error(L, ".fnstart must precede .personality directive");
8563 if (UC.cantUnwind()) {
8564 Error(L, ".personality can't be used with .cantunwind directive");
8565 UC.emitCantUnwindLocNotes();
8568 if (UC.hasHandlerData()) {
8569 Error(L, ".personality must precede .handlerdata directive");
8570 UC.emitHandlerDataLocNotes();
8573 if (HasExistingPersonality) {
8574 Parser.eatToEndOfStatement();
8575 Error(L, "multiple personality directives");
8576 UC.emitPersonalityLocNotes();
8580 // Parse the name of the personality routine
8581 if (Parser.getTok().isNot(AsmToken::Identifier)) {
8582 Parser.eatToEndOfStatement();
8583 Error(L, "unexpected input in .personality directive.");
8586 StringRef Name(Parser.getTok().getIdentifier());
8589 MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
8590 getTargetStreamer().emitPersonality(PR);
8594 /// parseDirectiveHandlerData
8595 /// ::= .handlerdata
8596 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
8597 UC.recordHandlerData(L);
8599 // Check the ordering of unwind directives
8600 if (!UC.hasFnStart()) {
8601 Error(L, ".fnstart must precede .handlerdata directive");
8604 if (UC.cantUnwind()) {
8605 Error(L, ".handlerdata can't be used with .cantunwind directive");
8606 UC.emitCantUnwindLocNotes();
8610 getTargetStreamer().emitHandlerData();
8614 /// parseDirectiveSetFP
8615 /// ::= .setfp fpreg, spreg [, offset]
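/// Illustrative usage (registers and offset chosen for the example):
///   .setfp fp, sp, #8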
8616 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
8617 // Check the ordering of unwind directives
8618 if (!UC.hasFnStart()) {
8619 Error(L, ".fnstart must precede .setfp directive");
8622 if (UC.hasHandlerData()) {
8623 Error(L, ".setfp must precede .handlerdata directive");
8628 SMLoc FPRegLoc = Parser.getTok().getLoc();
8629 int FPReg = tryParseRegister();
8631 Error(FPRegLoc, "frame pointer register expected");
8636 if (Parser.getTok().isNot(AsmToken::Comma)) {
8637 Error(Parser.getTok().getLoc(), "comma expected");
8640 Parser.Lex(); // skip comma
8643 SMLoc SPRegLoc = Parser.getTok().getLoc();
8644 int SPReg = tryParseRegister();
8646 Error(SPRegLoc, "stack pointer register expected");
8650 if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
8651 Error(SPRegLoc, "register should be either $sp or the latest fp register");
8655 // Update the frame pointer register
8656 UC.saveFPReg(FPReg);
8660 if (Parser.getTok().is(AsmToken::Comma)) {
8661 Parser.Lex(); // skip comma
8663 if (Parser.getTok().isNot(AsmToken::Hash) &&
8664 Parser.getTok().isNot(AsmToken::Dollar)) {
8665 Error(Parser.getTok().getLoc(), "'#' expected");
8668 Parser.Lex(); // skip hash token.
8670 const MCExpr *OffsetExpr;
8671 SMLoc ExLoc = Parser.getTok().getLoc();
8673 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
8674 Error(ExLoc, "malformed setfp offset");
8677 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8679 Error(ExLoc, "setfp offset must be an immediate");
8683 Offset = CE->getValue();
8686 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
8687 static_cast<unsigned>(SPReg), Offset);
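/// Illustrative usage of the .pad directive parsed below (offset chosen for
/// the example):
///   .pad #16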
8693 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
8694 // Check the ordering of unwind directives
8695 if (!UC.hasFnStart()) {
8696 Error(L, ".fnstart must precede .pad directive");
8699 if (UC.hasHandlerData()) {
8700 Error(L, ".pad must precede .handlerdata directive");
8705 if (Parser.getTok().isNot(AsmToken::Hash) &&
8706 Parser.getTok().isNot(AsmToken::Dollar)) {
8707 Error(Parser.getTok().getLoc(), "'#' expected");
8710 Parser.Lex(); // skip hash token.
8712 const MCExpr *OffsetExpr;
8713 SMLoc ExLoc = Parser.getTok().getLoc();
8715 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
8716 Error(ExLoc, "malformed pad offset");
8719 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8721 Error(ExLoc, "pad offset must be an immediate");
8725 getTargetStreamer().emitPad(CE->getValue());
8729 /// parseDirectiveRegSave
8730 /// ::= .save { registers }
8731 /// ::= .vsave { registers }
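/// Illustrative usage (register lists chosen for the example):
///   .save  {r4-r7, lr}
///   .vsave {d8-d11}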
8732 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
8733 // Check the ordering of unwind directives
8734 if (!UC.hasFnStart()) {
8735 Error(L, ".fnstart must precede .save or .vsave directives");
8738 if (UC.hasHandlerData()) {
8739 Error(L, ".save or .vsave must precede .handlerdata directive");
8743 // RAII object to make sure parsed operands are deleted.
8744 struct CleanupObject {
8745 SmallVector<MCParsedAsmOperand *, 1> Operands;
8747 for (unsigned I = 0, E = Operands.size(); I != E; ++I)
8752 // Parse the register list
8753 if (parseRegisterList(CO.Operands))
8755 ARMOperand *Op = (ARMOperand*)CO.Operands[0];
8756 if (!IsVector && !Op->isRegList()) {
8757 Error(L, ".save expects GPR registers");
8760 if (IsVector && !Op->isDPRRegList()) {
8761 Error(L, ".vsave expects DPR registers");
8765 getTargetStreamer().emitRegSave(Op->getRegList(), IsVector);
8769 /// parseDirectiveInst
8770 /// ::= .inst opcode [, ...]
8771 /// ::= .inst.n opcode [, ...]
8772 /// ::= .inst.w opcode [, ...]
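/// Illustrative usage (encodings chosen for the example). In Thumb mode a
/// width suffix is required:
///   .inst.n 0xbf00        @ 16-bit encoding
///   .inst.w 0xf3af8000    @ 32-bit encoding
/// while in ARM mode a plain .inst emits a 4-byte value:
///   .inst 0xe1a00000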
8773 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
8774 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
8775 bool isMachO = MAI->hasSubsectionsViaSymbols();
8777 Error(Loc, ".inst directive not valid for Mach-O");
8778 Parser.eatToEndOfStatement();
8793 Parser.eatToEndOfStatement();
8794 Error(Loc, "cannot determine Thumb instruction size, "
8795 "use inst.n/inst.w instead");
8800 Parser.eatToEndOfStatement();
8801 Error(Loc, "width suffixes are invalid in ARM mode");
8807 if (getLexer().is(AsmToken::EndOfStatement)) {
8808 Parser.eatToEndOfStatement();
8809 Error(Loc, "expected expression following directive");
8816 if (getParser().parseExpression(Expr)) {
8817 Error(Loc, "expected expression");
8821 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
8823 Error(Loc, "expected constant expression");
8829 if (Value->getValue() > 0xffff) {
8830 Error(Loc, "inst.n operand is too big, use inst.w instead");
8835 if (Value->getValue() > 0xffffffff) {
8837 StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
8842 llvm_unreachable("only supported widths are 2 and 4");
8845 getTargetStreamer().emitInst(Value->getValue(), Suffix);
8847 if (getLexer().is(AsmToken::EndOfStatement))
8850 if (getLexer().isNot(AsmToken::Comma)) {
8851 Error(Loc, "unexpected token in directive");
8862 /// parseDirectiveLtorg
8863 /// ::= .ltorg | .pool
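/// Illustrative usage: force the current literal pool to be dumped here,
/// e.g. after code that creates constant-pool entries:
///   ldr r0, =0x12345678
///   .ltorg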
8864 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
8865 getTargetStreamer().emitCurrentConstantPool();
8869 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
8870 const MCSection *Section = getStreamer().getCurrentSection().first;
8872 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8873 TokError("unexpected token in directive");
8878 getStreamer().InitSections();
8879 Section = getStreamer().getCurrentSection().first;
8882 assert(Section && "must have section to emit alignment");
8883 if (Section->UseCodeAlign())
8884 getStreamer().EmitCodeAlignment(2);
8886 getStreamer().EmitValueToAlignment(2);
8891 /// parseDirectivePersonalityIndex
8892 /// ::= .personalityindex index
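/// Illustrative usage (index 0 selects the __aeabi_unwind_cpp_pr0 routine):
///   .personalityindex 0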
8893 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
8894 bool HasExistingPersonality = UC.hasPersonality();
8896 UC.recordPersonalityIndex(L);
8898 if (!UC.hasFnStart()) {
8899 Parser.eatToEndOfStatement();
8900 Error(L, ".fnstart must precede .personalityindex directive");
8903 if (UC.cantUnwind()) {
8904 Parser.eatToEndOfStatement();
8905 Error(L, ".personalityindex cannot be used with .cantunwind");
8906 UC.emitCantUnwindLocNotes();
8909 if (UC.hasHandlerData()) {
8910 Parser.eatToEndOfStatement();
8911 Error(L, ".personalityindex must precede .handlerdata directive");
8912 UC.emitHandlerDataLocNotes();
8915 if (HasExistingPersonality) {
8916 Parser.eatToEndOfStatement();
8917 Error(L, "multiple personality directives");
8918 UC.emitPersonalityLocNotes();
8922 const MCExpr *IndexExpression;
8923 SMLoc IndexLoc = Parser.getTok().getLoc();
8924 if (Parser.parseExpression(IndexExpression)) {
8925 Parser.eatToEndOfStatement();
8929 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
8931 Parser.eatToEndOfStatement();
8932 Error(IndexLoc, "index must be a constant number");
8935 if (CE->getValue() < 0 ||
8936 CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
8937 Parser.eatToEndOfStatement();
8938 Error(IndexLoc, "personality routine index should be in range [0-2]");
8942 getTargetStreamer().emitPersonalityIndex(CE->getValue());
8946 /// parseDirectiveUnwindRaw
8947 /// ::= .unwind_raw offset, opcode [, opcode...]
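/// Illustrative usage (offset and opcode chosen for the example; 0xb0 is the
/// EHABI "finish" opcode):
///   .unwind_raw 0, 0xb0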
8948 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
8949 if (!UC.hasFnStart()) {
8950 Parser.eatToEndOfStatement();
8951 Error(L, ".fnstart must precede .unwind_raw directive");
8955 int64_t StackOffset;
8957 const MCExpr *OffsetExpr;
8958 SMLoc OffsetLoc = getLexer().getLoc();
8959 if (getLexer().is(AsmToken::EndOfStatement) ||
8960 getParser().parseExpression(OffsetExpr)) {
8961 Error(OffsetLoc, "expected expression");
8962 Parser.eatToEndOfStatement();
8966 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8968 Error(OffsetLoc, "offset must be a constant");
8969 Parser.eatToEndOfStatement();
8973 StackOffset = CE->getValue();
8975 if (getLexer().isNot(AsmToken::Comma)) {
8976 Error(getLexer().getLoc(), "expected comma");
8977 Parser.eatToEndOfStatement();
8982 SmallVector<uint8_t, 16> Opcodes;
8986 SMLoc OpcodeLoc = getLexer().getLoc();
8987 if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
8988 Error(OpcodeLoc, "expected opcode expression");
8989 Parser.eatToEndOfStatement();
8993 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
8995 Error(OpcodeLoc, "opcode value must be a constant");
8996 Parser.eatToEndOfStatement();
9000 const int64_t Opcode = OC->getValue();
9001 if (Opcode & ~0xff) {
9002 Error(OpcodeLoc, "invalid opcode");
9003 Parser.eatToEndOfStatement();
9007 Opcodes.push_back(uint8_t(Opcode));
9009 if (getLexer().is(AsmToken::EndOfStatement))
9012 if (getLexer().isNot(AsmToken::Comma)) {
9013 Error(getLexer().getLoc(), "unexpected token in directive");
9014 Parser.eatToEndOfStatement();
9021 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
9027 /// parseDirectiveTLSDescSeq
9028 /// ::= .tlsdescseq tls-variable
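/// Illustrative usage (variable name is hypothetical):
///   .tlsdescseq tls_var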
9029 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
9030 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
9031 bool isMachO = MAI->hasSubsectionsViaSymbols();
9033 Error(L, ".tlsdescseq directive not valid for Mach-O");
9034 Parser.eatToEndOfStatement();
9038 if (getLexer().isNot(AsmToken::Identifier)) {
9039 TokError("expected variable after '.tlsdescseq' directive");
9040 Parser.eatToEndOfStatement();
9044 const MCSymbolRefExpr *SRE =
9045 MCSymbolRefExpr::Create(Parser.getTok().getIdentifier(),
9046 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
9049 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9050 Error(Parser.getTok().getLoc(), "unexpected token");
9051 Parser.eatToEndOfStatement();
9055 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
9059 /// parseDirectiveMovSP
9060 /// ::= .movsp reg [, #offset]
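/// Illustrative usage (register chosen for the example):
///   .movsp r7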
9061 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
9062 if (!UC.hasFnStart()) {
9063 Parser.eatToEndOfStatement();
9064 Error(L, ".fnstart must precede .movsp directive");
9067 if (UC.getFPReg() != ARM::SP) {
9068 Parser.eatToEndOfStatement();
9069 Error(L, "unexpected .movsp directive");
9073 SMLoc SPRegLoc = Parser.getTok().getLoc();
9074 int SPReg = tryParseRegister();
9076 Parser.eatToEndOfStatement();
9077 Error(SPRegLoc, "register expected");
9081 if (SPReg == ARM::SP || SPReg == ARM::PC) {
9082 Parser.eatToEndOfStatement();
9083 Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
9088 if (Parser.getTok().is(AsmToken::Comma)) {
9091 if (Parser.getTok().isNot(AsmToken::Hash)) {
9092 Error(Parser.getTok().getLoc(), "expected #constant");
9093 Parser.eatToEndOfStatement();
9098 const MCExpr *OffsetExpr;
9099 SMLoc OffsetLoc = Parser.getTok().getLoc();
9100 if (Parser.parseExpression(OffsetExpr)) {
9101 Parser.eatToEndOfStatement();
9102 Error(OffsetLoc, "malformed offset expression");
9106 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9108 Parser.eatToEndOfStatement();
9109 Error(OffsetLoc, "offset must be an immediate constant");
9113 Offset = CE->getValue();
9116 getTargetStreamer().emitMovSP(SPReg, Offset);
9117 UC.saveFPReg(SPReg);
9122 /// parseDirectiveObjectArch
9123 /// ::= .object_arch name
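/// Illustrative usage (architecture name chosen for the example):
///   .object_arch armv4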
9124 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
9125 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
9126 bool isMachO = MAI->hasSubsectionsViaSymbols();
9128 Error(L, ".object_arch directive not valid for Mach-O");
9129 Parser.eatToEndOfStatement();
9133 if (getLexer().isNot(AsmToken::Identifier)) {
9134 Error(getLexer().getLoc(), "unexpected token");
9135 Parser.eatToEndOfStatement();
9139 StringRef Arch = Parser.getTok().getString();
9140 SMLoc ArchLoc = Parser.getTok().getLoc();
9143 unsigned ID = StringSwitch<unsigned>(Arch)
9144 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
9145 .Case(NAME, ARM::ID)
9146 #define ARM_ARCH_ALIAS(NAME, ID) \
9147 .Case(NAME, ARM::ID)
9148 #include "MCTargetDesc/ARMArchName.def"
9149 #undef ARM_ARCH_NAME
9150 #undef ARM_ARCH_ALIAS
9151 .Default(ARM::INVALID_ARCH);
9153 if (ID == ARM::INVALID_ARCH) {
9154 Error(ArchLoc, "unknown architecture '" + Arch + "'");
9155 Parser.eatToEndOfStatement();
9159 getTargetStreamer().emitObjectArch(ID);
9161 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9162 Error(getLexer().getLoc(), "unexpected token");
9163 Parser.eatToEndOfStatement();
9169 /// parseDirectiveAlign
9171 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
9172 // NOTE: if this is not the end of the statement, fall back to the
9173 // target-agnostic handling for this directive, which will handle it correctly.
9174 if (getLexer().isNot(AsmToken::EndOfStatement))
9177 // '.align' is handled specially by this target to mean 2**2-byte alignment.
9178 if (getStreamer().getCurrentSection().first->UseCodeAlign())
9179 getStreamer().EmitCodeAlignment(4, 0);
9181 getStreamer().EmitValueToAlignment(4, 0, 1, 0);
9186 /// parseDirectiveThumbSet
9187 /// ::= .thumb_set name, value
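/// Illustrative usage (symbol names are hypothetical):
///   .thumb_set alias_fn, target_fn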
9188 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
9190 if (Parser.parseIdentifier(Name)) {
9191 TokError("expected identifier after '.thumb_set'");
9192 Parser.eatToEndOfStatement();
9196 if (getLexer().isNot(AsmToken::Comma)) {
9197 TokError("expected comma after name '" + Name + "'");
9198 Parser.eatToEndOfStatement();
9203 const MCExpr *Value;
9204 if (Parser.parseExpression(Value)) {
9205 TokError("missing expression");
9206 Parser.eatToEndOfStatement();
9210 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9211 TokError("unexpected token");
9212 Parser.eatToEndOfStatement();
9217 MCSymbol *Alias = getContext().GetOrCreateSymbol(Name);
9218 if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(Value)) {
9219 MCSymbol *Sym = getContext().LookupSymbol(SRE->getSymbol().getName());
9220 if (!Sym->isDefined()) {
9221 getStreamer().EmitSymbolAttribute(Sym, MCSA_Global);
9222 getStreamer().EmitAssignment(Alias, Value);
9226 const MCObjectFileInfo::Environment Format =
9227 getContext().getObjectFileInfo()->getObjectFileType();
9229 case MCObjectFileInfo::IsCOFF: {
9230 char Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
9231 getStreamer().EmitCOFFSymbolType(Type);
9232 // .set values are always local in COFF
9233 getStreamer().EmitSymbolAttribute(Alias, MCSA_Local);
9236 case MCObjectFileInfo::IsELF:
9237 getStreamer().EmitSymbolAttribute(Alias, MCSA_ELF_TypeFunction);
9239 case MCObjectFileInfo::IsMachO:
9244 // FIXME: set the function as being a thumb function via the assembler
9245 getStreamer().EmitThumbFunc(Alias);
9246 getStreamer().EmitAssignment(Alias, Value);
9251 /// Force static initialization.
9252 extern "C" void LLVMInitializeARMAsmParser() {
9253 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
9254 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
9257 #define GET_REGISTER_MATCHER
9258 #define GET_SUBTARGET_FEATURE_NAME
9259 #define GET_MATCHER_IMPLEMENTATION
9260 #include "ARMGenAsmMatcher.inc"
9262 static const struct ExtMapEntry {
9263 const char *Extension;
9264 const unsigned ArchCheck;
9265 const uint64_t Features;
9267 { "crc", Feature_HasV8, ARM::FeatureCRC },
9268 { "crypto", Feature_HasV8,
9269 ARM::FeatureCrypto | ARM::FeatureNEON | ARM::FeatureFPARMv8 },
9270 { "fp", Feature_HasV8, ARM::FeatureFPARMv8 },
9271 { "idiv", Feature_HasV7 | Feature_IsNotMClass,
9272 ARM::FeatureHWDiv | ARM::FeatureHWDivARM },
9273 // FIXME: iWMMXT not supported
9274 { "iwmmxt", Feature_None, 0 },
9275 // FIXME: iWMMXT2 not supported
9276 { "iwmmxt2", Feature_None, 0 },
9277 // FIXME: Maverick not supported
9278 { "maverick", Feature_None, 0 },
9279 { "mp", Feature_HasV7 | Feature_IsNotMClass, ARM::FeatureMP },
9280 // FIXME: ARMv6-m OS Extensions feature not checked
9281 { "os", Feature_None, 0 },
9282 // FIXME: Also available in ARMv6-K
9283 { "sec", Feature_HasV7, ARM::FeatureTrustZone },
9284 { "simd", Feature_HasV8, ARM::FeatureNEON | ARM::FeatureFPARMv8 },
9285 // FIXME: Only available in A-class, isel not predicated
9286 { "virt", Feature_HasV7, ARM::FeatureVirtualization },
9287 // FIXME: xscale not supported
9288 { "xscale", Feature_None, 0 },
9291 /// parseDirectiveArchExtension
9292 /// ::= .arch_extension [no]feature
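/// Illustrative usage (extension names taken from the table above):
///   .arch_extension crc
///   .arch_extension nocrypto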
9293 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
9294 if (getLexer().isNot(AsmToken::Identifier)) {
9295 Error(getLexer().getLoc(), "unexpected token");
9296 Parser.eatToEndOfStatement();
9300 StringRef Extension = Parser.getTok().getString();
9301 SMLoc ExtLoc = Parser.getTok().getLoc();
9304 bool EnableFeature = true;
9305 if (Extension.startswith_lower("no")) {
9306 EnableFeature = false;
9307 Extension = Extension.substr(2);
9310 for (unsigned EI = 0, EE = array_lengthof(Extensions); EI != EE; ++EI) {
9311 if (Extensions[EI].Extension != Extension)
9314 unsigned FB = getAvailableFeatures();
9315 if ((FB & Extensions[EI].ArchCheck) != Extensions[EI].ArchCheck) {
9316 Error(ExtLoc, "architectural extension '" + Extension + "' is not "
9317 "allowed for the current base architecture");
9321 if (!Extensions[EI].Features)
9322 report_fatal_error("unsupported architectural extension: " + Extension);
9325 FB |= ComputeAvailableFeatures(Extensions[EI].Features);
9327 FB &= ~ComputeAvailableFeatures(Extensions[EI].Features);
9329 setAvailableFeatures(FB);
9333 Error(ExtLoc, "unknown architectural extension: " + Extension);
9334 Parser.eatToEndOfStatement();
9338 // Define this matcher function after the auto-generated include so we
9339 // have the match class enum definitions.
9340 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
9342 ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
9343 // If the kind is a token for a literal immediate, check if our asm
9344 // operand matches. This is for InstAliases which have a fixed-value
9345 // immediate in the syntax.
9350 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()))
9351 if (CE->getValue() == 0)
9352 return Match_Success;
9356 const MCExpr *SOExpr = Op->getImm();
9358 if (!SOExpr->EvaluateAsAbsolute(Value))
9359 return Match_Success;
9360 assert((Value >= INT32_MIN && Value <= INT32_MAX) &&
9361 "expression value must be representable in 32 bits");
9366 MRI->getRegClass(ARM::GPRRegClassID).contains(Op->getReg()))
9367 return Match_Success;
9370 return Match_InvalidOperand;