X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FARM%2FAsmParser%2FARMAsmParser.cpp;h=6e7a27ae03e6c366824fcd3531a4380d28701ac8;hb=2664ea938ffdd15261876917640a5d70f1f30945;hp=6eea56795ba1a760f513f2cf6ad9f0bbd4d3691c;hpb=70883a252ca14648774609146cb533593254a6a4;p=oota-llvm.git

diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 6eea56795ba..6e7a27ae03e 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -129,12 +129,13 @@ public:
 class ARMAsmParser : public MCTargetAsmParser {
   MCSubtargetInfo &STI;
-  MCAsmParser &Parser;
   const MCInstrInfo &MII;
   const MCRegisterInfo *MRI;
   UnwindContext UC;
 
   ARMTargetStreamer &getTargetStreamer() {
+    assert(getParser().getStreamer().getTargetStreamer() &&
+           "do not have a target streamer");
     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
     return static_cast<ARMTargetStreamer &>(TS);
   }
@@ -173,28 +174,24 @@ class ARMAsmParser : public MCTargetAsmParser {
     ITState.CurPosition = ~0U; // Done with the IT block after this.
   }
 
-
-  MCAsmParser &getParser() const { return Parser; }
-  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
   void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
-    return Parser.Note(L, Msg, Ranges);
+    return getParser().Note(L, Msg, Ranges);
   }
   bool Warning(SMLoc L, const Twine &Msg,
                ArrayRef<SMRange> Ranges = None) {
-    return Parser.Warning(L, Msg, Ranges);
+    return getParser().Warning(L, Msg, Ranges);
   }
   bool Error(SMLoc L, const Twine &Msg,
              ArrayRef<SMRange> Ranges = None) {
-    return Parser.Error(L, Msg, Ranges);
+    return getParser().Error(L, Msg, Ranges);
   }
 
   int tryParseRegister();
-  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
-  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
-  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
-  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
-  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
+  bool tryParseRegisterWithWriteBack(OperandVector &);
+  int tryParseShiftRegister(OperandVector &);
+  bool parseRegisterList(OperandVector &);
+  bool parseMemory(OperandVector &);
+  bool parseOperand(OperandVector &, StringRef Mnemonic);
   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                               unsigned &ShiftAmount);
@@ -265,9 +262,15 @@ class ARMAsmParser : public MCTargetAsmParser {
   bool hasARM() const {
     return !(STI.getFeatureBits() & ARM::FeatureNoARM);
   }
+  bool hasThumb2DSP() const {
+    return STI.getFeatureBits() & ARM::FeatureDSPThumb2;
+  }
+  bool hasD16() const {
+    return STI.getFeatureBits() & ARM::FeatureD16;
+  }
 
   void SwitchMode() {
-    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
+    uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
     setAvailableFeatures(FB);
   }
   bool isMClass() const {
@@ -282,54 +285,44 @@ class ARMAsmParser : public MCTargetAsmParser {
 
   /// }
 
-  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseCoprocNumOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseCoprocRegOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseCoprocOptionOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseMemBarrierOptOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseInstSyncBarrierOptOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseProcIFlagsOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parseMSRMaskOperand(
-    SmallVectorImpl<MCParsedAsmOperand*>&);
-  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
-                                   StringRef Op, int Low, int High);
-
OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl &O) { + OperandMatchResultTy parseITCondCode(OperandVector &); + OperandMatchResultTy parseCoprocNumOperand(OperandVector &); + OperandMatchResultTy parseCoprocRegOperand(OperandVector &); + OperandMatchResultTy parseCoprocOptionOperand(OperandVector &); + OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &); + OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &); + OperandMatchResultTy parseProcIFlagsOperand(OperandVector &); + OperandMatchResultTy parseMSRMaskOperand(OperandVector &); + OperandMatchResultTy parseBankedRegOperand(OperandVector &); + OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low, + int High); + OperandMatchResultTy parsePKHLSLImm(OperandVector &O) { return parsePKHImm(O, "lsl", 0, 31); } - OperandMatchResultTy parsePKHASRImm(SmallVectorImpl &O) { + OperandMatchResultTy parsePKHASRImm(OperandVector &O) { return parsePKHImm(O, "asr", 1, 32); } - OperandMatchResultTy parseSetEndImm(SmallVectorImpl&); - OperandMatchResultTy parseShifterImm(SmallVectorImpl&); - OperandMatchResultTy parseRotImm(SmallVectorImpl&); - OperandMatchResultTy parseBitfield(SmallVectorImpl&); - OperandMatchResultTy parsePostIdxReg(SmallVectorImpl&); - OperandMatchResultTy parseAM3Offset(SmallVectorImpl&); - OperandMatchResultTy parseFPImm(SmallVectorImpl&); - OperandMatchResultTy parseVectorList(SmallVectorImpl&); + OperandMatchResultTy parseSetEndImm(OperandVector &); + OperandMatchResultTy parseShifterImm(OperandVector &); + OperandMatchResultTy parseRotImm(OperandVector &); + OperandMatchResultTy parseModImm(OperandVector &); + OperandMatchResultTy parseBitfield(OperandVector &); + OperandMatchResultTy parsePostIdxReg(OperandVector &); + OperandMatchResultTy parseAM3Offset(OperandVector &); + OperandMatchResultTy parseFPImm(OperandVector &); + OperandMatchResultTy parseVectorList(OperandVector &); OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc); // Asm Match Converter Methods - void cvtThumbMultiply(MCInst &Inst, - const SmallVectorImpl &); - void cvtThumbBranches(MCInst &Inst, - const SmallVectorImpl &); - - bool validateInstruction(MCInst &Inst, - const SmallVectorImpl &Ops); - bool processInstruction(MCInst &Inst, - const SmallVectorImpl &Ops); - bool shouldOmitCCOutOperand(StringRef Mnemonic, - SmallVectorImpl &Operands); - bool shouldOmitPredicateOperand(StringRef Mnemonic, - SmallVectorImpl &Operands); + void cvtThumbMultiply(MCInst &Inst, const OperandVector &); + void cvtThumbBranches(MCInst &Inst, const OperandVector &); + + bool validateInstruction(MCInst &Inst, const OperandVector &Ops); + bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out); + bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands); + bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands); + public: enum ARMMatchResultTy { Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, @@ -341,10 +334,9 @@ public: }; - ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser, - const MCInstrInfo &MII, - const MCTargetOptions &Options) - : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), UC(_Parser) { + ARMAsmParser(MCSubtargetInfo & _STI, MCAsmParser & _Parser, + const MCInstrInfo &MII, const MCTargetOptions &Options) + : MCTargetAsmParser(), STI(_STI), MII(MII), UC(_Parser) { MCAsmParserExtension::Initialize(_Parser); // Cache the MCRegisterInfo. 
@@ -361,19 +353,17 @@ public: // Implementation of the MCTargetAsmParser interface: bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; - bool - ParseInstruction(ParseInstructionInfo &Info, StringRef Name, - SMLoc NameLoc, - SmallVectorImpl &Operands) override; + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override; bool ParseDirective(AsmToken DirectiveID) override; - unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, + unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind) override; unsigned checkTargetMatchPredicate(MCInst &Inst) override; bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl &Operands, - MCStreamer &Out, unsigned &ErrorInfo, + OperandVector &Operands, MCStreamer &Out, + uint64_t &ErrorInfo, bool MatchingInlineAsm) override; void onLabelParsed(MCSymbol *Symbol) override; }; @@ -397,6 +387,7 @@ class ARMOperand : public MCParsedAsmOperand { k_Memory, k_PostIndexRegister, k_MSRMask, + k_BankedReg, k_ProcIFlags, k_VectorIndex, k_Register, @@ -410,6 +401,7 @@ class ARMOperand : public MCParsedAsmOperand { k_ShiftedImmediate, k_ShifterImmediate, k_RotateImmediate, + k_ModifiedImmediate, k_BitfieldDescriptor, k_Token } Kind; @@ -449,6 +441,10 @@ class ARMOperand : public MCParsedAsmOperand { unsigned Val; }; + struct BankedRegOp { + unsigned Val; + }; + struct TokOp { const char *Data; unsigned Length; @@ -517,6 +513,11 @@ class ARMOperand : public MCParsedAsmOperand { unsigned Imm; }; + struct ModImmOp { + unsigned Bits; + unsigned Rot; + }; + struct BitfieldOp { unsigned LSB; unsigned Width; @@ -531,6 +532,7 @@ class ARMOperand : public MCParsedAsmOperand { struct ITMaskOp ITMask; struct IFlagsOp IFlags; struct MMaskOp MMask; + struct BankedRegOp BankedReg; struct TokOp Tok; struct RegOp Reg; struct VectorListOp VectorList; @@ -542,11 +544,12 @@ class ARMOperand : public MCParsedAsmOperand { struct RegShiftedRegOp RegShiftedReg; struct RegShiftedImmOp RegShiftedImm; struct RotImmOp RotImm; + struct ModImmOp ModImm; struct BitfieldOp Bitfield; }; - ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} public: + ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { Kind = o.Kind; StartLoc = o.StartLoc; @@ -599,6 +602,9 @@ public: case k_MSRMask: MMask = o.MMask; break; + case k_BankedReg: + BankedReg = o.BankedReg; + break; case k_ProcIFlags: IFlags = o.IFlags; break; @@ -614,6 +620,9 @@ public: case k_RotateImmediate: RotImm = o.RotImm; break; + case k_ModifiedImmediate: + ModImm = o.ModImm; + break; case k_BitfieldDescriptor: Bitfield = o.Bitfield; break; @@ -693,6 +702,11 @@ public: return MMask.Val; } + unsigned getBankedReg() const { + assert(Kind == k_BankedReg && "Invalid access!"); + return BankedReg.Val; + } + bool isCoprocNum() const { return Kind == k_CoprocNum; } bool isCoprocReg() const { return Kind == k_CoprocReg; } bool isCoprocOption() const { return Kind == k_CoprocOption; } @@ -1088,6 +1102,22 @@ public: bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } bool isRotImm() const { return Kind == k_RotateImmediate; } + bool isModImm() const { return Kind == k_ModifiedImmediate; } + bool isModImmNot() const { + if (!isImm()) return false; + const MCConstantExpr *CE = dyn_cast(getImm()); + if (!CE) return false; + int64_t Value = CE->getValue(); + return 
ARM_AM::getSOImmVal(~Value) != -1;
+  }
+  bool isModImmNeg() const {
+    if (!isImm()) return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+    int64_t Value = CE->getValue();
+    return ARM_AM::getSOImmVal(Value) == -1 &&
+      ARM_AM::getSOImmVal(-Value) != -1;
+  }
   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
   bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
   bool isPostIdxReg() const {
@@ -1398,6 +1428,7 @@ public:
   }
   bool isMSRMask() const { return Kind == k_MSRMask; }
+  bool isBankedReg() const { return Kind == k_BankedReg; }
   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
 
   // NEON operands.
@@ -1615,9 +1646,18 @@ public:
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
     // Must be a constant.
     if (!CE) return false;
-    int64_t Value = CE->getValue();
-    // i16 value in the range [0,255] or [0x0100, 0xff00]
-    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
+    unsigned Value = CE->getValue();
+    return ARM_AM::isNEONi16splat(Value);
+  }
+
+  bool isNEONi16splatNot() const {
+    if (!isImm())
+      return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    // Must be a constant.
+    if (!CE) return false;
+    unsigned Value = CE->getValue();
+    return ARM_AM::isNEONi16splat(~Value & 0xffff);
   }
 
   bool isNEONi32splat() const {
@@ -1628,12 +1668,18 @@ public:
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
     // Must be a constant.
     if (!CE) return false;
-    int64_t Value = CE->getValue();
-    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
-    return (Value >= 0 && Value < 256) ||
-      (Value >= 0x0100 && Value <= 0xff00) ||
-      (Value >= 0x010000 && Value <= 0xff0000) ||
-      (Value >= 0x01000000 && Value <= 0xff000000);
+    unsigned Value = CE->getValue();
+    return ARM_AM::isNEONi32splat(Value);
+  }
+
+  bool isNEONi32splatNot() const {
+    if (!isImm())
+      return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    // Must be a constant.
+    if (!CE) return false;
+    unsigned Value = CE->getValue();
+    return ARM_AM::isNEONi32splat(~Value);
   }
 
   bool isNEONByteReplicate(unsigned NumBytes) const {
@@ -1669,6 +1715,7 @@ public:
     int64_t Value = CE->getValue();
     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
+    // FIXME: This is probably wrong and a copy and paste from previous example
     return (Value >= 0 && Value < 256) ||
       (Value >= 0x0100 && Value <= 0xff00) ||
       (Value >= 0x010000 && Value <= 0xff0000) ||
@@ -1684,6 +1731,7 @@ public:
     int64_t Value = ~CE->getValue();
     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
+    // FIXME: This is probably wrong and a copy and paste from previous example
     return (Value >= 0 && Value < 256) ||
       (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
@@ -1805,6 +1853,39 @@ public:
     Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
   }
 
+  void addModImmOperands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+
+    // Support for fixups (MCFixup)
+    if (isImm())
+      return addImmOperands(Inst, N);
+
+    if (Inst.getOpcode() == ARM::ADDri &&
+        Inst.getOperand(1).getReg() == ARM::PC) {
+      // Instructions of the form [ADD <Rd>, pc, #imm] are manually aliased
+      // in processInstruction() to use ADR. We must keep the immediate in
+      // its unencoded form in order to not clash with this aliasing.
+ Inst.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(ModImm.Bits, + ModImm.Rot))); + } else { + Inst.addOperand(MCOperand::CreateImm(ModImm.Bits | (ModImm.Rot << 7))); + } + } + + void addModImmNotOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + const MCConstantExpr *CE = dyn_cast(getImm()); + uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue()); + Inst.addOperand(MCOperand::CreateImm(Enc)); + } + + void addModImmNegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + const MCConstantExpr *CE = dyn_cast(getImm()); + uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue()); + Inst.addOperand(MCOperand::CreateImm(Enc)); + } + void addBitfieldOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // Munge the lsb/width into a bitfield mask. @@ -2348,6 +2429,11 @@ public: Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); } + void addBankedRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateImm(unsigned(getBankedReg()))); + } + void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); @@ -2392,10 +2478,16 @@ public: // The immediate encodes the type of constant as well as the value. const MCConstantExpr *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); - if (Value >= 256) - Value = (Value >> 8) | 0xa00; - else - Value |= 0x800; + Value = ARM_AM::encodeNEONi16splat(Value); + Inst.addOperand(MCOperand::CreateImm(Value)); + } + + void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + // The immediate encodes the type of constant as well as the value. + const MCConstantExpr *CE = dyn_cast(getImm()); + unsigned Value = CE->getValue(); + Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff); Inst.addOperand(MCOperand::CreateImm(Value)); } @@ -2404,12 +2496,16 @@ public: // The immediate encodes the type of constant as well as the value. const MCConstantExpr *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); - if (Value >= 256 && Value <= 0xff00) - Value = (Value >> 8) | 0x200; - else if (Value > 0xffff && Value <= 0xff0000) - Value = (Value >> 16) | 0x400; - else if (Value > 0xffffff) - Value = (Value >> 24) | 0x600; + Value = ARM_AM::encodeNEONi32splat(Value); + Inst.addOperand(MCOperand::CreateImm(Value)); + } + + void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + // The immediate encodes the type of constant as well as the value. 
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    unsigned Value = CE->getValue();
+    Value = ARM_AM::encodeNEONi32splat(~Value);
     Inst.addOperand(MCOperand::CreateImm(Value));
   }
 
@@ -2481,56 +2577,58 @@ public:
 
   void print(raw_ostream &OS) const override;
 
-  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_ITCondMask);
+  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_ITCondMask);
     Op->ITMask.Mask = Mask;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
   }
 
-  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_CondCode);
+  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
+                                                    SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_CondCode);
     Op->CC.Val = CC;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
   }
 
-  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_CoprocNum);
+  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_CoprocNum);
     Op->Cop.Val = CopVal;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
   }
 
-  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_CoprocReg);
+  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val = CopVal;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
   }
 
-  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
-    ARMOperand *Op = new ARMOperand(k_CoprocOption);
+  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
+                                                        SMLoc E) {
+    auto Op = make_unique<ARMOperand>(k_CoprocOption);
     Op->Cop.Val = Val;
     Op->StartLoc = S;
     Op->EndLoc = E;
     return Op;
   }
 
-  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_CCOut);
+  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_CCOut);
     Op->Reg.RegNum = RegNum;
     Op->StartLoc = S;
     Op->EndLoc = S;
     return Op;
   }
 
-  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
-    ARMOperand *Op = new ARMOperand(k_Token);
+  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
+    auto Op = make_unique<ARMOperand>(k_Token);
     Op->Tok.Data = Str.data();
     Op->Tok.Length = Str.size();
     Op->StartLoc = S;
@@ -2538,20 +2636,20 @@ public:
     return Op;
   }
 
-  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
-    ARMOperand *Op = new ARMOperand(k_Register);
+  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
+                                               SMLoc E) {
+    auto Op = make_unique<ARMOperand>(k_Register);
     Op->Reg.RegNum = RegNum;
     Op->StartLoc = S;
     Op->EndLoc = E;
     return Op;
   }
 
-  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
-                                           unsigned SrcReg,
-                                           unsigned ShiftReg,
-                                           unsigned ShiftImm,
-                                           SMLoc S, SMLoc E) {
-    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
+  static std::unique_ptr<ARMOperand>
+  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
+                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
+                        SMLoc E) {
+    auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
     Op->RegShiftedReg.ShiftTy = ShTy;
     Op->RegShiftedReg.SrcReg = SrcReg;
     Op->RegShiftedReg.ShiftReg = ShiftReg;
@@ -2561,11 +2659,10 @@ public:
     return Op;
   }
 
-  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
-                                            unsigned SrcReg,
-                                            unsigned ShiftImm,
-                                            SMLoc S, SMLoc E) {
-    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
+  static std::unique_ptr<ARMOperand>
+  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
+                         unsigned ShiftImm, SMLoc S, SMLoc E) {
+    auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
     Op->RegShiftedImm.ShiftTy = ShTy;
Op->RegShiftedImm.SrcReg = SrcReg; Op->RegShiftedImm.ShiftImm = ShiftImm; @@ -2574,9 +2671,9 @@ public: return Op; } - static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, - SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_ShifterImmediate); + static std::unique_ptr CreateShifterImm(bool isASR, unsigned Imm, + SMLoc S, SMLoc E) { + auto Op = make_unique(k_ShifterImmediate); Op->ShifterImm.isASR = isASR; Op->ShifterImm.Imm = Imm; Op->StartLoc = S; @@ -2584,17 +2681,28 @@ public: return Op; } - static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_RotateImmediate); + static std::unique_ptr CreateRotImm(unsigned Imm, SMLoc S, + SMLoc E) { + auto Op = make_unique(k_RotateImmediate); Op->RotImm.Imm = Imm; Op->StartLoc = S; Op->EndLoc = E; return Op; } - static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width, - SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); + static std::unique_ptr CreateModImm(unsigned Bits, unsigned Rot, + SMLoc S, SMLoc E) { + auto Op = make_unique(k_ModifiedImmediate); + Op->ModImm.Bits = Bits; + Op->ModImm.Rot = Rot; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr + CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) { + auto Op = make_unique(k_BitfieldDescriptor); Op->Bitfield.LSB = LSB; Op->Bitfield.Width = Width; Op->StartLoc = S; @@ -2602,8 +2710,8 @@ public: return Op; } - static ARMOperand * - CreateRegList(SmallVectorImpl > &Regs, + static std::unique_ptr + CreateRegList(SmallVectorImpl> &Regs, SMLoc StartLoc, SMLoc EndLoc) { assert (Regs.size() > 0 && "RegList contains no registers?"); KindTy Kind = k_RegisterList; @@ -2617,7 +2725,7 @@ public: // Sort based on the register encoding values. 
array_pod_sort(Regs.begin(), Regs.end()); - ARMOperand *Op = new ARMOperand(Kind); + auto Op = make_unique(Kind); for (SmallVectorImpl >::const_iterator I = Regs.begin(), E = Regs.end(); I != E; ++I) Op->Registers.push_back(I->second); @@ -2626,9 +2734,11 @@ public: return Op; } - static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count, - bool isDoubleSpaced, SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_VectorList); + static std::unique_ptr CreateVectorList(unsigned RegNum, + unsigned Count, + bool isDoubleSpaced, + SMLoc S, SMLoc E) { + auto Op = make_unique(k_VectorList); Op->VectorList.RegNum = RegNum; Op->VectorList.Count = Count; Op->VectorList.isDoubleSpaced = isDoubleSpaced; @@ -2637,10 +2747,10 @@ public: return Op; } - static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count, - bool isDoubleSpaced, - SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_VectorListAllLanes); + static std::unique_ptr + CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced, + SMLoc S, SMLoc E) { + auto Op = make_unique(k_VectorListAllLanes); Op->VectorList.RegNum = RegNum; Op->VectorList.Count = Count; Op->VectorList.isDoubleSpaced = isDoubleSpaced; @@ -2649,11 +2759,10 @@ public: return Op; } - static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count, - unsigned Index, - bool isDoubleSpaced, - SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_VectorListIndexed); + static std::unique_ptr + CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index, + bool isDoubleSpaced, SMLoc S, SMLoc E) { + auto Op = make_unique(k_VectorListIndexed); Op->VectorList.RegNum = RegNum; Op->VectorList.Count = Count; Op->VectorList.LaneIndex = Index; @@ -2663,33 +2772,30 @@ public: return Op; } - static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, - MCContext &Ctx) { - ARMOperand *Op = new ARMOperand(k_VectorIndex); + static std::unique_ptr + CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { + auto Op = make_unique(k_VectorIndex); Op->VectorIndex.Val = Idx; Op->StartLoc = S; Op->EndLoc = E; return Op; } - static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_Immediate); + static std::unique_ptr CreateImm(const MCExpr *Val, SMLoc S, + SMLoc E) { + auto Op = make_unique(k_Immediate); Op->Imm.Val = Val; Op->StartLoc = S; Op->EndLoc = E; return Op; } - static ARMOperand *CreateMem(unsigned BaseRegNum, - const MCConstantExpr *OffsetImm, - unsigned OffsetRegNum, - ARM_AM::ShiftOpc ShiftType, - unsigned ShiftImm, - unsigned Alignment, - bool isNegative, - SMLoc S, SMLoc E, - SMLoc AlignmentLoc = SMLoc()) { - ARMOperand *Op = new ARMOperand(k_Memory); + static std::unique_ptr + CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm, + unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType, + unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S, + SMLoc E, SMLoc AlignmentLoc = SMLoc()) { + auto Op = make_unique(k_Memory); Op->Memory.BaseRegNum = BaseRegNum; Op->Memory.OffsetImm = OffsetImm; Op->Memory.OffsetRegNum = OffsetRegNum; @@ -2703,11 +2809,10 @@ public: return Op; } - static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, - ARM_AM::ShiftOpc ShiftTy, - unsigned ShiftImm, - SMLoc S, SMLoc E) { - ARMOperand *Op = new ARMOperand(k_PostIndexRegister); + static std::unique_ptr + CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy, + unsigned ShiftImm, SMLoc S, SMLoc E) { + auto Op = 
make_unique(k_PostIndexRegister); Op->PostIdxReg.RegNum = RegNum; Op->PostIdxReg.isAdd = isAdd; Op->PostIdxReg.ShiftTy = ShiftTy; @@ -2717,38 +2822,48 @@ public: return Op; } - static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { - ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); + static std::unique_ptr CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, + SMLoc S) { + auto Op = make_unique(k_MemBarrierOpt); Op->MBOpt.Val = Opt; Op->StartLoc = S; Op->EndLoc = S; return Op; } - static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, - SMLoc S) { - ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt); + static std::unique_ptr + CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) { + auto Op = make_unique(k_InstSyncBarrierOpt); Op->ISBOpt.Val = Opt; Op->StartLoc = S; Op->EndLoc = S; return Op; } - static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { - ARMOperand *Op = new ARMOperand(k_ProcIFlags); + static std::unique_ptr CreateProcIFlags(ARM_PROC::IFlags IFlags, + SMLoc S) { + auto Op = make_unique(k_ProcIFlags); Op->IFlags.Val = IFlags; Op->StartLoc = S; Op->EndLoc = S; return Op; } - static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { - ARMOperand *Op = new ARMOperand(k_MSRMask); + static std::unique_ptr CreateMSRMask(unsigned MMask, SMLoc S) { + auto Op = make_unique(k_MSRMask); Op->MMask.Val = MMask; Op->StartLoc = S; Op->EndLoc = S; return Op; } + + static std::unique_ptr CreateBankedReg(unsigned Reg, SMLoc S) { + auto Op = make_unique(k_BankedReg); + Op->BankedReg.Val = Reg; + Op->StartLoc = S; + Op->EndLoc = S; + return Op; + } }; } // end anonymous namespace. @@ -2782,6 +2897,9 @@ void ARMOperand::print(raw_ostream &OS) const { case k_MSRMask: OS << ""; break; + case k_BankedReg: + OS << ""; + break; case k_Immediate: getImm()->print(OS); break; @@ -2835,6 +2953,10 @@ void ARMOperand::print(raw_ostream &OS) const { case k_RotateImmediate: OS << ""; break; + case k_ModifiedImmediate: + OS << ""; + break; case k_BitfieldDescriptor: OS << ""; @@ -2884,8 +3006,9 @@ static unsigned MatchRegisterName(StringRef Name); bool ARMAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { - StartLoc = Parser.getTok().getLoc(); - EndLoc = Parser.getTok().getEndLoc(); + const AsmToken &Tok = getParser().getTok(); + StartLoc = Tok.getLoc(); + EndLoc = Tok.getEndLoc(); RegNo = tryParseRegister(); return (RegNo == (unsigned)-1); @@ -2896,6 +3019,7 @@ bool ARMAsmParser::ParseRegister(unsigned &RegNo, /// returned. Otherwise return -1. /// int ARMAsmParser::tryParseRegister() { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) return -1; @@ -2937,6 +3061,10 @@ int ARMAsmParser::tryParseRegister() { return Entry->getValue(); } + // Some FPUs only have 16 D registers, so D16-D31 are invalid + if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31) + return -1; + Parser.Lex(); // Eat identifier token. return RegNum; @@ -2947,8 +3075,8 @@ int ARMAsmParser::tryParseRegister() { // occurs, return -1. An irrecoverable error is one where tokens have been // consumed in the process of trying to parse the shifter (i.e., when it is // indeed a shifter operand, but malformed). 
-int ARMAsmParser::tryParseShiftRegister(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
+  MCAsmParser &Parser = getParser();
   SMLoc S = Parser.getTok().getLoc();
   const AsmToken &Tok = Parser.getTok();
   if (Tok.isNot(AsmToken::Identifier))
@@ -2972,7 +3100,8 @@ int ARMAsmParser::tryParseShiftRegister(
     // The source register for the shift has already been added to the
     // operand list, so we need to pop it off and combine it into the shifted
     // register operand instead.
-    std::unique_ptr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
+    std::unique_ptr<ARMOperand> PrevOp(
+        (ARMOperand *)Operands.pop_back_val().release());
     if (!PrevOp->isReg())
       return Error(PrevOp->getStartLoc(), "shift must be of a register");
     int SrcReg = PrevOp->getReg();
@@ -3021,12 +3150,12 @@ int ARMAsmParser::tryParseShiftRegister(
         EndLoc = Parser.getTok().getEndLoc();
         ShiftReg = tryParseRegister();
         if (ShiftReg == -1) {
-          Error (L, "expected immediate or register in shift operand");
+          Error(L, "expected immediate or register in shift operand");
           return -1;
         }
       } else {
-        Error (Parser.getTok().getLoc(),
-               "expected immediate or register in shift operand");
+        Error(Parser.getTok().getLoc(),
+              "expected immediate or register in shift operand");
         return -1;
       }
     }
@@ -3049,8 +3178,8 @@ int ARMAsmParser::tryParseShiftRegister(
 ///
 /// TODO this is likely to change to allow different register types and or to
 /// parse for a specific register type.
-bool ARMAsmParser::
-tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
+  MCAsmParser &Parser = getParser();
   const AsmToken &RegTok = Parser.getTok();
   int RegNo = tryParseRegister();
   if (RegNo == -1)
@@ -3096,17 +3225,25 @@ tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
 }
 
 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
-/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
-/// "c5", ...
+/// instruction with a symbolic operand name.
+/// We accept "crN" syntax for GAS compatibility.
+/// <operand-name> ::= <prefix><number>
+/// If CoprocOp is 'c', then:
+///   <prefix> ::= c | cr
+/// If CoprocOp is 'p', then :
+///   <prefix> ::= p
+/// <number> ::= integer in range [0, 15]
 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
   // Use the same layout as the tablegen'erated register name matcher. Ugly,
   // but efficient.
+  if (Name.size() < 2 || Name[0] != CoprocOp)
+    return -1;
+  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
+
   switch (Name.size()) {
   default: return -1;
-  case 2:
-    if (Name[0] != CoprocOp)
-      return -1;
-    switch (Name[1]) {
+  case 1:
+    switch (Name[0]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
@@ -3119,14 +3256,15 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
    case '8': return 8;
    case '9': return 9;
    }
-  case 3:
-    if (Name[0] != CoprocOp || Name[1] != '1')
+  case 2:
+    if (Name[0] != '1')
       return -1;
-    switch (Name[2]) {
+    switch (Name[1]) {
    default:  return -1;
-    // p10 and p11 are invalid for coproc instructions (reserved for FP/NEON)
-    case '0': return CoprocOp == 'p'? -1: 10;
-    case '1': return CoprocOp == 'p'? -1: 11;
+    // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
+    // However, old cores (v5/v6) did use them in that way.
+ case '0': return 10; + case '1': return 11; case '2': return 12; case '3': return 13; case '4': return 14; @@ -3136,8 +3274,9 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { } /// parseITCondCode - Try to parse a condition code for an IT instruction. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseITCondCode(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseITCondCode(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (!Tok.is(AsmToken::Identifier)) @@ -3173,8 +3312,9 @@ parseITCondCode(SmallVectorImpl &Operands) { /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The /// token must be an Identifier when called, and if it is a coprocessor /// number, the token is eaten and the operand is added to the operand list. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseCoprocNumOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) @@ -3183,6 +3323,9 @@ parseCoprocNumOperand(SmallVectorImpl &Operands) { int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); if (Num == -1) return MatchOperand_NoMatch; + // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions + if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11)) + return MatchOperand_NoMatch; Parser.Lex(); // Eat identifier token. Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); @@ -3192,8 +3335,9 @@ parseCoprocNumOperand(SmallVectorImpl &Operands) { /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The /// token must be an Identifier when called, and if it is a coprocessor /// number, the token is eaten and the operand is added to the operand list. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseCoprocRegOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) @@ -3210,8 +3354,9 @@ parseCoprocRegOperand(SmallVectorImpl &Operands) { /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. /// coproc_option : '{' imm0_255 '}' -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseCoprocOptionOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); // If this isn't a '{', this isn't a coprocessor immediate operand. @@ -3288,8 +3433,8 @@ static unsigned getDRegFromQReg(unsigned QReg) { } /// Parse a register list. -bool ARMAsmParser:: -parseRegisterList(SmallVectorImpl &Operands) { +bool ARMAsmParser::parseRegisterList(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Brace"); SMLoc S = Parser.getTok().getLoc(); @@ -3421,6 +3566,7 @@ parseRegisterList(SmallVectorImpl &Operands) { // Helper function to parse the lane index for vector lists. 
ARMAsmParser::OperandMatchResultTy ARMAsmParser:: parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) { + MCAsmParser &Parser = getParser(); Index = 0; // Always return a defined index value. if (Parser.getTok().is(AsmToken::LBrac)) { Parser.Lex(); // Eat the '['. @@ -3470,8 +3616,9 @@ parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) { } // parse a vector register list -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseVectorList(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseVectorList(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); VectorLaneTy LaneKind; unsigned LaneIndex; SMLoc S = Parser.getTok().getLoc(); @@ -3721,8 +3868,9 @@ parseVectorList(SmallVectorImpl &Operands) { } /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseMemBarrierOptOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); unsigned Opt; @@ -3792,8 +3940,9 @@ parseMemBarrierOptOperand(SmallVectorImpl &Operands) { } /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseInstSyncBarrierOptOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); unsigned Opt; @@ -3843,8 +3992,9 @@ parseInstSyncBarrierOptOperand(SmallVectorImpl &Operands) { /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseProcIFlagsOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (!Tok.is(AsmToken::Identifier)) @@ -3877,8 +4027,9 @@ parseProcIFlagsOperand(SmallVectorImpl &Operands) { } /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseMSRMaskOperand(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (!Tok.is(AsmToken::Identifier)) @@ -3899,9 +4050,6 @@ parseMSRMaskOperand(SmallVectorImpl &Operands) { // should really only be allowed when writing a special register. Note // they get dropped in the MRS instruction reading a special register as // the SYSm field is only 8 bits. - // - // FIXME: the _g and _nzcvqg versions are only allowed if the processor - // includes the DSP extension but that is not checked. .Case("apsr", 0x800) .Case("apsr_nzcvq", 0x800) .Case("apsr_g", 0x400) @@ -3933,6 +4081,11 @@ parseMSRMaskOperand(SmallVectorImpl &Operands) { if (FlagsVal == ~0U) return MatchOperand_NoMatch; + if (!hasThumb2DSP() && (FlagsVal & 0x400)) + // The _g and _nzcvqg versions are only valid if the DSP extension is + // available. 
+ return MatchOperand_NoMatch; + if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813) // basepri, basepri_max and faultmask only valid for V7m. return MatchOperand_NoMatch; @@ -4005,9 +4158,67 @@ parseMSRMaskOperand(SmallVectorImpl &Operands) { return MatchOperand_Success; } -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parsePKHImm(SmallVectorImpl &Operands, StringRef Op, - int Low, int High) { +/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for +/// use in the MRS/MSR instructions added to support virtualization. +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + SMLoc S = Parser.getTok().getLoc(); + const AsmToken &Tok = Parser.getTok(); + if (!Tok.is(AsmToken::Identifier)) + return MatchOperand_NoMatch; + StringRef RegName = Tok.getString(); + + // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM + // and bit 5 is R. + unsigned Encoding = StringSwitch(RegName.lower()) + .Case("r8_usr", 0x00) + .Case("r9_usr", 0x01) + .Case("r10_usr", 0x02) + .Case("r11_usr", 0x03) + .Case("r12_usr", 0x04) + .Case("sp_usr", 0x05) + .Case("lr_usr", 0x06) + .Case("r8_fiq", 0x08) + .Case("r9_fiq", 0x09) + .Case("r10_fiq", 0x0a) + .Case("r11_fiq", 0x0b) + .Case("r12_fiq", 0x0c) + .Case("sp_fiq", 0x0d) + .Case("lr_fiq", 0x0e) + .Case("lr_irq", 0x10) + .Case("sp_irq", 0x11) + .Case("lr_svc", 0x12) + .Case("sp_svc", 0x13) + .Case("lr_abt", 0x14) + .Case("sp_abt", 0x15) + .Case("lr_und", 0x16) + .Case("sp_und", 0x17) + .Case("lr_mon", 0x1c) + .Case("sp_mon", 0x1d) + .Case("elr_hyp", 0x1e) + .Case("sp_hyp", 0x1f) + .Case("spsr_fiq", 0x2e) + .Case("spsr_irq", 0x30) + .Case("spsr_svc", 0x32) + .Case("spsr_abt", 0x34) + .Case("spsr_und", 0x36) + .Case("spsr_mon", 0x3c) + .Case("spsr_hyp", 0x3e) + .Default(~0U); + + if (Encoding == ~0U) + return MatchOperand_NoMatch; + + Parser.Lex(); // Eat identifier token. + Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S)); + return MatchOperand_Success; +} + +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low, + int High) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) { Error(Parser.getTok().getLoc(), Op + " operand expected."); @@ -4053,8 +4264,9 @@ parsePKHImm(SmallVectorImpl &Operands, StringRef Op, return MatchOperand_Success; } -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseSetEndImm(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseSetEndImm(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); SMLoc S = Tok.getLoc(); if (Tok.isNot(AsmToken::Identifier)) { @@ -4082,8 +4294,9 @@ parseSetEndImm(SmallVectorImpl &Operands) { /// lsl #n 'n' in [0,31] /// asr #n 'n' in [1,32] /// n == 32 encoded as n == 0. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseShifterImm(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseShifterImm(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); SMLoc S = Tok.getLoc(); if (Tok.isNot(AsmToken::Identifier)) { @@ -4152,8 +4365,9 @@ parseShifterImm(SmallVectorImpl &Operands) { /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family /// of instructions. 
Legal values are: /// ror #n 'n' in {0, 8, 16, 24} -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseRotImm(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseRotImm(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); SMLoc S = Tok.getLoc(); if (Tok.isNot(AsmToken::Identifier)) @@ -4198,8 +4412,109 @@ parseRotImm(SmallVectorImpl &Operands) { return MatchOperand_Success; } -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseBitfield(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseModImm(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); + MCAsmLexer &Lexer = getLexer(); + int64_t Imm1, Imm2; + + if ((Parser.getTok().isNot(AsmToken::Hash) && + Parser.getTok().isNot(AsmToken::Dollar) /* looking for an immediate */ ) + || Lexer.peekTok().is(AsmToken::Colon) + || Lexer.peekTok().is(AsmToken::LParen) /* avoid complex operands */ ) + return MatchOperand_NoMatch; + + SMLoc S = Parser.getTok().getLoc(); + + // Eat the hash (or dollar) + Parser.Lex(); + + SMLoc Sx1, Ex1; + Sx1 = Parser.getTok().getLoc(); + const MCExpr *Imm1Exp; + if (getParser().parseExpression(Imm1Exp, Ex1)) { + Error(Sx1, "malformed expression"); + return MatchOperand_ParseFail; + } + + const MCConstantExpr *CE = dyn_cast(Imm1Exp); + + if (CE) { + // immediate must fit within 32-bits + Imm1 = CE->getValue(); + if (Imm1 < INT32_MIN || Imm1 > UINT32_MAX) { + Error(Sx1, "immediate operand must be representable with 32 bits"); + return MatchOperand_ParseFail; + } + + int Enc = ARM_AM::getSOImmVal(Imm1); + if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) { + // We have a match! + Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF), + (Enc & 0xF00) >> 7, + Sx1, Ex1)); + return MatchOperand_Success; + } + } else { + Error(Sx1, "constant expression expected"); + return MatchOperand_ParseFail; + } + + if (Parser.getTok().isNot(AsmToken::Comma)) { + // Consider [mov r0, #-10], which is aliased with mvn. We cannot fail + // the parse here. + Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); + return MatchOperand_Success; + } + + // From this point onward, we expect the input to be a (#bits, #rot) pair + if (Imm1 & ~0xFF) { + Error(Sx1, "immediate operand must a number in the range [0, 255]"); + return MatchOperand_ParseFail; + } + + if (Lexer.peekTok().isNot(AsmToken::Hash) && + Lexer.peekTok().isNot(AsmToken::Dollar)) { + Error(Lexer.peekTok().getLoc(), "immediate operand expected"); + return MatchOperand_ParseFail; + } + + // Eat the comma + Parser.Lex(); + + // Repeat for #rot + SMLoc Sx2, Ex2; + Sx2 = Parser.getTok().getLoc(); + + // Eat the hash (or dollar) + Parser.Lex(); + + const MCExpr *Imm2Exp; + if (getParser().parseExpression(Imm2Exp, Ex2)) { + Error(Sx2, "malformed expression"); + return MatchOperand_ParseFail; + } + + CE = dyn_cast(Imm2Exp); + + if (CE) { + Imm2 = CE->getValue(); + if (!(Imm2 & ~0x1E)) { + // We have a match! 
+ Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2)); + return MatchOperand_Success; + } + Error(Sx2, "immediate operand must an even number in the range [0, 30]"); + return MatchOperand_ParseFail; + } else { + Error(Sx2, "constant expression expected"); + return MatchOperand_ParseFail; + } +} + +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseBitfield(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S = Parser.getTok().getLoc(); // The bitfield descriptor is really two operands, the LSB and the width. if (Parser.getTok().isNot(AsmToken::Hash) && @@ -4266,8 +4581,8 @@ parseBitfield(SmallVectorImpl &Operands) { return MatchOperand_Success; } -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parsePostIdxReg(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parsePostIdxReg(OperandVector &Operands) { // Check for a post-index addressing register operand. Specifically: // postidx_reg := '+' register {, shift} // | '-' register {, shift} @@ -4276,6 +4591,7 @@ parsePostIdxReg(SmallVectorImpl &Operands) { // This method must return MatchOperand_NoMatch without consuming any tokens // in the case where there is no match, as other alternatives take other // parse methods. + MCAsmParser &Parser = getParser(); AsmToken Tok = Parser.getTok(); SMLoc S = Tok.getLoc(); bool haveEaten = false; @@ -4315,8 +4631,8 @@ parsePostIdxReg(SmallVectorImpl &Operands) { return MatchOperand_Success; } -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseAM3Offset(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseAM3Offset(OperandVector &Operands) { // Check for a post-index addressing register operand. Specifically: // am3offset := '+' register // | '-' register @@ -4328,6 +4644,7 @@ parseAM3Offset(SmallVectorImpl &Operands) { // This method must return MatchOperand_NoMatch without consuming any tokens // in the case where there is no match, as other alternatives take other // parse methods. + MCAsmParser &Parser = getParser(); AsmToken Tok = Parser.getTok(); SMLoc S = Tok.getLoc(); @@ -4388,26 +4705,24 @@ parseAM3Offset(SmallVectorImpl &Operands) { /// Convert parsed operands to MCInst. Needed here because this instruction /// only has two register operands, but multiplication is commutative so /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN". -void ARMAsmParser:: -cvtThumbMultiply(MCInst &Inst, - const SmallVectorImpl &Operands) { - ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); - ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); +void ARMAsmParser::cvtThumbMultiply(MCInst &Inst, + const OperandVector &Operands) { + ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); + ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1); // If we have a three-operand form, make sure to set Rn to be the operand // that isn't the same as Rd. 
unsigned RegOp = 4; if (Operands.size() == 6 && - ((ARMOperand*)Operands[4])->getReg() == - ((ARMOperand*)Operands[3])->getReg()) + ((ARMOperand &)*Operands[4]).getReg() == + ((ARMOperand &)*Operands[3]).getReg()) RegOp = 5; - ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1); + ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1); Inst.addOperand(Inst.getOperand(0)); - ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); + ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2); } -void ARMAsmParser:: -cvtThumbBranches(MCInst &Inst, - const SmallVectorImpl &Operands) { +void ARMAsmParser::cvtThumbBranches(MCInst &Inst, + const OperandVector &Operands) { int CondOp = -1, ImmOp = -1; switch(Inst.getOpcode()) { case ARM::tB: @@ -4430,7 +4745,7 @@ cvtThumbBranches(MCInst &Inst, } else { // outside IT blocks we can only have unconditional branches with AL // condition code or conditional branches with non-AL condition code - unsigned Cond = static_cast(Operands[CondOp])->getCondCode(); + unsigned Cond = static_cast(*Operands[CondOp]).getCondCode(); switch(Inst.getOpcode()) { case ARM::tB: case ARM::tBcc: @@ -4447,27 +4762,27 @@ cvtThumbBranches(MCInst &Inst, switch(Inst.getOpcode()) { // classify tB as either t2B or t1B based on range of immediate operand case ARM::tB: { - ARMOperand* op = static_cast(Operands[ImmOp]); - if(!op->isSignedOffset<11, 1>() && isThumbTwo()) + ARMOperand &op = static_cast(*Operands[ImmOp]); + if (!op.isSignedOffset<11, 1>() && isThumbTwo()) Inst.setOpcode(ARM::t2B); break; } // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand case ARM::tBcc: { - ARMOperand* op = static_cast(Operands[ImmOp]); - if(!op->isSignedOffset<8, 1>() && isThumbTwo()) + ARMOperand &op = static_cast(*Operands[ImmOp]); + if (!op.isSignedOffset<8, 1>() && isThumbTwo()) Inst.setOpcode(ARM::t2Bcc); break; } } - ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1); - ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2); + ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1); + ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2); } /// Parse an ARM memory expression, return false if successful else return true /// or an error. The first token must be a '[' when called. -bool ARMAsmParser:: -parseMemory(SmallVectorImpl &Operands) { +bool ARMAsmParser::parseMemory(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); SMLoc S, E; assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket"); @@ -4659,6 +4974,7 @@ parseMemory(SmallVectorImpl &Operands) { /// return true if it parses a shift otherwise it returns false. bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, unsigned &Amount) { + MCAsmParser &Parser = getParser(); SMLoc Loc = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) @@ -4717,8 +5033,9 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, } /// parseFPImm - A floating point immediate expression operand. -ARMAsmParser::OperandMatchResultTy ARMAsmParser:: -parseFPImm(SmallVectorImpl &Operands) { +ARMAsmParser::OperandMatchResultTy +ARMAsmParser::parseFPImm(OperandVector &Operands) { + MCAsmParser &Parser = getParser(); // Anything that can accept a floating point constant as an operand // needs to go through here, as the regular parseExpression is // integer only. @@ -4744,12 +5061,12 @@ parseFPImm(SmallVectorImpl &Operands) { // integer constant. 
Make sure we don't try to parse an FPImm // for these: // vmov.i{8|16|32|64} , #imm - ARMOperand *TyOp = static_cast(Operands[2]); - bool isVmovf = TyOp->isToken() && (TyOp->getToken() == ".f32" || - TyOp->getToken() == ".f64"); - ARMOperand *Mnemonic = static_cast(Operands[0]); - bool isFconst = Mnemonic->isToken() && (Mnemonic->getToken() == "fconstd" || - Mnemonic->getToken() == "fconsts"); + ARMOperand &TyOp = static_cast(*Operands[2]); + bool isVmovf = TyOp.isToken() && + (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64"); + ARMOperand &Mnemonic = static_cast(*Operands[0]); + bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" || + Mnemonic.getToken() == "fconsts"); if (!(isVmovf || isFconst)) return MatchOperand_NoMatch; @@ -4798,8 +5115,8 @@ parseFPImm(SmallVectorImpl &Operands) { /// Parse a arm instruction operand. For now this parses the operand regardless /// of the mnemonic. -bool ARMAsmParser::parseOperand(SmallVectorImpl &Operands, - StringRef Mnemonic) { +bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) { + MCAsmParser &Parser = getParser(); SMLoc S, E; // Check if the current operand has a custom associated parser, if so, try to @@ -4932,6 +5249,7 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl &Operands, // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. // :lower16: and :upper16:. bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { + MCAsmParser &Parser = getParser(); RefKind = ARMMCExpr::VK_ARM_None; // consume an optional '#' (GNU compatibility) @@ -4995,7 +5313,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" || - Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel")) + Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" || + Mnemonic.startswith("vsel")) return Mnemonic; // First, split out any predication code. Ignore mnemonics we know aren't @@ -5094,12 +5413,13 @@ getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" || - Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") || - Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") || + Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" || + Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") || + Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" || - Mnemonic == "vrintm" || Mnemonic.startswith("aes") || + Mnemonic == "vrintm" || Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") || (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) { // These mnemonics are never predicable @@ -5124,7 +5444,7 @@ getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, } bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, - SmallVectorImpl &Operands) { + OperandVector &Operands) { // FIXME: This is all horribly hacky. We really need a better way to deal // with optional operands like this in the matcher table. 
@@ -5137,17 +5457,17 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, // conditionally adding the cc_out in the first place because we need // to check the type of the parsed immediate operand. if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && - !static_cast(Operands[4])->isARMSOImm() && - static_cast(Operands[4])->isImm0_65535Expr() && - static_cast(Operands[1])->getReg() == 0) + !static_cast(*Operands[4]).isARMSOImm() && + static_cast(*Operands[4]).isImm0_65535Expr() && + static_cast(*Operands[1]).getReg() == 0) return true; // Register-register 'add' for thumb does not have a cc_out operand // when there are only two register operands. if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && - static_cast(Operands[3])->isReg() && - static_cast(Operands[4])->isReg() && - static_cast(Operands[1])->getReg() == 0) + static_cast(*Operands[3]).isReg() && + static_cast(*Operands[4]).isReg() && + static_cast(*Operands[1]).getReg() == 0) return true; // Register-register 'add' for thumb does not have a cc_out operand // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do @@ -5155,13 +5475,12 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, // that can handle a different range and has a cc_out operand. if (((isThumb() && Mnemonic == "add") || (isThumbTwo() && Mnemonic == "sub")) && - Operands.size() == 6 && - static_cast(Operands[3])->isReg() && - static_cast(Operands[4])->isReg() && - static_cast(Operands[4])->getReg() == ARM::SP && - static_cast(Operands[1])->getReg() == 0 && - ((Mnemonic == "add" &&static_cast(Operands[5])->isReg()) || - static_cast(Operands[5])->isImm0_1020s4())) + Operands.size() == 6 && static_cast(*Operands[3]).isReg() && + static_cast(*Operands[4]).isReg() && + static_cast(*Operands[4]).getReg() == ARM::SP && + static_cast(*Operands[1]).getReg() == 0 && + ((Mnemonic == "add" && static_cast(*Operands[5]).isReg()) || + static_cast(*Operands[5]).isImm0_1020s4())) return true; // For Thumb2, add/sub immediate does not have a cc_out operand for the // imm0_4095 variant. That's the least-preferred variant when @@ -5169,23 +5488,22 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, // should remove the cc_out operand, we have to explicitly check that // it's not one of the other variants. Ugh. if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && - Operands.size() == 6 && - static_cast(Operands[3])->isReg() && - static_cast(Operands[4])->isReg() && - static_cast(Operands[5])->isImm()) { + Operands.size() == 6 && static_cast(*Operands[3]).isReg() && + static_cast(*Operands[4]).isReg() && + static_cast(*Operands[5]).isImm()) { // Nest conditions rather than one big 'if' statement for readability. // // If both registers are low, we're in an IT block, and the immediate is // in range, we should use encoding T1 instead, which has a cc_out. if (inITBlock() && - isARMLowRegister(static_cast(Operands[3])->getReg()) && - isARMLowRegister(static_cast(Operands[4])->getReg()) && - static_cast(Operands[5])->isImm0_7()) + isARMLowRegister(static_cast(*Operands[3]).getReg()) && + isARMLowRegister(static_cast(*Operands[4]).getReg()) && + static_cast(*Operands[5]).isImm0_7()) return false; // Check against T3. If the second register is the PC, this is an // alternate form of ADR, which uses encoding T4, so check for that too. 
- if (static_cast(Operands[4])->getReg() != ARM::PC && - static_cast(Operands[5])->isT2SOImm()) + if (static_cast(*Operands[4]).getReg() != ARM::PC && + static_cast(*Operands[5]).isT2SOImm()) return false; // Otherwise, we use encoding T4, which does not have a cc_out @@ -5197,35 +5515,34 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to // use the 16-bit encoding or not. if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && - static_cast(Operands[1])->getReg() == 0 && - static_cast(Operands[3])->isReg() && - static_cast(Operands[4])->isReg() && - static_cast(Operands[5])->isReg() && + static_cast(*Operands[1]).getReg() == 0 && + static_cast(*Operands[3]).isReg() && + static_cast(*Operands[4]).isReg() && + static_cast(*Operands[5]).isReg() && // If the registers aren't low regs, the destination reg isn't the // same as one of the source regs, or the cc_out operand is zero // outside of an IT block, we have to use the 32-bit encoding, so // remove the cc_out operand. - (!isARMLowRegister(static_cast(Operands[3])->getReg()) || - !isARMLowRegister(static_cast(Operands[4])->getReg()) || - !isARMLowRegister(static_cast(Operands[5])->getReg()) || - !inITBlock() || - (static_cast(Operands[3])->getReg() != - static_cast(Operands[5])->getReg() && - static_cast(Operands[3])->getReg() != - static_cast(Operands[4])->getReg()))) + (!isARMLowRegister(static_cast(*Operands[3]).getReg()) || + !isARMLowRegister(static_cast(*Operands[4]).getReg()) || + !isARMLowRegister(static_cast(*Operands[5]).getReg()) || + !inITBlock() || (static_cast(*Operands[3]).getReg() != + static_cast(*Operands[5]).getReg() && + static_cast(*Operands[3]).getReg() != + static_cast(*Operands[4]).getReg()))) return true; // Also check the 'mul' syntax variant that doesn't specify an explicit // destination register. if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && - static_cast(Operands[1])->getReg() == 0 && - static_cast(Operands[3])->isReg() && - static_cast(Operands[4])->isReg() && + static_cast(*Operands[1]).getReg() == 0 && + static_cast(*Operands[3]).isReg() && + static_cast(*Operands[4]).isReg() && // If the registers aren't low regs or the cc_out operand is zero // outside of an IT block, we have to use the 32-bit encoding, so // remove the cc_out operand. - (!isARMLowRegister(static_cast(Operands[3])->getReg()) || - !isARMLowRegister(static_cast(Operands[4])->getReg()) || + (!isARMLowRegister(static_cast(*Operands[3]).getReg()) || + !isARMLowRegister(static_cast(*Operands[4]).getReg()) || !inITBlock())) return true; @@ -5238,32 +5555,32 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, // anyway. 
if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && (Operands.size() == 5 || Operands.size() == 6) && - static_cast(Operands[3])->isReg() && - static_cast(Operands[3])->getReg() == ARM::SP && - static_cast(Operands[1])->getReg() == 0 && - (static_cast(Operands[4])->isImm() || + static_cast(*Operands[3]).isReg() && + static_cast(*Operands[3]).getReg() == ARM::SP && + static_cast(*Operands[1]).getReg() == 0 && + (static_cast(*Operands[4]).isImm() || (Operands.size() == 6 && - static_cast(Operands[5])->isImm()))) + static_cast(*Operands[5]).isImm()))) return true; return false; } -bool ARMAsmParser::shouldOmitPredicateOperand( - StringRef Mnemonic, SmallVectorImpl &Operands) { +bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic, + OperandVector &Operands) { // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON unsigned RegIdx = 3; if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") && - static_cast(Operands[2])->getToken() == ".f32") { - if (static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == ".f32") + static_cast(*Operands[2]).getToken() == ".f32") { + if (static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == ".f32") RegIdx = 4; - if (static_cast(Operands[RegIdx])->isReg() && - (ARMMCRegisterClasses[ARM::DPRRegClassID] - .contains(static_cast(Operands[RegIdx])->getReg()) || - ARMMCRegisterClasses[ARM::QPRRegClassID] - .contains(static_cast(Operands[RegIdx])->getReg()))) + if (static_cast(*Operands[RegIdx]).isReg() && + (ARMMCRegisterClasses[ARM::DPRRegClassID].contains( + static_cast(*Operands[RegIdx]).getReg()) || + ARMMCRegisterClasses[ARM::QPRRegClassID].contains( + static_cast(*Operands[RegIdx]).getReg()))) return true; } return false; @@ -5284,7 +5601,7 @@ static bool isDataTypeToken(StringRef Tok) { static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); } -static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features, +static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features, unsigned VariantID); static bool RequiresVFPRegListValidation(StringRef Inst, @@ -5308,8 +5625,8 @@ static bool RequiresVFPRegListValidation(StringRef Inst, /// Parse an arm instruction mnemonic followed by its operands. bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, - SMLoc NameLoc, - SmallVectorImpl &Operands) { + SMLoc NameLoc, OperandVector &Operands) { + MCAsmParser &Parser = getParser(); // FIXME: Can this be done via tablegen in some fashion? bool RequireVFPRegisterListCheck; bool AcceptSinglePrecisionOnly; @@ -5323,7 +5640,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // The generic tblgen'erated code does this later, at the start of // MatchInstructionImpl(), but that's too late for aliases that include // any sort of suffix. 
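// Several hunks in this section widen feature masks from unsigned to
// uint64_t (applyMnemonicAliases, AvailableFeatures, later ErrorInfo). The
// motivation is simply that feature bits past bit 31 do not survive a 32-bit
// mask. A small self-contained illustration; the feature constant is made up
// for the example.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t HypotheticalHighFeature = 1ull << 33;  // beyond 32 bits
  uint32_t NarrowMask = static_cast<uint32_t>(HypotheticalHighFeature);
  uint64_t WideMask = HypotheticalHighFeature;
  assert(NarrowMask == 0);                              // silently lost
  assert((WideMask & HypotheticalHighFeature) != 0);    // preserved
  return 0;
}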
- unsigned AvailableFeatures = getAvailableFeatures(); + uint64_t AvailableFeatures = getAvailableFeatures(); unsigned AssemblerDialect = getParser().getAssemblerDialect(); applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect); @@ -5429,6 +5746,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, Operands.push_back(ARMOperand::CreateImm( MCConstantExpr::Create(ProcessorIMod, getContext()), NameLoc, NameLoc)); + } else if (Mnemonic == "cps" && isMClass()) { + return Error(NameLoc, "instruction 'cps' requires effect for M-class"); } // Add the remaining tokens in the mnemonic. @@ -5488,12 +5807,12 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, Parser.Lex(); // Consume the EndOfStatement if (RequireVFPRegisterListCheck) { - ARMOperand *Op = static_cast(Operands.back()); - if (AcceptSinglePrecisionOnly && !Op->isSPRRegList()) - return Error(Op->getStartLoc(), + ARMOperand &Op = static_cast(*Operands.back()); + if (AcceptSinglePrecisionOnly && !Op.isSPRRegList()) + return Error(Op.getStartLoc(), "VFP/Neon single precision register expected"); - if (AcceptDoublePrecisionOnly && !Op->isDPRRegList()) - return Error(Op->getStartLoc(), + if (AcceptDoublePrecisionOnly && !Op.isDPRRegList()) + return Error(Op.getStartLoc(), "VFP/Neon double precision register expected"); } @@ -5504,20 +5823,14 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // try to remove a cc_out operand that was explicitly set on the the // mnemonic, of course (CarrySetting == true). Reason number #317 the // table driven matcher doesn't fit well with the ARM instruction set. - if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { - ARMOperand *Op = static_cast(Operands[1]); + if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) Operands.erase(Operands.begin() + 1); - delete Op; - } // Some instructions have the same mnemonic, but don't always // have a predicate. Distinguish them here and delete the // predicate if needed. - if (shouldOmitPredicateOperand(Mnemonic, Operands)) { - ARMOperand *Op = static_cast(Operands[1]); + if (shouldOmitPredicateOperand(Mnemonic, Operands)) Operands.erase(Operands.begin() + 1); - delete Op; - } // ARM mode 'blx' need special handling, as the register operand version // is predicable, but the label operand version is not. So, we can't rely @@ -5525,11 +5838,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // a k_CondCode operand in the list. If we're trying to match the label // version, remove the k_CondCode operand here. if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && - static_cast(Operands[2])->isImm()) { - ARMOperand *Op = static_cast(Operands[1]); + static_cast(*Operands[2]).isImm()) Operands.erase(Operands.begin() + 1); - delete Op; - } // Adjust operands of ldrexd/strexd to MCK_GPRPair. // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, @@ -5542,53 +5852,92 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, Mnemonic == "stlexd")) { bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); unsigned Idx = isLoad ? 2 : 3; - ARMOperand* Op1 = static_cast(Operands[Idx]); - ARMOperand* Op2 = static_cast(Operands[Idx+1]); + ARMOperand &Op1 = static_cast(*Operands[Idx]); + ARMOperand &Op2 = static_cast(*Operands[Idx + 1]); const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID); // Adjust only if Op1 and Op2 are GPRs. 
- if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) && - MRC.contains(Op2->getReg())) { - unsigned Reg1 = Op1->getReg(); - unsigned Reg2 = Op2->getReg(); + if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) && + MRC.contains(Op2.getReg())) { + unsigned Reg1 = Op1.getReg(); + unsigned Reg2 = Op2.getReg(); unsigned Rt = MRI->getEncodingValue(Reg1); unsigned Rt2 = MRI->getEncodingValue(Reg2); // Rt2 must be Rt + 1 and Rt must be even. if (Rt + 1 != Rt2 || (Rt & 1)) { - Error(Op2->getStartLoc(), isLoad ? - "destination operands must be sequential" : - "source operands must be sequential"); + Error(Op2.getStartLoc(), isLoad + ? "destination operands must be sequential" + : "source operands must be sequential"); return true; } unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID))); - Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2); - Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg( - NewReg, Op1->getStartLoc(), Op2->getEndLoc())); - delete Op1; - delete Op2; + Operands[Idx] = + ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc()); + Operands.erase(Operands.begin() + Idx + 1); } } + // If first 2 operands of a 3 operand instruction are the same + // then transform to 2 operand version of the same instruction + // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1' + // FIXME: We would really like to be able to tablegen'erate this. + if (isThumbOne() && Operands.size() == 6 && + (Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" || + Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" || + Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" || + Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic")) { + ARMOperand &Op3 = static_cast(*Operands[3]); + ARMOperand &Op4 = static_cast(*Operands[4]); + ARMOperand &Op5 = static_cast(*Operands[5]); + + // If both registers are the same then remove one of them from + // the operand list. + if (Op3.isReg() && Op4.isReg() && Op3.getReg() == Op4.getReg()) { + // If 3rd operand (variable Op5) is a register and the instruction is adds/sub + // then do not transform as the backend already handles this instruction + // correctly. + if (!Op5.isReg() || !((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub")) { + Operands.erase(Operands.begin() + 3); + if (Mnemonic == "add" && !CarrySetting) { + // Special case for 'add' (not 'adds') instruction must + // remove the CCOut operand as well. + Operands.erase(Operands.begin() + 1); + } + } + } + } + + // If instruction is 'add' and first two register operands + // use SP register, then remove one of the SP registers from + // the instruction. + // FIXME: We would really like to be able to tablegen'erate this. 
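// The LDREXD/STREXD adjustment above only folds two GPR operands into a
// GPRPair once the architectural pairing rule holds: the first register's
// encoding must be even and the second must be exactly one higher. A
// standalone sketch of that check, working on register encodings rather than
// MC register objects:
#include <cassert>

static bool isValidGPRPair(unsigned Rt, unsigned Rt2) {
  return (Rt % 2 == 0) && (Rt2 == Rt + 1);
}

int main() {
  assert(isValidGPRPair(0, 1));   // ldrexd r0, r1, [rn]  -- accepted
  assert(!isValidGPRPair(1, 2));  // odd first register   -- rejected
  assert(!isValidGPRPair(2, 4));  // not sequential       -- rejected
  return 0;
}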
+  if (isThumbOne() && Operands.size() == 5 && Mnemonic == "add" && !CarrySetting) {
+    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
+    ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
+    if (Op2.isReg() && Op3.isReg() && Op2.getReg() == ARM::SP && Op3.getReg() == ARM::SP) {
+      Operands.erase(Operands.begin() + 2);
+    }
+  }
+
   // GNU Assembler extension (compatibility)
   if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
-    ARMOperand *Op2 = static_cast<ARMOperand *>(Operands[2]);
-    ARMOperand *Op3 = static_cast<ARMOperand *>(Operands[3]);
-    if (Op3->isMem()) {
-      assert(Op2->isReg() && "expected register argument");
+    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
+    ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
+    if (Op3.isMem()) {
+      assert(Op2.isReg() && "expected register argument");
       unsigned SuperReg = MRI->getMatchingSuperReg(
-          Op2->getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
+          Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
       assert(SuperReg && "expected register pair");
       unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
-      Operands.insert(Operands.begin() + 3,
-                      ARMOperand::CreateReg(PairedReg,
-                                            Op2->getStartLoc(),
-                                            Op2->getEndLoc()));
+      Operands.insert(
+          Operands.begin() + 3,
+          ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
     }
   }
 
@@ -5598,19 +5947,13 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
   // so the Mnemonic is the original name "subs" and delete the predicate
   // operand so it will match the table entry.
   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
-      static_cast<ARMOperand *>(Operands[3])->isReg() &&
-      static_cast<ARMOperand *>(Operands[3])->getReg() == ARM::PC &&
-      static_cast<ARMOperand *>(Operands[4])->isReg() &&
-      static_cast<ARMOperand *>(Operands[4])->getReg() == ARM::LR &&
-      static_cast<ARMOperand *>(Operands[5])->isImm()) {
-    ARMOperand *Op0 = static_cast<ARMOperand *>(Operands[0]);
-    Operands.erase(Operands.begin());
-    delete Op0;
-    Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
-
-    ARMOperand *Op1 = static_cast<ARMOperand *>(Operands[1]);
+      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
+      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
+      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
+      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
+      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
+    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
     Operands.erase(Operands.begin() + 1);
-    delete Op1;
   }
   return false;
 }
@@ -5656,9 +5999,8 @@ static bool instIsBreakpoint(const MCInst &Inst) {
 }
 
 // FIXME: We would really like to be able to tablegen'erate this.
-bool ARMAsmParser::
-validateInstruction(MCInst &Inst,
-                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool ARMAsmParser::validateInstruction(MCInst &Inst,
+                                       const OperandVector &Operands) {
   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
   SMLoc Loc = Operands[0]->getStartLoc();
 
@@ -5681,7 +6023,7 @@ validateInstruction,
       // Find the condition code Operand to get its SMLoc information.
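// The GNU-compatibility path above lets "ldrd r0, [r4]" be written with a
// single transfer register; the parser synthesises the second register of the
// pair itself (via the GPRPair register class). A simplified stand-in for
// that derivation, working on register encodings rather than MC objects:
#include <cassert>
#include <string>

static std::string impliedPairedRegister(unsigned RtEncoding) {
  // LDRD/STRD pairs are even/odd: the implicit second register is Rt + 1.
  assert(RtEncoding % 2 == 0 && "first register of the pair must be even");
  return "r" + std::to_string(RtEncoding + 1);
}

int main() {
  assert(impliedPairedRegister(0) == "r1"); // ldrd r0, [r4] -> ldrd r0, r1, [r4]
  assert(impliedPairedRegister(6) == "r7"); // strd r6, [sp] -> strd r6, r7, [sp]
  return 0;
}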
SMLoc CondLoc; for (unsigned I = 1; I < Operands.size(); ++I) - if (static_cast(Operands[I])->isCondCode()) + if (static_cast(*Operands[I]).isCondCode()) CondLoc = Operands[I]->getStartLoc(); return Error(CondLoc, "incorrect condition in IT block; got '" + StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + @@ -5761,6 +6103,48 @@ validateInstruction(MCInst &Inst, "source operands must be sequential"); return false; } + case ARM::STR_PRE_IMM: + case ARM::STR_PRE_REG: + case ARM::STR_POST_IMM: + case ARM::STR_POST_REG: + case ARM::STRH_PRE: + case ARM::STRH_POST: + case ARM::STRB_PRE_IMM: + case ARM::STRB_PRE_REG: + case ARM::STRB_POST_IMM: + case ARM::STRB_POST_REG: { + // Rt must be different from Rn. + const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); + const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); + + if (Rt == Rn) + return Error(Operands[3]->getStartLoc(), + "source register and base register can't be identical"); + return false; + } + case ARM::LDR_PRE_IMM: + case ARM::LDR_PRE_REG: + case ARM::LDR_POST_IMM: + case ARM::LDR_POST_REG: + case ARM::LDRH_PRE: + case ARM::LDRH_POST: + case ARM::LDRSH_PRE: + case ARM::LDRSH_POST: + case ARM::LDRB_PRE_IMM: + case ARM::LDRB_PRE_REG: + case ARM::LDRB_POST_IMM: + case ARM::LDRB_POST_REG: + case ARM::LDRSB_PRE: + case ARM::LDRSB_POST: { + // Rt must be different from Rn. + const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); + const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); + + if (Rt == Rn) + return Error(Operands[3]->getStartLoc(), + "destination register and base register can't be identical"); + return false; + } case ARM::SBFX: case ARM::UBFX: { // Width must be in range [1, 32-lsb]. @@ -5781,8 +6165,8 @@ validateInstruction(MCInst &Inst, // in the register list. unsigned Rn = Inst.getOperand(0).getReg(); bool HasWritebackToken = - (static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == "!"); + (static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == "!"); bool ListContainsBase; if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo()) return Error(Operands[3 + HasWritebackToken]->getStartLoc(), @@ -5797,7 +6181,9 @@ validateInstruction(MCInst &Inst, return Error(Operands[3]->getStartLoc(), "writeback operator '!' not allowed when base register " "in register list"); - + if (listContainsReg(Inst, 3 + HasWritebackToken, ARM::SP)) + return Error(Operands[3 + HasWritebackToken]->getStartLoc(), + "SP not allowed in register list"); break; } case ARM::LDMIA_UPD: @@ -5808,7 +6194,19 @@ validateInstruction(MCInst &Inst, // UNPREDICTABLE on v7 upwards. Goodness knows what they did before. 
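// The new STR*/LDR* pre- and post-indexed cases above reject operands where
// the transfer register equals the base register, since writeback makes that
// combination UNPREDICTABLE. A tiny standalone version of the check,
// returning the diagnostic text instead of calling Error():
#include <cassert>
#include <string>

static std::string checkWritebackRegs(unsigned RtEnc, unsigned RnEnc,
                                      bool IsLoad) {
  if (RtEnc != RnEnc)
    return "";
  return IsLoad ? "destination register and base register can't be identical"
                : "source register and base register can't be identical";
}

int main() {
  assert(checkWritebackRegs(0, 1, true).empty());    // ldr r0, [r1], #4   OK
  assert(!checkWritebackRegs(2, 2, false).empty());  // str r2, [r2, #4]!  rejected
  return 0;
}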
if (!hasV7Ops()) break; - // Fallthrough + if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) + return Error(Operands.back()->getStartLoc(), + "writeback register not allowed in register list"); + break; + case ARM::t2LDMIA: + case ARM::t2LDMDB: + case ARM::t2STMIA: + case ARM::t2STMDB: { + if (listContainsReg(Inst, 3, ARM::SP)) + return Error(Operands.back()->getStartLoc(), + "SP not allowed in register list"); + break; + } case ARM::t2LDMIA_UPD: case ARM::t2LDMDB_UPD: case ARM::t2STMIA_UPD: @@ -5816,6 +6214,10 @@ validateInstruction(MCInst &Inst, if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) return Error(Operands.back()->getStartLoc(), "writeback register not allowed in register list"); + + if (listContainsReg(Inst, 4, ARM::SP)) + return Error(Operands.back()->getStartLoc(), + "SP not allowed in register list"); break; } case ARM::sysLDMIA_UPD: @@ -5842,11 +6244,10 @@ validateInstruction(MCInst &Inst, // this first statement is always true for the new Inst. Essentially, the // destination is unconditionally copied into the second source operand // without checking to see if it matches what we actually parsed. - if (Operands.size() == 6 && - (((ARMOperand*)Operands[3])->getReg() != - ((ARMOperand*)Operands[5])->getReg()) && - (((ARMOperand*)Operands[3])->getReg() != - ((ARMOperand*)Operands[4])->getReg())) { + if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() != + ((ARMOperand &)*Operands[5]).getReg()) && + (((ARMOperand &)*Operands[3]).getReg() != + ((ARMOperand &)*Operands[4]).getReg())) { return Error(Operands[3]->getStartLoc(), "destination register must match source register"); } @@ -5885,6 +6286,9 @@ validateInstruction(MCInst &Inst, return Error(Operands[4]->getStartLoc(), "writeback operator '!' not allowed when base register " "in register list"); + if (listContainsReg(Inst, 4, ARM::SP) && !inITBlock()) + return Error(Operands.back()->getStartLoc(), + "SP not allowed in register list"); break; } case ARM::tADDrSP: { @@ -5899,23 +6303,23 @@ validateInstruction(MCInst &Inst, } // Final range checking for Thumb unconditional branch instructions. case ARM::tB: - if (!(static_cast(Operands[2]))->isSignedOffset<11, 1>()) + if (!(static_cast(*Operands[2])).isSignedOffset<11, 1>()) return Error(Operands[2]->getStartLoc(), "branch target out of range"); break; case ARM::t2B: { int op = (Operands[2]->isImm()) ? 2 : 3; - if (!(static_cast(Operands[op]))->isSignedOffset<24, 1>()) + if (!static_cast(*Operands[op]).isSignedOffset<24, 1>()) return Error(Operands[op]->getStartLoc(), "branch target out of range"); break; } // Final range checking for Thumb conditional branch instructions. case ARM::tBcc: - if (!(static_cast(Operands[2]))->isSignedOffset<8, 1>()) + if (!static_cast(*Operands[2]).isSignedOffset<8, 1>()) return Error(Operands[2]->getStartLoc(), "branch target out of range"); break; case ARM::t2Bcc: { int Op = (Operands[2]->isImm()) ? 2 : 3; - if (!(static_cast(Operands[Op]))->isSignedOffset<20, 1>()) + if (!static_cast(*Operands[Op]).isSignedOffset<20, 1>()) return Error(Operands[Op]->getStartLoc(), "branch target out of range"); break; } @@ -5930,19 +6334,19 @@ validateInstruction(MCInst &Inst, // lead to bugs that are difficult to find since this is an easy mistake // to make. int i = (Operands[3]->isImm()) ? 
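// The branch checks above rely on isSignedOffset<bits, scale>() on the target
// operand. A plausible standalone equivalent, assuming the predicate means
// "a multiple of 2^scale whose shifted value fits in a signed bits-wide
// field" (so tB's <11, 1> covers roughly -2048 .. +2046 in steps of 2); the
// real predicate's corner cases may differ.
#include <cassert>
#include <cstdint>

static bool fitsSignedScaled(int64_t Val, unsigned Bits, unsigned Scale) {
  const int64_t Align = int64_t(1) << Scale;
  if (Val % Align != 0)
    return false;
  const int64_t Shifted = Val / Align;
  const int64_t Min = -(int64_t(1) << (Bits - 1));
  const int64_t Max = (int64_t(1) << (Bits - 1)) - 1;
  return Shifted >= Min && Shifted <= Max;
}

int main() {
  assert(fitsSignedScaled(2046, 11, 1));   // reachable by tB
  assert(!fitsSignedScaled(2048, 11, 1));  // one halfword too far
  assert(!fitsSignedScaled(3, 11, 1));     // not halfword-aligned
  return 0;
}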
3 : 4; - ARMOperand *Op = static_cast(Operands[i]); - const MCConstantExpr *CE = dyn_cast(Op->getImm()); + ARMOperand &Op = static_cast(*Operands[i]); + const MCConstantExpr *CE = dyn_cast(Op.getImm()); if (CE) break; - const MCExpr *E = dyn_cast(Op->getImm()); + const MCExpr *E = dyn_cast(Op.getImm()); if (!E) break; const ARMMCExpr *ARM16Expr = dyn_cast(E); if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 && - ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16)) { - return Error(Op->getStartLoc(), - "immediate expression for mov requires :lower16: or :upper16"); - break; - } - } + ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16)) + return Error( + Op.getStartLoc(), + "immediate expression for mov requires :lower16: or :upper16"); + break; + } } return false; @@ -6204,9 +6608,9 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { } } -bool ARMAsmParser:: -processInstruction(MCInst &Inst, - const SmallVectorImpl &Operands) { +bool ARMAsmParser::processInstruction(MCInst &Inst, + const OperandVector &Operands, + MCStreamer &Out) { switch (Inst.getOpcode()) { // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction. case ARM::LDRT_POST: @@ -6247,12 +6651,31 @@ processInstruction(MCInst &Inst, // Alias for alternate form of 'ADR Rd, #imm' instruction. case ARM::ADDri: { if (Inst.getOperand(1).getReg() != ARM::PC || - Inst.getOperand(5).getReg() != 0) + Inst.getOperand(5).getReg() != 0 || + !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm())) return false; MCInst TmpInst; TmpInst.setOpcode(ARM::ADR); TmpInst.addOperand(Inst.getOperand(0)); - TmpInst.addOperand(Inst.getOperand(2)); + if (Inst.getOperand(2).isImm()) { + TmpInst.addOperand(Inst.getOperand(2)); + } else { + // Turn PC-relative expression into absolute expression. + // Reading PC provides the start of the current instruction + 8 and + // the transform to adr is biased by that. + MCSymbol *Dot = getContext().CreateTempSymbol(); + Out.EmitLabel(Dot); + const MCExpr *OpExpr = Inst.getOperand(2).getExpr(); + const MCExpr *InstPC = MCSymbolRefExpr::Create(Dot, + MCSymbolRefExpr::VK_None, + getContext()); + const MCExpr *Const8 = MCConstantExpr::Create(8, getContext()); + const MCExpr *ReadPC = MCBinaryExpr::CreateAdd(InstPC, Const8, + getContext()); + const MCExpr *FixupAddr = MCBinaryExpr::CreateAdd(ReadPC, OpExpr, + getContext()); + TmpInst.addOperand(MCOperand::CreateExpr(FixupAddr)); + } TmpInst.addOperand(Inst.getOperand(3)); TmpInst.addOperand(Inst.getOperand(4)); Inst = TmpInst; @@ -6263,8 +6686,8 @@ processInstruction(MCInst &Inst, // Select the narrow version if the immediate will fit. if (Inst.getOperand(1).getImm() > 0 && Inst.getOperand(1).getImm() <= 0xff && - !(static_cast(Operands[2])->isToken() && - static_cast(Operands[2])->getToken() == ".w")) + !(static_cast(*Operands[2]).isToken() && + static_cast(*Operands[2]).getToken() == ".w")) Inst.setOpcode(ARM::tLDRpci); else Inst.setOpcode(ARM::t2LDRpci); @@ -7354,8 +7777,8 @@ processInstruction(MCInst &Inst, if (isARMLowRegister(Inst.getOperand(0).getReg()) && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && Inst.getOperand(5).getReg() == (inITBlock() ? 
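// The ADDri -> ADR rewrite above builds the fixup expression as
// (label at '.') + 8 + operand because reading PC in ARM state observes the
// instruction address plus 8. The same bias, shown purely numerically with
// made-up addresses:
#include <cassert>
#include <cstdint>

static int64_t adrImmediate(uint64_t InstAddr, uint64_t TargetAddr) {
  const uint64_t ReadPC = InstAddr + 8; // ARM-state PC read bias
  return static_cast<int64_t>(TargetAddr) - static_cast<int64_t>(ReadPC);
}

int main() {
  assert(adrImmediate(0x1000, 0x1020) == 0x18); // adr r0, label_at_0x1020
  assert(adrImmediate(0x1000, 0x1008) == 0);    // label 8 bytes ahead: imm 0
  return 0;
}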
0 : ARM::CPSR) && - !(static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == ".w")) { + !(static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == ".w")) { unsigned NewOpc; switch (Inst.getOpcode()) { default: llvm_unreachable("unexpected opcode"); @@ -7558,7 +7981,7 @@ processInstruction(MCInst &Inst, case ARM::LDMIA_UPD: // If this is a load of a single register via a 'pop', then we should use // a post-indexed LDR instruction instead, per the ARM ARM. - if (static_cast(Operands[0])->getToken() == "pop" && + if (static_cast(*Operands[0]).getToken() == "pop" && Inst.getNumOperands() == 5) { MCInst TmpInst; TmpInst.setOpcode(ARM::LDR_POST_IMM); @@ -7576,7 +7999,7 @@ processInstruction(MCInst &Inst, case ARM::STMDB_UPD: // If this is a store of a single register via a 'push', then we should use // a pre-indexed STR instruction instead, per the ARM ARM. - if (static_cast(Operands[0])->getToken() == "push" && + if (static_cast(*Operands[0]).getToken() == "push" && Inst.getNumOperands() == 5) { MCInst TmpInst; TmpInst.setOpcode(ARM::STR_PRE_IMM); @@ -7592,7 +8015,7 @@ processInstruction(MCInst &Inst, case ARM::t2ADDri12: // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" // mnemonic was used (not "addw"), encoding T3 is preferred. - if (static_cast(Operands[0])->getToken() != "add" || + if (static_cast(*Operands[0]).getToken() != "add" || ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) break; Inst.setOpcode(ARM::t2ADDri); @@ -7601,7 +8024,7 @@ processInstruction(MCInst &Inst, case ARM::t2SUBri12: // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" // mnemonic was used (not "subw"), encoding T3 is preferred. - if (static_cast(Operands[0])->getToken() != "sub" || + if (static_cast(*Operands[0]).getToken() != "sub" || ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) break; Inst.setOpcode(ARM::t2SUBri); @@ -7637,9 +8060,9 @@ processInstruction(MCInst &Inst, !isARMLowRegister(Inst.getOperand(0).getReg()) || (unsigned)Inst.getOperand(2).getImm() > 255 || ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) || - (inITBlock() && Inst.getOperand(5).getReg() != 0)) || - (static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == ".w")) + (inITBlock() && Inst.getOperand(5).getReg() != 0)) || + (static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == ".w")) break; MCInst TmpInst; TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? @@ -7660,8 +8083,8 @@ processInstruction(MCInst &Inst, // 'as' behaviour. Make sure the wide encoding wasn't explicit. if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || Inst.getOperand(5).getReg() != 0 || - (static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == ".w")) + (static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == ".w")) break; MCInst TmpInst; TmpInst.setOpcode(ARM::tADDhirr); @@ -7718,8 +8141,8 @@ processInstruction(MCInst &Inst, // an error in validateInstruction(). 
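// The t2ADDri/t2SUBri handling above drops to the narrow Thumb encodings only
// when every precondition lines up. A condensed sketch of that gate; the
// parameters are descriptive stand-ins for the operand/IT-state queries, and
// some of the real preconditions (e.g. matching source and destination
// registers) are left to the caller here.
#include <cassert>

static bool preferNarrowAddSubImm(bool RdIsLow, unsigned Imm, bool InITBlock,
                                  bool SetsFlags, bool ExplicitWide) {
  if (ExplicitWide)            // an explicit ".w" keeps the 32-bit encoding
    return false;
  if (!RdIsLow || Imm > 255)   // tADDi8/tSUBi8 need r0-r7 and an 8-bit imm
    return false;
  // Outside an IT block the 16-bit form sets flags; inside one it must not.
  return InITBlock ? !SetsFlags : SetsFlags;
}

int main() {
  assert(preferNarrowAddSubImm(true, 200, false, true, false));  // adds r1, #200
  assert(!preferNarrowAddSubImm(true, 200, false, true, true));  // adds.w r1, #200
  return 0;
}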
unsigned Rn = Inst.getOperand(0).getReg(); bool hasWritebackToken = - (static_cast(Operands[3])->isToken() && - static_cast(Operands[3])->getToken() == "!"); + (static_cast(*Operands[3]).isToken() && + static_cast(*Operands[3]).getToken() == "!"); bool listContainsBase; if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || (!listContainsBase && !hasWritebackToken) || @@ -7781,10 +8204,10 @@ processInstruction(MCInst &Inst, if (isARMLowRegister(Inst.getOperand(0).getReg()) && (unsigned)Inst.getOperand(1).getImm() <= 255 && ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && - Inst.getOperand(4).getReg() == ARM::CPSR) || - (inITBlock() && Inst.getOperand(4).getReg() == 0)) && - (!static_cast(Operands[2])->isToken() || - static_cast(Operands[2])->getToken() != ".w")) { + Inst.getOperand(4).getReg() == ARM::CPSR) || + (inITBlock() && Inst.getOperand(4).getReg() == 0)) && + (!static_cast(*Operands[2]).isToken() || + static_cast(*Operands[2]).getToken() != ".w")) { // The operands aren't in the same order for tMOVi8... MCInst TmpInst; TmpInst.setOpcode(ARM::tMOVi8); @@ -7805,8 +8228,8 @@ processInstruction(MCInst &Inst, isARMLowRegister(Inst.getOperand(1).getReg()) && Inst.getOperand(2).getImm() == ARMCC::AL && Inst.getOperand(4).getReg() == ARM::CPSR && - (!static_cast(Operands[2])->isToken() || - static_cast(Operands[2])->getToken() != ".w")) { + (!static_cast(*Operands[2]).isToken() || + static_cast(*Operands[2]).getToken() != ".w")) { // The operands aren't the same for tMOV[S]r... (no cc_out) MCInst TmpInst; TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); @@ -7828,8 +8251,8 @@ processInstruction(MCInst &Inst, if (isARMLowRegister(Inst.getOperand(0).getReg()) && isARMLowRegister(Inst.getOperand(1).getReg()) && Inst.getOperand(2).getImm() == 0 && - (!static_cast(Operands[2])->isToken() || - static_cast(Operands[2])->getToken() != ".w")) { + (!static_cast(*Operands[2]).isToken() || + static_cast(*Operands[2]).getToken() != ".w")) { unsigned NewOpc; switch (Inst.getOpcode()) { default: llvm_unreachable("Illegal opcode!"); @@ -7941,9 +8364,10 @@ processInstruction(MCInst &Inst, isARMLowRegister(Inst.getOperand(2).getReg())) && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || - (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && - (!static_cast(Operands[3])->isToken() || - !static_cast(Operands[3])->getToken().equals_lower(".w"))) { + (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && + (!static_cast(*Operands[3]).isToken() || + !static_cast(*Operands[3]).getToken().equals_lower( + ".w"))) { unsigned NewOpc; switch (Inst.getOpcode()) { default: llvm_unreachable("unexpected opcode"); @@ -7980,9 +8404,10 @@ processInstruction(MCInst &Inst, (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || - (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && - (!static_cast(Operands[3])->isToken() || - !static_cast(Operands[3])->getToken().equals_lower(".w"))) { + (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && + (!static_cast(*Operands[3]).isToken() || + !static_cast(*Operands[3]).getToken().equals_lower( + ".w"))) { unsigned NewOpc; switch (Inst.getOpcode()) { default: llvm_unreachable("unexpected opcode"); @@ -8043,7 +8468,7 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { } // Some 
high-register supporting Thumb1 encodings only allow both registers // to be from r0-r7 when in Thumb2. - else if (Opc == ARM::tADDhirr && isThumbOne() && + else if (Opc == ARM::tADDhirr && isThumbOne() && !hasV6MOps() && isARMLowRegister(Inst.getOperand(1).getReg()) && isARMLowRegister(Inst.getOperand(2).getReg())) return Match_RequiresThumb2; @@ -8061,12 +8486,11 @@ template <> inline bool IsCPSRDead(MCInst *Instr) { } } -static const char *getSubtargetFeatureName(unsigned Val); -bool ARMAsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, - SmallVectorImpl &Operands, - MCStreamer &Out, unsigned &ErrorInfo, - bool MatchingInlineAsm) { +static const char *getSubtargetFeatureName(uint64_t Val); +bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, + MCStreamer &Out, uint64_t &ErrorInfo, + bool MatchingInlineAsm) { MCInst Inst; unsigned MatchResult; @@ -8091,7 +8515,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // encoding is selected. Loop on it while changes happen so the // individual transformations can chain off each other. E.g., // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) - while (processInstruction(Inst, Operands)) + while (processInstruction(Inst, Operands, Out)) ; // Only after the instruction is fully processed, we can validate it @@ -8119,7 +8543,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, // Special case the error message for the very common case where only // a single subtarget feature is missing (Thumb vs. ARM, e.g.). std::string Msg = "instruction requires:"; - unsigned Mask = 1; + uint64_t Mask = 1; for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) { if (ErrorInfo & Mask) { Msg += " "; @@ -8131,11 +8555,11 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, } case Match_InvalidOperand: { SMLoc ErrorLoc = IDLoc; - if (ErrorInfo != ~0U) { + if (ErrorInfo != ~0ULL) { if (ErrorInfo >= Operands.size()) return Error(IDLoc, "too few operands for instruction"); - ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); + ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; } @@ -8143,7 +8567,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, } case Match_MnemonicFail: return Error(IDLoc, "invalid instruction", - ((ARMOperand*)Operands[0])->getLocRange()); + ((ARMOperand &)*Operands[0]).getLocRange()); case Match_RequiresNotITBlock: return Error(IDLoc, "flag setting instruction only valid outside IT block"); case Match_RequiresITBlock: @@ -8153,12 +8577,12 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_RequiresThumb2: return Error(IDLoc, "instruction variant requires Thumb2"); case Match_ImmRange0_15: { - SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); + SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; return Error(ErrorLoc, "immediate operand must be in the range [0,15]"); } case Match_ImmRange0_239: { - SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); + SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; return Error(ErrorLoc, "immediate operand must be in the range [0,239]"); } @@ -8174,7 +8598,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_DupAlignedMemoryRequires64or128: case Match_AlignedMemoryRequires64or128or256: { - SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getAlignmentLoc(); + SMLoc 
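// The Match_MissingFeature path above now walks a 64-bit mask, appending one
// feature name per set bit. Standalone sketch with a made-up name table in
// place of the tablegen'erated getSubtargetFeatureName():
#include <cassert>
#include <cstdint>
#include <string>

static const char *featureName(unsigned Bit) {
  static const char *Names[64] = {"thumb", "thumb2", "vfp2", "neon"};
  return Names[Bit] ? Names[Bit] : "unknown";
}

static std::string missingFeatureMessage(uint64_t ErrorInfo) {
  std::string Msg = "instruction requires:";
  uint64_t Mask = 1;
  for (unsigned I = 0; I < 63; ++I) {
    if (ErrorInfo & Mask)
      Msg += std::string(" ") + featureName(I);
    Mask <<= 1;
  }
  return Msg;
}

int main() {
  assert(missingFeatureMessage(0xA) == "instruction requires: thumb2 neon");
  return 0;
}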
ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; switch (MatchResult) { default: @@ -8208,6 +8632,7 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { const MCObjectFileInfo::Environment Format = getContext().getObjectFileInfo()->getObjectFileType(); bool IsMachO = Format == MCObjectFileInfo::IsMachO; + bool IsCOFF = Format == MCObjectFileInfo::IsCOFF; StringRef IDVal = DirectiveID.getIdentifier(); if (IDVal == ".word") @@ -8259,7 +8684,7 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { else if (IDVal == ".thumb_set") return parseDirectiveThumbSet(DirectiveID.getLoc()); - if (!IsMachO) { + if (!IsMachO && !IsCOFF) { if (IDVal == ".arch") return parseDirectiveArch(DirectiveID.getLoc()); else if (IDVal == ".cpu") @@ -8290,6 +8715,7 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { /// ::= .short expression [, expression]* /// ::= .word expression [, expression]* bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) { + MCAsmParser &Parser = getParser(); if (getLexer().isNot(AsmToken::EndOfStatement)) { for (;;) { const MCExpr *Value; @@ -8319,6 +8745,7 @@ bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) { /// parseDirectiveThumb /// ::= .thumb bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { + MCAsmParser &Parser = getParser(); if (getLexer().isNot(AsmToken::EndOfStatement)) { Error(L, "unexpected token in directive"); return false; @@ -8340,6 +8767,7 @@ bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { /// parseDirectiveARM /// ::= .arm bool ARMAsmParser::parseDirectiveARM(SMLoc L) { + MCAsmParser &Parser = getParser(); if (getLexer().isNot(AsmToken::EndOfStatement)) { Error(L, "unexpected token in directive"); return false; @@ -8368,12 +8796,13 @@ void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) { /// parseDirectiveThumbFunc /// ::= .thumbfunc symbol_name bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { - const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo(); - bool isMachO = MAI->hasSubsectionsViaSymbols(); + MCAsmParser &Parser = getParser(); + const auto Format = getContext().getObjectFileInfo()->getObjectFileType(); + bool IsMachO = Format == MCObjectFileInfo::IsMachO; // Darwin asm has (optionally) function name after .thumb_func direction // ELF doesn't - if (isMachO) { + if (IsMachO) { const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::EndOfStatement)) { if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) { @@ -8390,7 +8819,8 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { } if (getLexer().isNot(AsmToken::EndOfStatement)) { - Error(L, "unexpected token in directive"); + Error(Parser.getTok().getLoc(), "unexpected token in directive"); + Parser.eatToEndOfStatement(); return false; } @@ -8401,6 +8831,7 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { /// parseDirectiveSyntax /// ::= .syntax unified | divided bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) { Error(L, "unexpected token in .syntax directive"); @@ -8432,6 +8863,7 @@ bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { /// parseDirectiveCode /// ::= .code 16 | 32 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { + MCAsmParser &Parser = getParser(); const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Integer)) { Error(L, "unexpected token in .code directive"); @@ -8476,6 +8908,7 @@ bool 
ARMAsmParser::parseDirectiveCode(SMLoc L) { /// parseDirectiveReq /// ::= name .req registername bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { + MCAsmParser &Parser = getParser(); Parser.Lex(); // Eat the '.req' token. unsigned Reg; SMLoc SRegLoc, ERegLoc; @@ -8494,7 +8927,7 @@ bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { Parser.Lex(); // Consume the EndOfStatement - if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) { + if (!RegisterReqs.insert(std::make_pair(Name, Reg)).second) { Error(SRegLoc, "redefinition of '" + Name + "' does not match original."); return false; } @@ -8505,6 +8938,7 @@ bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { /// parseDirectiveUneq /// ::= .unreq registername bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { + MCAsmParser &Parser = getParser(); if (Parser.getTok().isNot(AsmToken::Identifier)) { Parser.eatToEndOfStatement(); Error(L, "unexpected input in .unreq directive."); @@ -8541,6 +8975,7 @@ bool ARMAsmParser::parseDirectiveArch(SMLoc L) { /// ::= .eabi_attribute int, int [, "str"] /// ::= .eabi_attribute Tag_name, int [, "str"] bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { + MCAsmParser &Parser = getParser(); int64_t Tag; SMLoc TagLoc; TagLoc = Parser.getTok().getLoc(); @@ -8648,9 +9083,46 @@ bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { bool ARMAsmParser::parseDirectiveCPU(SMLoc L) { StringRef CPU = getParser().parseStringToEndOfStatement().trim(); getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU); + + if (!STI.CPUStringIsValid(CPU)) { + Error(L, "Unknown CPU name"); + return false; + } + + STI.InitMCProcessorInfo(CPU, ""); + STI.InitCPUSchedModel(CPU); + unsigned FB = ComputeAvailableFeatures(STI.getFeatureBits()); + setAvailableFeatures(FB); + return false; } +// FIXME: This is duplicated in getARMFPUFeatures() in +// tools/clang/lib/Driver/Tools.cpp +static const struct { + const unsigned Fpu; + const uint64_t Enabled; + const uint64_t Disabled; +} Fpus[] = { + {ARM::VFP, ARM::FeatureVFP2, ARM::FeatureNEON}, + {ARM::VFPV2, ARM::FeatureVFP2, ARM::FeatureNEON}, + {ARM::VFPV3, ARM::FeatureVFP3, ARM::FeatureNEON}, + {ARM::VFPV3_D16, ARM::FeatureVFP3 | ARM::FeatureD16, ARM::FeatureNEON}, + {ARM::VFPV4, ARM::FeatureVFP4, ARM::FeatureNEON}, + {ARM::VFPV4_D16, ARM::FeatureVFP4 | ARM::FeatureD16, ARM::FeatureNEON}, + {ARM::FPV5_D16, ARM::FeatureFPARMv8 | ARM::FeatureD16, + ARM::FeatureNEON | ARM::FeatureCrypto}, + {ARM::FP_ARMV8, ARM::FeatureFPARMv8, + ARM::FeatureNEON | ARM::FeatureCrypto}, + {ARM::NEON, ARM::FeatureNEON, 0}, + {ARM::NEON_VFPV4, ARM::FeatureVFP4 | ARM::FeatureNEON, 0}, + {ARM::NEON_FP_ARMV8, ARM::FeatureFPARMv8 | ARM::FeatureNEON, + ARM::FeatureCrypto}, + {ARM::CRYPTO_NEON_FP_ARMV8, + ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto, 0}, + {ARM::SOFTVFP, 0, 0}, +}; + /// parseDirectiveFPU /// ::= .fpu str bool ARMAsmParser::parseDirectiveFPU(SMLoc L) { @@ -8666,6 +9138,18 @@ bool ARMAsmParser::parseDirectiveFPU(SMLoc L) { return false; } + for (const auto &Fpu : Fpus) { + if (Fpu.Fpu != ID) + continue; + + // Need to toggle features that should be on but are off and that + // should off but are on. 
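// parseDirectiveReq now spots a conflicting alias through the .second result
// of insert() instead of GetOrCreateValue(). The same idiom with std::map
// standing in for the StringMap of register aliases:
#include <cassert>
#include <map>
#include <string>

static bool defineRegAlias(std::map<std::string, unsigned> &RegisterReqs,
                           const std::string &Name, unsigned Reg) {
  // insert() leaves an existing entry untouched and reports that via
  // .second, which the .req handler turns into a redefinition error.
  return RegisterReqs.insert(std::make_pair(Name, Reg)).second;
}

int main() {
  std::map<std::string, unsigned> Reqs;
  assert(defineRegAlias(Reqs, "fp", 11));  // fp .req r11
  assert(!defineRegAlias(Reqs, "fp", 7));  // a second .req for "fp" is flagged
  return 0;
}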
+ uint64_t Toggle = (Fpu.Enabled & ~STI.getFeatureBits()) | + (Fpu.Disabled & STI.getFeatureBits()); + setAvailableFeatures(ComputeAvailableFeatures(STI.ToggleFeature(Toggle))); + break; + } + getTargetStreamer().emitFPU(ID); return false; } @@ -8732,6 +9216,7 @@ bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { /// parseDirectivePersonality /// ::= .personality name bool ARMAsmParser::parseDirectivePersonality(SMLoc L) { + MCAsmParser &Parser = getParser(); bool HasExistingPersonality = UC.hasPersonality(); UC.recordPersonality(L); @@ -8795,6 +9280,7 @@ bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { /// parseDirectiveSetFP /// ::= .setfp fpreg, spreg [, offset] bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) { + MCAsmParser &Parser = getParser(); // Check the ordering of unwind directives if (!UC.hasFnStart()) { Error(L, ".fnstart must precede .setfp directive"); @@ -8872,6 +9358,7 @@ bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) { /// parseDirective /// ::= .pad offset bool ARMAsmParser::parseDirectivePad(SMLoc L) { + MCAsmParser &Parser = getParser(); // Check the ordering of unwind directives if (!UC.hasFnStart()) { Error(L, ".fnstart must precede .pad directive"); @@ -8922,28 +9409,22 @@ bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { } // RAII object to make sure parsed operands are deleted. - struct CleanupObject { - SmallVector Operands; - ~CleanupObject() { - for (unsigned I = 0, E = Operands.size(); I != E; ++I) - delete Operands[I]; - } - } CO; + SmallVector, 1> Operands; // Parse the register list - if (parseRegisterList(CO.Operands)) + if (parseRegisterList(Operands)) return false; - ARMOperand *Op = (ARMOperand*)CO.Operands[0]; - if (!IsVector && !Op->isRegList()) { + ARMOperand &Op = (ARMOperand &)*Operands[0]; + if (!IsVector && !Op.isRegList()) { Error(L, ".save expects GPR registers"); return false; } - if (IsVector && !Op->isDPRRegList()) { + if (IsVector && !Op.isDPRRegList()) { Error(L, ".vsave expects DPR registers"); return false; } - getTargetStreamer().emitRegSave(Op->getRegList(), IsVector); + getTargetStreamer().emitRegSave(Op.getRegList(), IsVector); return false; } @@ -8952,6 +9433,7 @@ bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { /// ::= .inst.n opcode [, ...] /// ::= .inst.w opcode [, ...] bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) { + MCAsmParser &Parser = getParser(); int Width; if (isThumb()) { @@ -9048,7 +9530,7 @@ bool ARMAsmParser::parseDirectiveEven(SMLoc L) { } if (!Section) { - getStreamer().InitSections(); + getStreamer().InitSections(false); Section = getStreamer().getCurrentSection().first; } @@ -9064,6 +9546,7 @@ bool ARMAsmParser::parseDirectiveEven(SMLoc L) { /// parseDirectivePersonalityIndex /// ::= .personalityindex index bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) { + MCAsmParser &Parser = getParser(); bool HasExistingPersonality = UC.hasPersonality(); UC.recordPersonalityIndex(L); @@ -9119,6 +9602,7 @@ bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) { /// parseDirectiveUnwindRaw /// ::= .unwind_raw offset, opcode [, opcode...] 
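// parseDirectiveFPU above computes one toggle mask: enabled features that are
// currently off plus disabled features that are currently on, so a single
// ToggleFeature-style flip lands on the requested state. Pure bit arithmetic,
// shown with made-up feature bit positions:
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t FeatureVFP4 = 1ull << 0; // hypothetical bit assignments
  const uint64_t FeatureD16  = 1ull << 1;
  const uint64_t FeatureNEON = 1ull << 2;

  uint64_t Bits = FeatureNEON;                         // current subtarget
  const uint64_t Enabled  = FeatureVFP4 | FeatureD16;  // .fpu vfpv4-d16
  const uint64_t Disabled = FeatureNEON;

  const uint64_t Toggle = (Enabled & ~Bits) | (Disabled & Bits);
  Bits ^= Toggle; // the flip ToggleFeature amounts to for independent bits

  assert(Bits == (FeatureVFP4 | FeatureD16));
  return 0;
}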
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) { + MCAsmParser &Parser = getParser(); if (!UC.hasFnStart()) { Parser.eatToEndOfStatement(); Error(L, ".fnstart must precede .unwind_raw directives"); @@ -9200,6 +9684,8 @@ bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) { /// parseDirectiveTLSDescSeq /// ::= .tlsdescseq tls-variable bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) { + MCAsmParser &Parser = getParser(); + if (getLexer().isNot(AsmToken::Identifier)) { TokError("expected variable after '.tlsdescseq' directive"); Parser.eatToEndOfStatement(); @@ -9224,6 +9710,7 @@ bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) { /// parseDirectiveMovSP /// ::= .movsp reg [, #offset] bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) { + MCAsmParser &Parser = getParser(); if (!UC.hasFnStart()) { Parser.eatToEndOfStatement(); Error(L, ".fnstart must precede .movsp directives"); @@ -9287,6 +9774,7 @@ bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) { /// parseDirectiveObjectArch /// ::= .object_arch name bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) { + MCAsmParser &Parser = getParser(); if (getLexer().isNot(AsmToken::Identifier)) { Error(getLexer().getLoc(), "unexpected token"); Parser.eatToEndOfStatement(); @@ -9343,6 +9831,8 @@ bool ARMAsmParser::parseDirectiveAlign(SMLoc L) { /// parseDirectiveThumbSet /// ::= .thumb_set name, value bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) { + MCAsmParser &Parser = getParser(); + StringRef Name; if (Parser.parseIdentifier(Name)) { TokError("expected identifier after '.thumb_set'"); @@ -9372,36 +9862,7 @@ bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) { Lex(); MCSymbol *Alias = getContext().GetOrCreateSymbol(Name); - if (const MCSymbolRefExpr *SRE = dyn_cast(Value)) { - MCSymbol *Sym = getContext().LookupSymbol(SRE->getSymbol().getName()); - if (!Sym->isDefined()) { - getStreamer().EmitSymbolAttribute(Sym, MCSA_Global); - getStreamer().EmitAssignment(Alias, Value); - return false; - } - - const MCObjectFileInfo::Environment Format = - getContext().getObjectFileInfo()->getObjectFileType(); - switch (Format) { - case MCObjectFileInfo::IsCOFF: { - char Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT; - getStreamer().EmitCOFFSymbolType(Type); - // .set values are always local in COFF - getStreamer().EmitSymbolAttribute(Alias, MCSA_Local); - break; - } - case MCObjectFileInfo::IsELF: - getStreamer().EmitSymbolAttribute(Alias, MCSA_ELF_TypeFunction); - break; - case MCObjectFileInfo::IsMachO: - break; - } - } - - // FIXME: set the function as being a thumb function via the assembler - getStreamer().EmitThumbFunc(Alias); - getStreamer().EmitAssignment(Alias, Value); - + getTargetStreamer().emitThumbSet(Alias, Value); return false; } @@ -9418,8 +9879,8 @@ extern "C" void LLVMInitializeARMAsmParser() { #define GET_MATCHER_IMPLEMENTATION #include "ARMGenAsmMatcher.inc" -static const struct ExtMapEntry { - const char *Extension; +static const struct { + const char *Name; const unsigned ArchCheck; const uint64_t Features; } Extensions[] = { @@ -9450,79 +9911,81 @@ static const struct ExtMapEntry { /// parseDirectiveArchExtension /// ::= .arch_extension [no]feature bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) { + MCAsmParser &Parser = getParser(); + if (getLexer().isNot(AsmToken::Identifier)) { Error(getLexer().getLoc(), "unexpected token"); Parser.eatToEndOfStatement(); return false; } - StringRef Extension = Parser.getTok().getString(); + StringRef Name = Parser.getTok().getString(); SMLoc ExtLoc = 
Parser.getTok().getLoc(); getLexer().Lex(); bool EnableFeature = true; - if (Extension.startswith_lower("no")) { + if (Name.startswith_lower("no")) { EnableFeature = false; - Extension = Extension.substr(2); + Name = Name.substr(2); } - for (unsigned EI = 0, EE = array_lengthof(Extensions); EI != EE; ++EI) { - if (Extensions[EI].Extension != Extension) + for (const auto &Extension : Extensions) { + if (Extension.Name != Name) continue; - unsigned FB = getAvailableFeatures(); - if ((FB & Extensions[EI].ArchCheck) != Extensions[EI].ArchCheck) { - Error(ExtLoc, "architectural extension '" + Extension + "' is not " + if (!Extension.Features) + report_fatal_error("unsupported architectural extension: " + Name); + + if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) { + Error(ExtLoc, "architectural extension '" + Name + "' is not " "allowed for the current base architecture"); return false; } - if (!Extensions[EI].Features) - report_fatal_error("unsupported architectural extension: " + Extension); - - if (EnableFeature) - FB |= ComputeAvailableFeatures(Extensions[EI].Features); - else - FB &= ~ComputeAvailableFeatures(Extensions[EI].Features); - - setAvailableFeatures(FB); + uint64_t ToggleFeatures = EnableFeature + ? (~STI.getFeatureBits() & Extension.Features) + : ( STI.getFeatureBits() & Extension.Features); + uint64_t Features = + ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); + setAvailableFeatures(Features); return false; } - Error(ExtLoc, "unknown architectural extension: " + Extension); + Error(ExtLoc, "unknown architectural extension: " + Name); Parser.eatToEndOfStatement(); return false; } // Define this matcher function after the auto-generated include so we // have the match class enum definitions. -unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp, +unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, unsigned Kind) { - ARMOperand *Op = static_cast(AsmOp); + ARMOperand &Op = static_cast(AsmOp); // If the kind is a token for a literal immediate, check if our asm // operand matches. This is for InstAliases which have a fixed-value // immediate in the syntax. switch (Kind) { default: break; case MCK__35_0: - if (Op->isImm()) - if (const MCConstantExpr *CE = dyn_cast(Op->getImm())) + if (Op.isImm()) + if (const MCConstantExpr *CE = dyn_cast(Op.getImm())) if (CE->getValue() == 0) return Match_Success; break; + case MCK_ModImm: case MCK_ARMSOImm: - if (Op->isImm()) { - const MCExpr *SOExpr = Op->getImm(); + if (Op.isImm()) { + const MCExpr *SOExpr = Op.getImm(); int64_t Value; if (!SOExpr->EvaluateAsAbsolute(Value)) return Match_Success; - assert((Value >= INT32_MIN && Value <= INT32_MAX) && - "expression value must be representiable in 32 bits"); + assert((Value >= INT32_MIN && Value <= UINT32_MAX) && + "expression value must be representable in 32 bits"); } break; case MCK_GPRPair: - if (Op->isReg() && - MRI->getRegClass(ARM::GPRRegClassID).contains(Op->getReg())) + if (Op.isReg() && + MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg())) return Match_Success; break; }
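// The MCK__35_0 case above lets an InstAlias with a literal "#0" in its
// syntax match a parsed immediate whose value is zero. A stripped-down
// stand-in, with ParsedImm replacing the MCExpr machinery:
#include <cassert>

struct ParsedImm {
  bool IsImm;
  long long Value;
};

enum MatchResult { Match_Success, Match_InvalidOperand };

static MatchResult matchLiteralZero(const ParsedImm &Op) {
  if (Op.IsImm && Op.Value == 0)
    return Match_Success;
  return Match_InvalidOperand;
}

int main() {
  assert(matchLiteralZero({true, 0}) == Match_Success);
  assert(matchLiteralZero({true, 1}) == Match_InvalidOperand);
  assert(matchLiteralZero({false, 0}) == Match_InvalidOperand);
  return 0;
}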