MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
- void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
- bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
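+ // These wrappers forward an optional list of source ranges so diagnostics
+ // can underline the offending operand text.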
+ bool Warning(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
+ return Parser.Warning(L, Msg, Ranges);
+ }
+ bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
+ return Parser.Error(L, Msg, Ranges);
+ }
int tryParseRegister();
bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
// Asm Match Converter Methods
- bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+ void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+ void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+ void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+ void cvtStWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+ void cvtLdExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+ void cvtLdExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+ void cvtStExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+ void cvtStExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
- const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+ void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
+ void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
+ void cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
+ void cvtVLDwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+ void cvtVLDwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+ void cvtVSTwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
- bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+ void cvtVSTwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &);
-
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
bool processInstruction(MCInst &Inst,
Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
Match_RequiresNotITBlock,
Match_RequiresV6,
- Match_RequiresThumb2
+ Match_RequiresThumb2,
+#define GET_OPERAND_DIAGNOSTIC_TYPES
+#include "ARMGenAsmMatcher.inc"
+
};
ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
+ /// getLocRange - Get the range between the first and last token of this
+ /// operand.
+ SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
}
+ bool isImm0_508s4Neg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = -CE->getValue();
+ // Explicitly exclude zero; we want that to use the normal 0_508 version.
+ return ((Value & 3) == 0) && Value > 0 && Value <= 508;
+ }
bool isImm0_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value >= 0 && Value < 256;
}
+ bool isImm0_4095() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 4096;
+ }
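+ // As with isImm0_508s4Neg, zero is excluded so the plain imm0_4095 form
+ // handles it.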
+ bool isImm0_4095Neg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = -CE->getValue();
+ return Value > 0 && Value < 4096;
+ }
bool isImm0_1() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value > 0 && Value <= 32;
}
+ bool isAdrLabel() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant but it can't fit
+ // into the shift-immediate encoding, we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
+ else return (isARMSOImm() || isARMSOImmNeg());
+ }
bool isARMSOImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
- // Negation must be representable as an so_imm and be non-zero.
- return Value && ARM_AM::getSOImmVal(-Value) != -1;
+ // Only use this when not representable as a plain so_imm.
+ return ARM_AM::getSOImmVal(Value) == -1 &&
+ ARM_AM::getSOImmVal(-Value) != -1;
}
bool isT2SOImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
- // Negation must be representable as a t2_so_imm and be non-zero.
- return Value && ARM_AM::getT2SOImmVal(-Value) != -1;
+ // Only use this when not representable as a plain t2_so_imm.
+ return ARM_AM::getT2SOImmVal(Value) == -1 &&
+ ARM_AM::getT2SOImmVal(-Value) != -1;
}
bool isSetEndImm() const {
if (!isImm()) return false;
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isToken() const { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
- bool isMemory() const { return Kind == k_Memory; }
+ bool isMem() const { return Kind == k_Memory; }
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
- if (!isMemory())
+ if (!isMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
(alignOK || Memory.Alignment == 0);
}
bool isMemPCRelImm12() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base register must be PC.
if (Memory.BaseRegNum != ARM::PC)
return isMemNoOffset(true);
}
bool isAddrMode2() const {
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return true;
// Immediate offset in range [-4095, 4095].
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
// Check for register offset.
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return Val > -256 && Val < 256;
+ // The #-0 offset is encoded as INT32_MIN, and we have to check
+ // for this too.
+ return (Val > -256 && Val < 256) || Val == INT32_MIN;
}
bool isAM3Offset() const {
if (Kind != k_Immediate && Kind != k_PostIndexRegister)
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.Alignment != 0) return false;
+ if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
// Immediate offset in range [-1020, 1020] and a multiple of 4.
Val == INT32_MIN;
}
bool isMemTBB() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}
bool isMemTBH() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
Memory.Alignment != 0 )
return false;
return true;
}
bool isMemRegOffset() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
+ if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}
bool isT2MemRegOffset() const {
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
- if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
+ if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return isARMLowRegister(Memory.BaseRegNum) &&
(!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
}
bool isMemThumbRIs4() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 124].
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
bool isMemThumbRIs2() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 62].
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
bool isMemThumbRIs1() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 31].
return Val >= 0 && Val <= 31;
}
bool isMemThumbSPI() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 ||
+ if (!isMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
return false;
// Immediate offset, multiple of 4 in range [0, 1020].
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
+ // Special case, #-0 is INT32_MIN.
+ return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
}
bool isMemImm0_1020s4Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [0, 1020].
if (!Memory.OffsetImm) return true;
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
bool isMemImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
return (Val == INT32_MIN) || (Val > -256 && Val < 256);
}
bool isMemPosImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 255].
if (!Memory.OffsetImm) return true;
return Val >= 0 && Val < 256;
}
bool isMemNegImm8Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Base reg of PC isn't allowed for these encodings.
if (Memory.BaseRegNum == ARM::PC) return false;
return (Val == INT32_MIN) || (Val > -256 && Val < 0);
}
bool isMemUImm12Offset() const {
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
if (!Memory.OffsetImm) return true;
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
- if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
assert(isRegShiftedImm() &&
"addRegShiftedImmOperands() on non RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
+ // Shift of #32 is encoded as 0 where permitted
+ unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
Inst.addOperand(MCOperand::CreateImm(
- ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
+ ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
}
void addShifterImmOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
}
+ void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate is scaled by four in the encoding and is stored
+ // in the MCInst as such. Lop off the low two bits here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
+ }
+
void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
}
+ void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The operand is actually an imm0_4095, but we have its
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
+ }
+
void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The operand is actually a so_imm, but we have its bitwise
Inst.addOperand(MCOperand::CreateImm(Imm));
}
+ void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(isImm() && "Not an immediate!");
+
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup.
+ if (!isa<MCConstantExpr>(getImm())) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ return;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ int Val = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm(Val));
+ }
+
void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
OS << "<ccout " << getReg() << ">";
break;
case k_ITCondMask: {
- static const char *MaskStr[] = {
+ static const char *const MaskStr[] = {
"()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
"(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
};
const AsmToken &Tok = Parser.getTok();
if (!Tok.is(AsmToken::Identifier))
return MatchOperand_NoMatch;
- unsigned CC = StringSwitch<unsigned>(Tok.getString())
+ unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
.Case("eq", ARMCC::EQ)
.Case("ne", ARMCC::NE)
.Case("hs", ARMCC::HS)
if (!RC->contains(EndReg))
return Error(EndLoc, "invalid register in register list");
// Ranges must go from low to high.
- if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
+ if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
return Error(EndLoc, "bad range in register list");
// Add all the registers in the range to the register list.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
// List must be monotonically increasing.
- if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
+ if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
Warning(RegLoc, "register list not in ascending order");
else
return Error(RegLoc, "register list not in ascending order");
}
- if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
+ if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
Warning(RegLoc, "duplicated register (" + RegTok.getString() +
") in register list");
continue;
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
- StringRef OptStr = Tok.getString();
-
- unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
- .Case("sy", ARM_MB::SY)
- .Case("st", ARM_MB::ST)
- .Case("sh", ARM_MB::ISH)
- .Case("ish", ARM_MB::ISH)
- .Case("shst", ARM_MB::ISHST)
- .Case("ishst", ARM_MB::ISHST)
- .Case("nsh", ARM_MB::NSH)
- .Case("un", ARM_MB::NSH)
- .Case("nshst", ARM_MB::NSHST)
- .Case("unst", ARM_MB::NSHST)
- .Case("osh", ARM_MB::OSH)
- .Case("oshst", ARM_MB::OSHST)
- .Default(~0U);
+ unsigned Opt;
+
+ if (Tok.is(AsmToken::Identifier)) {
+ StringRef OptStr = Tok.getString();
+
+ Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
+ .Case("sy", ARM_MB::SY)
+ .Case("st", ARM_MB::ST)
+ .Case("sh", ARM_MB::ISH)
+ .Case("ish", ARM_MB::ISH)
+ .Case("shst", ARM_MB::ISHST)
+ .Case("ishst", ARM_MB::ISHST)
+ .Case("nsh", ARM_MB::NSH)
+ .Case("un", ARM_MB::NSH)
+ .Case("nshst", ARM_MB::NSHST)
+ .Case("unst", ARM_MB::NSHST)
+ .Case("osh", ARM_MB::OSH)
+ .Case("oshst", ARM_MB::OSHST)
+ .Default(~0U);
- if (Opt == ~0U)
- return MatchOperand_NoMatch;
+ if (Opt == ~0U)
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat identifier token.
+ } else if (Tok.is(AsmToken::Hash) ||
+ Tok.is(AsmToken::Dollar) ||
+ Tok.is(AsmToken::Integer)) {
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ Parser.Lex(); // Eat the '#'.
+ SMLoc Loc = Parser.getTok().getLoc();
+
+ const MCExpr *MemBarrierID;
+ if (getParser().ParseExpression(MemBarrierID)) {
+ Error(Loc, "illegal expression");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
+ if (!CE) {
+ Error(Loc, "constant expression expected");
+ return MatchOperand_ParseFail;
+ }
+
+ int Val = CE->getValue();
+ if (Val & ~0xf) {
+ Error(Loc, "immediate value out of range");
+ return MatchOperand_ParseFail;
+ }
+
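+ // The MemBOpt enumerators match the 4-bit option encoding, so the raw
+ // immediate maps directly onto a barrier option value.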
+ Opt = ARM_MB::RESERVED_0 + Val;
+ } else
+ return MatchOperand_ParseFail;
- Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
return MatchOperand_Success;
}
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
StringRef IFlagsStr = Tok.getString();
// An iflags string of "none" is interpreted to mean that none of the AIF
// See ARMv6-M 10.1.1
std::string Name = Mask.lower();
unsigned FlagsVal = StringSwitch<unsigned>(Name)
- .Case("apsr", 0)
- .Case("iapsr", 1)
- .Case("eapsr", 2)
- .Case("xpsr", 3)
- .Case("ipsr", 5)
- .Case("epsr", 6)
- .Case("iepsr", 7)
- .Case("msp", 8)
- .Case("psp", 9)
- .Case("primask", 16)
- .Case("basepri", 17)
- .Case("basepri_max", 18)
- .Case("faultmask", 19)
- .Case("control", 20)
+ // Note: in the documentation:
+ // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
+ // for MSR APSR_nzcvq.
+ // but we do make it an alias here in order to get the "mask encoding"
+ // bits correct on MSR APSR writes.
+ //
+ // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
+ // should really only be allowed when writing a special register. Note
+ // they get dropped in the MRS instruction reading a special register as
+ // the SYSm field is only 8 bits.
+ //
+ // FIXME: the _g and _nzcvqg versions are only allowed if the processor
+ // includes the DSP extension but that is not checked.
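+ // The low byte of each value is the 8-bit SYSm encoding; 0x800 and 0x400
+ // are the mask bits selecting the nzcvq and g flag groups respectively.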
+ .Case("apsr", 0x800)
+ .Case("apsr_nzcvq", 0x800)
+ .Case("apsr_g", 0x400)
+ .Case("apsr_nzcvqg", 0xc00)
+ .Case("iapsr", 0x801)
+ .Case("iapsr_nzcvq", 0x801)
+ .Case("iapsr_g", 0x401)
+ .Case("iapsr_nzcvqg", 0xc01)
+ .Case("eapsr", 0x802)
+ .Case("eapsr_nzcvq", 0x802)
+ .Case("eapsr_g", 0x402)
+ .Case("eapsr_nzcvqg", 0xc02)
+ .Case("xpsr", 0x803)
+ .Case("xpsr_nzcvq", 0x803)
+ .Case("xpsr_g", 0x403)
+ .Case("xpsr_nzcvqg", 0xc03)
+ .Case("ipsr", 0x805)
+ .Case("epsr", 0x806)
+ .Case("iepsr", 0x807)
+ .Case("msp", 0x808)
+ .Case("psp", 0x809)
+ .Case("primask", 0x810)
+ .Case("basepri", 0x811)
+ .Case("basepri_max", 0x812)
+ .Case("faultmask", 0x813)
+ .Case("control", 0x814)
.Default(~0U);
if (FlagsVal == ~0U)
return MatchOperand_NoMatch;
- if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
+ if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
// basepri, basepri_max and faultmask only valid for V7m.
return MatchOperand_NoMatch;
FlagsVal = 8; // No flag
}
} else if (SpecReg == "cpsr" || SpecReg == "spsr") {
- if (Flags == "all") // cpsr_all is an alias for cpsr_fc
+ // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
+ if (Flags == "all" || Flags == "")
Flags = "fc";
for (int i = 0, e = Flags.size(); i != e; ++i) {
unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtT2LdrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt, Rt2
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtT2StrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateReg(0));
((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrMode2(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStExtTWriteBackImm(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStExtTWriteBackReg(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Rt, Rt2
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtStrdPre(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtStrdPre(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtLdWriteBackRegAddrMode3(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-/// cvtThumbMultiple- Convert parsed operands to MCInst.
+/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
-bool ARMAsmParser::
-cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtThumbMultiply(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // The second source operand must be the same register as the destination
- // operand.
- if (Operands.size() == 6 &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[5])->getReg()) &&
- (((ARMOperand*)Operands[3])->getReg() !=
- ((ARMOperand*)Operands[4])->getReg())) {
- Error(Operands[3]->getStartLoc(),
- "destination register must match source register");
- return false;
- }
((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
// If we have a three-operand form, make sure to set Rn to be the operand
((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
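+ // The destination register is unconditionally copied in as the second
+ // source operand; the tMUL case in validateInstruction() checks the parsed
+ // operands for a mismatch.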
Inst.addOperand(Inst.getOperand(0));
((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
-
- return true;
}
-bool ARMAsmParser::
-cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
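+/// cvtVLDwbFixed - Convert parsed operands to MCInst.
+/// As with the converters above, the VLD/VST writeback forms need custom
+/// conversion because the generated matcher can't handle the tied writeback
+/// operand.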
+void ARMAsmParser::
+cvtVLDwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Vd
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVLDwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Vd
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVSTwbFixed(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
-bool ARMAsmParser::
-cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+void ARMAsmParser::
+cvtVSTwbRegister(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Create a writeback register dummy placeholder.
Inst.addOperand(MCOperand::CreateImm(0));
((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
// pred
((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
- return true;
}
/// Parse an ARM memory expression, return false if successful else return true
((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
return Error(Loc, "immediate shift value out of range");
+ // If <ShiftTy> #0, turn it into a no_shift.
+ if (Imm == 0)
+ St = ARM_AM::lsl;
+ // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
+ if (Imm == 32)
+ Imm = 0;
Amount = Imm;
}
case AsmToken::Dollar:
case AsmToken::Hash: {
// #42 -> immediate.
- // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
S = Parser.getTok().getLoc();
Parser.Lex();
- bool isNegative = Parser.getTok().is(AsmToken::Minus);
- const MCExpr *ImmVal;
- if (getParser().ParseExpression(ImmVal))
- return true;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
- if (CE) {
- int32_t Val = CE->getValue();
- if (isNegative && Val == 0)
- ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
+
+ if (Parser.getTok().isNot(AsmToken::Colon)) {
+ bool isNegative = Parser.getTok().is(AsmToken::Minus);
+ const MCExpr *ImmVal;
+ if (getParser().ParseExpression(ImmVal))
+ return true;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
+ if (CE) {
+ int32_t Val = CE->getValue();
+ if (isNegative && Val == 0)
+ ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
+ }
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
+ return false;
}
- E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
- return false;
+ // With a ':' after the '#', it's just like a plain ':'.
+ // FALLTHROUGH
}
case AsmToken::Colon: {
// ":lower16:" and ":upper16:" expression prefixes
return true;
const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
- getContext());
+ getContext());
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
return false;
Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
+ Mnemonic == "vfms" || Mnemonic == "vfnms" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
Mnemonic == "orr" || Mnemonic == "mvn" ||
Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
+ Mnemonic == "vfm" || Mnemonic == "vfnm" ||
(!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
Mnemonic == "mla" || Mnemonic == "smlal" ||
Mnemonic == "umlal" || Mnemonic == "umull"))) {
static_cast<ARMOperand*>(Operands[4])->isReg() &&
static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- (static_cast<ARMOperand*>(Operands[5])->isReg() ||
+ ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
return true;
// For Thumb2, add/sub immediate does not have a cc_out operand for the
(Operands.size() == 5 || Operands.size() == 6) &&
static_cast<ARMOperand*>(Operands[3])->isReg() &&
static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
+ static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
+ (static_cast<ARMOperand*>(Operands[4])->isImm() ||
+ (Operands.size() == 6 &&
+ static_cast<ARMOperand*>(Operands[5])->isImm())))
return true;
return false;
case ARM::LDRD_POST:
case ARM::LDREXD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"destination operands must be sequential");
}
case ARM::STRD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"source operands must be sequential");
case ARM::STRD_POST:
case ARM::STREXD: {
// Rt2 must be Rt + 1.
- unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
- unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
+ unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
+ unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
if (Rt2 != Rt + 1)
return Error(Operands[3]->getStartLoc(),
"source operands must be sequential");
"in register list");
break;
}
+ case ARM::tMUL: {
+ // The second source operand must be the same register as the destination
+ // operand.
+ //
+ // In this case, we must directly check the parsed operands because the
+ // cvtThumbMultiply() function is written in such a way that it guarantees
+ // this first statement is always true for the new Inst. Essentially, the
+ // destination is unconditionally copied into the second source operand
+ // without checking to see if it matches what we actually parsed.
+ if (Operands.size() == 6 &&
+ (((ARMOperand*)Operands[3])->getReg() !=
+ ((ARMOperand*)Operands[5])->getReg()) &&
+ (((ARMOperand*)Operands[3])->getReg() !=
+ ((ARMOperand*)Operands[4])->getReg())) {
+ return Error(Operands[3]->getStartLoc(),
+ "destination register must match source register");
+ }
+ break;
+ }
// Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
// so only issue a diagnostic for thumb1. The instructions will be
// switched to the t2 encodings in processInstruction() if necessary.
"registers must be in range r0-r7");
break;
}
+ case ARM::tADDrSP: {
+ // If the non-SP source operand and the destination operand are not the
+ // same, we need thumb2 (for the wide encoding), or we have an error.
+ if (!isThumbTwo() &&
+ Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
+ return Error(Operands[4]->getStartLoc(),
+ "source register must be the same as destination");
+ }
+ break;
+ }
}
return false;
return true;
}
+ // Handle encoding choice for the shift-immediate instructions.
+ case ARM::t2LSLri:
+ case ARM::t2LSRri:
+ case ARM::t2ASRri: {
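+ // The narrow encoding is only usable when the destination is a low
+ // register that matches the source, the flag-setting behaviour matches the
+ // IT-block state, and the wide '.w' qualifier was not given explicitly.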
+ if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
+ Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
+ !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
+ static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
+ case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
+ case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
+ }
+ // The Thumb1 operands aren't in the same order. Awesome, eh?
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
+
// Handle the Thumb2 mode MOV complex aliases.
case ARM::t2MOVsr:
case ARM::t2MOVSsr: {
case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
}
- unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
- if (Ammount == 32) Ammount = 0;
+ unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
+ if (Amount == 32) Amount = 0;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0)); // Rd
if (isNarrow)
Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
TmpInst.addOperand(Inst.getOperand(1)); // Rn
if (newOpc != ARM::t2RRX)
- TmpInst.addOperand(MCOperand::CreateImm(Ammount));
+ TmpInst.addOperand(MCOperand::CreateImm(Amount));
TmpInst.addOperand(Inst.getOperand(3)); // CondCode
TmpInst.addOperand(Inst.getOperand(4));
if (!isNarrow)
// A shift by zero is a plain MOVr, not a MOVsi.
unsigned Amt = Inst.getOperand(2).getImm();
unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
+ // A shift by 32 should be encoded as 0 when permitted
+ if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
+ Amt = 0;
unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
MCInst TmpInst;
TmpInst.setOpcode(Opc);
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
- if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
+ if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tADDi3);
return true;
}
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
- if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
+ if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tSUBi3);
return true;
}
break;
+ case ARM::t2ADDri:
+ case ARM::t2SUBri: {
+ // If the destination and first source operand are the same, and
+ // the flags are compatible with the current IT status, use encoding T2
+ // instead of T3. For compatibility with the system 'as'. Make sure the
+ // wide encoding wasn't explicit.
+ if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
+ !isARMLowRegister(Inst.getOperand(0).getReg()) ||
+ (unsigned)Inst.getOperand(2).getImm() > 255 ||
+ ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
+ (static_cast<ARMOperand*>(Operands[3])->isToken() &&
+ static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
+ break;
+ MCInst TmpInst;
+ TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
+ ARM::tADDi8 : ARM::tSUBi8);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
case ARM::t2ADDrr: {
// If the destination and first source operand are the same, and
// there's no setting of the flags, use encoding T2 instead of T3.
Inst = TmpInst;
return true;
}
+ case ARM::tADDrSP: {
+ // If the non-SP source operand and the destination operand are not the
+ // same, we need to use the 32-bit encoding if it's available.
+ if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
+ Inst.setOpcode(ARM::t2ADDrr);
+ Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ return true;
+ }
+ break;
+ }
case ARM::tB:
// A Thumb conditional branch outside of an IT block is a tBcc.
if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
- Inst.getOperand(1).getImm() <= 255 &&
+ (unsigned)Inst.getOperand(1).getImm() <= 255 &&
((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
Inst.getOperand(4).getReg() == ARM::CPSR) ||
(inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
}
case ARM::MOVsi: {
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
- if (SOpc == ARM_AM::rrx) return false;
+ // rrx shifts and asr/lsr of #32 are encoded as 0.
+ if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
+ return false;
if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
// Shifting by zero is accepted as a vanilla 'MOVr'
MCInst TmpInst;
case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
}
// If the shift is by zero, use the non-shifted instruction definition.
- if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
+ // The exception is right shifts, where 0 encodes a shift of 32.
+ if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
+ !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
MCInst TmpInst;
TmpInst.setOpcode(newOpc);
TmpInst.addOperand(Inst.getOperand(0));
// The mask bits for all but the first condition are represented as
// the low bit of the condition code value implies 't'. We currently
// always have 1 implies 't', so XOR toggle the bits if the low bit
- // of the condition code is zero. The encoding also expects the low
- // bit of the condition to be encoded as bit 4 of the mask operand,
- // so mask that in if needed
+ // of the condition code is zero.
MCOperand &MO = Inst.getOperand(1);
unsigned Mask = MO.getImm();
unsigned OrigMask = Mask;
assert(Mask && TZ <= 3 && "illegal IT mask value!");
for (unsigned i = 3; i != TZ; --i)
Mask ^= 1 << i;
- } else
- Mask |= 0x10;
+ }
MO.setImm(Mask);
// Set up the IT block state according to the IT instruction we just
ITState.FirstCond = true;
break;
}
+ case ARM::t2LSLrr:
+ case ARM::t2LSRrr:
+ case ARM::t2ASRrr:
+ case ARM::t2SBCrr:
+ case ARM::t2RORrr:
+ case ARM::t2BICrr:
+ {
+ // Assemblers should use the narrow encodings of these instructions
+ // when permissible.
+ if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ isARMLowRegister(Inst.getOperand(2).getReg())) &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
+ ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
+ !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
+ case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
+ case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
+ case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
+ case ARM::t2RORrr: NewOpc = ARM::tROR; break;
+ case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
+ }
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
+ case ARM::t2ANDrr:
+ case ARM::t2EORrr:
+ case ARM::t2ADCrr:
+ case ARM::t2ORRrr:
+ {
+ // Assemblers should use the narrow encodings of these instructions
+ // when permissible. These instructions are special in that they are
+ // commutable, so shorter encodings are available more often.
+ if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ isARMLowRegister(Inst.getOperand(2).getReg())) &&
+ (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
+ Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
+ ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
+ (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
+ !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
+ case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
+ case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
+ case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
+ }
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
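+ // Commute the sources if necessary so that the register tied to the
+ // destination comes first.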
+ if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ } else {
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(1));
+ }
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
}
return false;
}
return Match_Success;
}
+static const char *getSubtargetFeatureName(unsigned Val);
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out) {
MCInst Inst;
+ unsigned Kind;
unsigned ErrorInfo;
unsigned MatchResult;
- MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
+
+ MatchResult = MatchInstructionImpl(Operands, Kind, Inst, ErrorInfo);
switch (MatchResult) {
default: break;
case Match_Success:
Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
return false;
- case Match_MissingFeature:
- Error(IDLoc, "instruction requires a CPU feature not currently enabled");
- return true;
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
case Match_InvalidOperand: {
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0U) {
return Error(ErrorLoc, "invalid operand for instruction");
}
case Match_MnemonicFail:
- return Error(IDLoc, "invalid instruction");
- case Match_ConversionFail:
- // The converter function will have already emited a diagnostic.
- return true;
+ return Error(IDLoc, "invalid instruction",
+ ((ARMOperand*)Operands[0])->getLocRange());
case Match_RequiresNotITBlock:
return Error(IDLoc, "flag setting instruction only valid outside IT block");
case Match_RequiresITBlock:
return Error(IDLoc, "instruction variant requires ARMv6 or later");
case Match_RequiresThumb2:
return Error(IDLoc, "instruction variant requires Thumb2");
+ case Match_ImmRange0_15: {
+ SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
+ }
}
llvm_unreachable("Implement any new match types added!");
}
#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"