//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
11 #include "SIDefines.h"
12 #include "llvm/ADT/APFloat.h"
13 #include "llvm/ADT/SmallString.h"
14 #include "llvm/ADT/SmallVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/ADT/Twine.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrInfo.h"
22 #include "llvm/MC/MCParser/MCAsmLexer.h"
23 #include "llvm/MC/MCParser/MCAsmParser.h"
24 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/MC/MCStreamer.h"
27 #include "llvm/MC/MCSubtargetInfo.h"
28 #include "llvm/MC/MCTargetAsmParser.h"
29 #include "llvm/Support/SourceMgr.h"
30 #include "llvm/Support/TargetRegistry.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Support/Debug.h"
struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  SMLoc StartLoc, EndLoc;

  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  const MCRegisterInfo *TRI;

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateImm(getImm()));

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateReg(getReg()));

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
      addRegOperands(Inst, N);
      addImmOperands(Inst, N);

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::CreateImm(
        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
      addImmOperands(Inst, N);
      Inst.addOperand(MCOperand::CreateExpr(Expr));

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");

  bool isToken() const override {
    return Kind == Token;

  bool isImm() const override {
    return Kind == Immediate;
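  // An inline immediate is a value the hardware encodes directly in the
  // instruction word instead of using the extra 32-bit literal slot: small
  // integers in [-16, 64] and a short list of floating-point constants
  // (0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0), as checked below.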
  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));

  bool isDSOffset0() const {
    return Imm.Type == ImmTyDSOffset0;

  bool isDSOffset1() const {
    return Imm.Type == ImmTyDSOffset1;

  int64_t getImm() const {

  enum ImmTy getImmTy() const {

  bool isRegKind() const {
    return Kind == Register;

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);

  void setModifiers(unsigned Mods) {
    Reg.Modifiers = Mods;

  bool hasModifiers() const {
    return Reg.Modifiers != -1;

  unsigned getReg() const override {

  bool isRegOrImm() const {
    return isReg() || isImm();

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
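  // Operand predicates used by the generated asm matcher. The "S" variants
  // accept scalar (SGPR) registers, the "V" variants accept the combined
  // VS_* classes (VGPRs or SGPRs), and the extra "C" in SCSrc/VCSrc
  // restricts immediates to inline constants, while the plain Src forms
  // also allow 32-bit literal immediates.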
  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));

  bool isMem() const override {

  bool isExpr() const {
    return Kind == Expression;

  bool isSoppBrTarget() const {
    return isExpr() || isImm();

  SMLoc getStartLoc() const override {

  SMLoc getEndLoc() const override {

  void print(raw_ostream &OS) const override { }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.IsFPImm = IsFPImm;

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  const MCRegisterInfo *TRI,
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  const MCInstrInfo &MII;

  unsigned ForcedEncodingSize;
  /// @name Auto-generated Match Functions

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {

    if (!STI.getFeatureBits()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);

struct OptionalOperand {
  AMDGPUOperand::ImmTy Type;
  bool (*ConvertResult)(int64_t&);
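// Map a register width in 32-bit dwords to the corresponding VGPR or SGPR
// register class ID; widths with no matching class fall into the
// llvm_unreachable default below.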
static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
    default: llvm_unreachable("Unknown register width");
    case 1: return AMDGPU::VGPR_32RegClassID;
    case 2: return AMDGPU::VReg_64RegClassID;
    case 3: return AMDGPU::VReg_96RegClassID;
    case 4: return AMDGPU::VReg_128RegClassID;
    case 8: return AMDGPU::VReg_256RegClassID;
    case 16: return AMDGPU::VReg_512RegClassID;

    default: llvm_unreachable("Unknown register width");
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;

static unsigned getRegForName(const StringRef &RegName) {
  return StringSwitch<unsigned>(RegName)
      .Case("exec", AMDGPU::EXEC)
      .Case("vcc", AMDGPU::VCC)
      .Case("flat_scr", AMDGPU::FLAT_SCR)
      .Case("m0", AMDGPU::M0)
      .Case("scc", AMDGPU::SCC)
      .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
      .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
      .Case("vcc_lo", AMDGPU::VCC_LO)
      .Case("vcc_hi", AMDGPU::VCC_HI)
      .Case("exec_lo", AMDGPU::EXEC_LO)
      .Case("exec_hi", AMDGPU::EXEC_HI)
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const StringRef &RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))

    // We have a register greater than 32-bits.

    int64_t RegLo, RegHi;
    if (getLexer().isNot(AsmToken::LBrac))

    if (getParser().parseAbsoluteExpression(RegLo))

    if (getLexer().isNot(AsmToken::Colon))

    if (getParser().parseAbsoluteExpression(RegHi))

    if (getLexer().isNot(AsmToken::RBrac))

    RegWidth = (RegHi - RegLo) + 1;

    // VGPR registers aren't aligned.
    RegIndexInClass = RegLo;

    // SGPR registers are aligned. Max alignment is 4 dwords.
    RegIndexInClass = RegLo / std::min(RegWidth, 4u);

  const MCRegisterInfo *TRC = getContext().getRegisterInfo();
  unsigned RC = getRegClass(IsVgpr, RegWidth);
  if (RegIndexInClass > TRC->getRegClass(RC).getNumRegs())

  RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
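// If the user forced a particular encoding with an _e32 or _e64 mnemonic
// suffix, reject any match whose VOP3 flag disagrees with that choice.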
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  return Match_Success;

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              bool MatchingInlineAsm) {

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    Out.EmitInstruction(Inst, STI);

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,

        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())

    return Error(ErrorLoc, "invalid operand for instruction");

  llvm_unreachable("Implement any new match types added!");

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {

static bool operandsHaveModifiers(const OperandVector &Operands) {

  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())

    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
        Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
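// Parse a single operand. Custom (tablegen-registered) parsers are tried
// first; the switch below then handles integer literals, floating-point
// literals, registers with optional '-' and '|...|' modifiers, and bare
// tokens.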
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {

  if (getLexer().getKind() == AsmToken::Pipe) {

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    APInt IntVal32(32, IntVal);
    if (IntVal32.getSExtValue() != IntVal) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;

    IntVal = IntVal32.getSExtValue();

    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;

  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;

  case AsmToken::Identifier: {
    if (!ParseRegister(RegNo, S, E)) {
      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (getLexer().getKind() != AsmToken::Pipe)
        return MatchOperand_ParseFail;
      if (Modifiers && !HasModifiers) {
        // We are adding a modifier to src1 or src2 and previous sources
        // don't have modifiers, so we need to go back and empty modifiers
        // for each previous source.
        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;

          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
          RegOp.setModifiers(0);
      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(),

      if (HasModifiers || Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);

      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),

    return MatchOperand_Success;

  return MatchOperand_NoMatch;

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))

    case MatchOperand_Success: break;
    case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                              "failed parsing operand.");
    case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                            "not a valid operand.");

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//
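// Parse an operand written as "<prefix>:<value>", e.g. "offset:16". Returns
// MatchOperand_NoMatch when the prefix is absent and MatchOperand_ParseFail
// when the prefix is present but malformed.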
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    return MatchOperand_Success;

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;

  return MatchOperand_Success;

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {

  SMLoc S = Parser.getTok().getLoc();

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
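// Parse a single-bit flag operand such as "glc"/"noglc": the bare name sets
// the bit, the "no"-prefixed form clears it, and at end of statement the
// default value is appended so the operand list stays complete.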
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {

  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {

        return MatchOperand_NoMatch;

      return MatchOperand_NoMatch;

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))

    AMDGPUAsmParser::OperandMatchResultTy Res;
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)

      Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

      if (Res == MatchOperand_NoMatch)

    if (Res != MatchOperand_Success)

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;

  return MatchOperand_NoMatch;
//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                       AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
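// Build the MCInst for DS instructions that use the split offset0/offset1
// form: register operands are appended in source order, then the offset0,
// offset1 and gds immediates are looked up by type (so they may be written
// in any order), and the implicit m0 register is added last.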
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
      Op.addRegOperands(Inst, 1);

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
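// Build the MCInst for single-offset DS instructions. A literal "gds" token
// in the asm string marks the GDS-only variant; otherwise the offset
// immediate and gds bit are appended before the implicit m0 register.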
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
      Op.addRegOperands(Inst, 1);

    if (Op.isToken() && Op.getToken() == "gds") {

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds

  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//
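// Parse one "name(value)" term of an s_waitcnt operand (vmcnt, expcnt or
// lgkmcnt) and fold it into IntVal: the counter's bit field is cleared with
// CntMask/CntShift and then set to the parsed value. Several terms can be
// chained with '&' or ','.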
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();

  if (getLexer().isNot(AsmToken::LParen))

  if (getLexer().isNot(AsmToken::Integer))

  if (getParser().parseAbsoluteExpression(CntVal))

  if (getLexer().isNot(AsmToken::RParen))

  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))

  if (CntName == "vmcnt") {
  } else if (CntName == "expcnt") {
  } else if (CntName == "lgkmcnt") {

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;

  case AsmToken::Identifier:
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));

  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;

bool AMDGPUOperand::isSWaitCnt() const {

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
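// A sopp branch target is either an integer immediate or a symbol name; a
// symbol is kept as an MCSymbolRefExpr so it can be resolved later.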
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::Create(getContext().GetOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));

    return MatchOperand_Success;
//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
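// Build the MCInst for MUBUF instructions: registers and an immediate
// soffset are appended in source order, and the optional offset, glc, slc
// and tfe immediates are then appended in the fixed order the encoding
// expects.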
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
      Op.addRegOperands(Inst, 1);

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)

static bool ConvertOmodDiv(int64_t &Div) {

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))

  if (Operands.size() >= 5)

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
        Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);

  return MatchOperand_NoMatch;
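// Build the MCInst for a VOP3 instruction: the destination register first,
// then each source either as a (modifiers, register) pair or as a plain
// register/immediate, followed by the clamp and omod immediates when
// modifiers are present.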
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);

      OptionalIdx[Op.getImmTy()] = i;

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);

    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);

/// Force static initialization.
extern "C" void LLVMInitializeR600AsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"