unsigned RegNo;
int Modifiers;
const MCRegisterInfo *TRI;
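+ // Set when the mnemonic's _e64 suffix forces the VOP3 encoding, so the
+ // operand still matches as a register with input modifiers even when
+ // none were written.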
+ bool IsForcedVOP3;
};
union {
  TokOp Tok;
  ImmOp Imm;
  RegOp Reg;
};
void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateImm(Reg.Modifiers));
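+ // A Modifiers value of -1 means none were parsed; emit 0 (no modifiers)
+ // in that case.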
+ Inst.addOperand(MCOperand::CreateImm(
+ Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
addRegOperands(Inst, N);
}
return Imm.Type;
}
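+ // Any register operand, with or without input modifiers.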
+ bool isRegKind() const {
+ return Kind == Register;
+ }
+
bool isReg() const override {
return Kind == Register && Reg.Modifiers == -1;
}
bool isRegWithInputMods() const {
- return Kind == Register && Reg.Modifiers != -1;
+ return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
}
void setModifiers(unsigned Mods) {
Reg.Modifiers = Mods;
}
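+ // Whether input modifiers were actually parsed; -1 marks them as unset.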
+ bool hasModifiers() const {
+ assert(isRegKind());
+ return Reg.Modifiers != -1;
+ }
+
unsigned getReg() const override {
return Reg.RegNo;
}
static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
SMLoc E,
- const MCRegisterInfo *TRI) {
+ const MCRegisterInfo *TRI,
+ bool ForceVOP3) {
auto Op = llvm::make_unique<AMDGPUOperand>(Register);
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
Op->Reg.Modifiers = -1;
+ Op->Reg.IsForcedVOP3 = ForceVOP3;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
ForcedEncodingSize = Size;
}
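+ // A _e64 mnemonic suffix sets ForcedEncodingSize to 64 and requests the
+ // VOP3 encoding.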
+ bool isForcedVOP3() const {
+ return ForcedEncodingSize == 64;
+ }
+
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size()) {
+ if (isForcedVOP3()) {
+ // If the 64-bit encoding has been forced, we can end up with no clamp
+ // or omod operands when none of the registers have modifiers, so add
+ // them to the operand list here and retry the match.
+ AMDGPUOperand &LastOp =
+ ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
+ if (LastOp.isRegKind() ||
+ (LastOp.isImm() &&
+ LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
+ SMLoc S = Parser.getTok().getLoc();
+ Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+ AMDGPUOperand::ImmTyClamp));
+ Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+ AMDGPUOperand::ImmTyOMod));
+ bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
+ Out, ErrorInfo,
+ MatchingInlineAsm);
+ if (!Res)
+ return Res;
+ }
+
+ }
return Error(IDLoc, "too few operands for instruction");
}
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
- if (Op.isRegWithInputMods())
+ if (Op.isRegKind() && Op.hasModifiers())
return true;
if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
Operands.push_back(AMDGPUOperand::CreateReg(
- RegNo, S, E, getContext().getRegisterInfo()));
+ RegNo, S, E, getContext().getRegisterInfo(),
+ isForcedVOP3()));
if (HasModifiers || Modifiers) {
AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
// VOPC Instructions
//===----------------------------------------------------------------------===//
+// Test forced e64 encoding
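+// The SGPR-pair destination and the negated source are only encodable in
+// VOP3, so this mainly checks that the explicit _e64 suffix is accepted
+// and printed back.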
+
+v_cmp_lt_f32_e64 s[2:3], v4, -v6
+// CHECK: v_cmp_lt_f32_e64 s[2:3], v4, -v6 ; encoding: [0x02,0x00,0x02,0xd0,0x04,0x0d,0x02,0x40]
+
//
// Modifier tests:
//
// VOP2 Instructions
//===----------------------------------------------------------------------===//
+// Test forced e64 encoding with e32 operands
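+// v_ldexp_f32 also has a VOP2 form; with no modifiers on any operand the
+// assembler must supply the implicit clamp and omod operands to match VOP3.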
+
+v_ldexp_f32_e64 v1, v3, v5
+// CHECK: v_ldexp_f32_e64 v1, v3, v5 ; encoding: [0x01,0x00,0x56,0xd2,0x03,0x0b,0x02,0x00]
+
+
// TODO: Modifier tests
v_cndmask_b32 v1, v3, v5, s[4:5]