//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // We can't copy vec4 registers with a single instruction, so mixed
    // vec4/scalar copies must never reach this path.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

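// Note: for a 128-bit copy such as T1 <- T0 (register names illustrative),
// the loop above emits four channel moves:
//   MOV T1.X, T0.X ; MOV T1.Y, T0.Y ; MOV T1.Z, T0.Z ; MOV T1.W, T0.W
// each carrying an implicit def of the T1 super-register so liveness of the
// full vec4 is tracked across the partial writes.
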
MachineInstr *R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                            unsigned DstReg, int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

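// Note: the MOV produced here reads ALU_LITERAL_X, so the immediate travels
// in the literal operand added above rather than in a source register; the
// trailing zero register operand stands in for an unset PREDICATE_BIT.
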
unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

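// Note: OP1, OP2 and OP3 mark the one-, two- and three-source ALU encodings,
// so an opcode is an ALU instruction exactly when one of those flags is set.
// For example, AMDGPU::MOV is OP1-encoded and tests true here, while control
// flow opcodes carry none of the three flags and test false.
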
DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

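// Note: for a conditional branch, the Cond vector built above has a fixed
// three-operand layout:
//   Cond[0] - operand 1 of the PRED_X predicate setter,
//   Cond[1] - the comparison immediate (an OPCODE_IS_* value),
//   Cond[2] - a PRED_SEL_* register (PRED_SEL_ONE here).
// InsertBranch and ReverseBranchCondition below depend on this layout.
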
int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

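// Note (illustrative): given a Cond vector from AnalyzeBranch, predicating
// an instruction rewrites its predicate operand to Cond[2]'s PRED_SEL_*
// register and adds an implicit use of PREDICATE_BIT, so the result is only
// committed when the selected predicate value fires.
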
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
              static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

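// Illustrative result (register names assume the usual class ordering): with
// StackWidth == 1 and indirect indices 2..3, this reserves the super-registers
// T2_XYZW and T3_XYZW plus the channel registers T2_X and T3_X, since
// (4 * Index) + Chan selects channel Chan of register Index.
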
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

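// Illustrative sequence (register names are examples): a write to indirect
// address 0 becomes
//   MOVA_INT_eg AR_X, OffsetReg   ; load the index into the address register
//   MOV Addr0, ValueReg           ; $dst_rel = 1 makes the store relative
// with the implicit AR_X use tying the relative MOV to the MOVA.
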
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg, AddrReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                      // $last
     .addReg(AMDGPU::PRED_SEL_OFF)   // $pred_sel
     .addImm(0);                     // $literal

  return MIB;
}

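// Example (illustrative): buildDefaultInstruction(MBB, I, AMDGPU::MOV,
// DstReg, SrcReg) yields a single-source MOV with every modifier defaulted:
// write enabled, no neg/abs/rel/clamp, $src0_sel = -1 (take the register
// operand itself), marked $last, and PRED_SEL_OFF for the predicate.
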
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

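// Example (illustrative): buildMovImm(BB, I, DstReg, 0x3F800000) emits
//   MOV DstReg, ALU_LITERAL_X   ; literal = 0x3F800000 (1.0f)
// i.e. the default MOV template above with the immediate parked in the
// trailing $literal operand via R600Operands::IMM.
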
int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
                                                 "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

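// Example (illustrative): for an OP2-encoded opcode, the machine-operand
// index of SRC1_NEG is R600Operands::ALUOpTable[1][R600Operands::SRC1_NEG];
// OP1 opcodes read row 0 and OP3 opcodes row 2 of the same table.
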
void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;
    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;
    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

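// Example (illustrative): on an instruction without native operands,
// addFlag(MI, 1, MO_FLAG_NEG) ORs MO_FLAG_NEG, shifted into operand 1's
// NUM_MO_FLAGS-bit field, into the single packed flag operand; clearFlag
// below masks the same bits back out of that field.
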
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}