MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
AddrReg, ValueReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
setImmOperand(Mov, R600Operands::DST_REL, 1);
return Mov;
}
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
ValueReg,
AddrReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
setImmOperand(Mov, R600Operands::SRC0_REL, 1);
return Mov;
bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
(!Available[IDFetch].empty() || !Available[IDOther].empty());
- if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
- (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
+ // We want to schedule AR defs as soon as possible to make sure they aren't
+ // put in a different ALU clause from their uses.
+ if (!SU && !UnscheduledARDefs.empty()) {
+ SU = UnscheduledARDefs[0];
+ UnscheduledARDefs.erase(UnscheduledARDefs.begin());
+ NextInstKind = IDAlu;
+ }
+
+ if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
+ (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
// try to pick ALU
SU = pickAlu();
if (SU) {
NextInstKind = IDOther;
}
+ // We want to schedule the AR uses as late as possible to make sure that
+ // the AR defs have been released.
+ if (!SU && !UnscheduledARUses.empty()) {
+ SU = UnscheduledARUses[0];
+ UnscheduledARUses.erase(UnscheduledARUses.begin());
+ NextInstKind = IDAlu;
+ }
+
+
DEBUG(
if (SU) {
dbgs() << " ** Pick node **\n";
DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
int IK = getInstKind(SU);
+
+ // Check for AR register defines
+ for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
+ E = SU->getInstr()->operands_end();
+ I != E; ++I) {
+ if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
+ if (I->isDef()) {
+ UnscheduledARDefs.push_back(SU);
+ } else {
+ UnscheduledARUses.push_back(SU);
+ }
+ return;
+ }
+ }
+
// There is no export clause, we can schedule one as soon as its ready
if (IK == IDOther)
Available[IDOther].push_back(SU);
std::vector<SUnit *> Available[IDLast], Pending[IDLast];
std::vector<SUnit *> AvailableAlus[AluLast];
+ std::vector<SUnit *> UnscheduledARDefs;
+ std::vector<SUnit *> UnscheduledARUses;
InstKind CurInstKind;
int CurEmitted;
--- /dev/null
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; This test checks that uses and defs of the AR register happen in the same
+; instruction clause.
+
+; CHECK: @mova_same_clause
+; CHECK: MOVA_INT
+; CHECK-NOT: ALU clause
+; CHECK: 0 + AR.x
+; CHECK: MOVA_INT
+; CHECK-NOT: ALU clause
+; CHECK: 0 + AR.x
+
+define void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+entry:
+ %stack = alloca [5 x i32], align 4
+ %0 = load i32 addrspace(1)* %in, align 4
+ %arrayidx1 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 %0
+ store i32 4, i32* %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %in, i32 1
+ %1 = load i32 addrspace(1)* %arrayidx2, align 4
+ %arrayidx3 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 %1
+ store i32 5, i32* %arrayidx3, align 4
+ %arrayidx10 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 0
+ %2 = load i32* %arrayidx10, align 4
+ store i32 %2, i32 addrspace(1)* %out, align 4
+ %arrayidx12 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 1
+ %3 = load i32* %arrayidx12
+ %arrayidx13 = getelementptr inbounds i32 addrspace(1)* %out, i32 1
+ store i32 %3, i32 addrspace(1)* %arrayidx13
+ ret void
+}