R600: Enable folding of inline literals into REG_SEQUENCE instructions
author: Tom Stellard <thomas.stellard@amd.com>
Fri, 16 Aug 2013 01:11:55 +0000 (01:11 +0000)
committer: Tom Stellard <thomas.stellard@amd.com>
Fri, 16 Aug 2013 01:11:55 +0000 (01:11 +0000)
Tested-by: Aaron Watry <awatry@gmail.com>
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188517 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/R600/AMDGPUISelDAGToDAG.cpp
lib/Target/R600/R600OptimizeVectorRegisters.cpp
test/CodeGen/R600/literals.ll

index 77ca8856018f6c04b125619a236075908157ff39..4f78f2938f9b36dc38fd5ef8c8d669c430909b76 100644 (file)
@@ -453,29 +453,32 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
             continue;
           }
       } else {
-        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
-            (TII->get(Use->getMachineOpcode()).TSFlags &
-            R600_InstFlag::VECTOR)) {
-          continue;
-        }
-
-        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
-                                        AMDGPU::OpName::literal);
-        if (ImmIdx == -1) {
-          continue;
-        }
-
-        if (TII->getOperandIdx(Use->getMachineOpcode(),
-                               AMDGPU::OpName::dst) != -1) {
-          // subtract one from ImmIdx, because the DST operand is usually index
-          // 0 for MachineInstrs, but we have no DST in the Ops vector.
-          ImmIdx--;
+        switch(Use->getMachineOpcode()) {
+        case AMDGPU::REG_SEQUENCE: break;
+        default:
+          if (!TII->isALUInstr(Use->getMachineOpcode()) ||
+              (TII->get(Use->getMachineOpcode()).TSFlags &
+               R600_InstFlag::VECTOR)) {
+            continue;
+          }
         }
 
         // Check that we aren't already using an immediate.
         // XXX: It's possible for an instruction to have more than one
         // immediate operand, but this is not supported yet.
         if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+          int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
+                                          AMDGPU::OpName::literal);
+          if (ImmIdx == -1) {
+            continue;
+          }
+
+          if (TII->getOperandIdx(Use->getMachineOpcode(),
+                                 AMDGPU::OpName::dst) != -1) {
+            // subtract one from ImmIdx, because the DST operand is usually index
+            // 0 for MachineInstrs, but we have no DST in the Ops vector.
+            ImmIdx--;
+          }
           ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
           assert(C);
 
index acacffa85d5f19eea98e63a05271e3e3ba18b668..cf719c0b9fe12200aa031e1a2a3cbbc9dcedb627 100644 (file)
@@ -50,6 +50,9 @@ isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
       E = MRI.def_end(); It != E; ++It) {
     return (*It).isImplicitDef();
   }
+  if (MRI.isReserved(Reg)) {
+    return false;
+  }
   llvm_unreachable("Reg without a def");
   return false;
 }
index 77b168ebdee30df0c60de85f96c2c0a331718b4d..7a113f1a4c58d524fc4d27db524e287bc6974a05 100644 (file)
@@ -31,3 +31,16 @@ entry:
   store float %0, float addrspace(1)* %out
   ret void
 }
+
+; Make sure inline literals are folded into REG_SEQUENCE instructions.
+; CHECK: @inline_literal_reg_sequence
+; CHECK: MOV T[[GPR:[0-9]]].X, 0.0
+; CHECK-NEXT: MOV T[[GPR]].Y, 0.0
+; CHECK-NEXT: MOV T[[GPR]].Z, 0.0
+; CHECK-NEXT: MOV * T[[GPR]].W, 0.0
+
+define void @inline_literal_reg_sequence(<4 x i32> addrspace(1)* %out) {
+entry:
+  store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> addrspace(1)* %out
+  ret void
+}