Revert the optimization in r122596. It is correct for all current targets, but
it relies on assumptions that may not be true in the future.

author    Cameron Zwarich <zwarich@apple.com>
          Tue, 28 Dec 2010 23:02:56 +0000 (23:02 +0000)
committer Cameron Zwarich <zwarich@apple.com>
          Tue, 28 Dec 2010 23:02:56 +0000 (23:02 +0000)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122608 91177308-0d34-0410-b5e6-96231b3b80d8

lib/CodeGen/StrongPHIElimination.cpp

index f84fdbe5bc2c45990319825903d7f6018eb76e4e..5713e6aed29531248e27971eb9c742c74518d14f 100644
@@ -472,9 +472,16 @@ StrongPHIElimination::SplitInterferencesForBasicBlock(
   for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
   BBI != BBE; ++BBI) {
     for (MachineInstr::const_mop_iterator I = BBI->operands_begin(),
-         E = BBI->operands_end(); I != E && I->isReg() && I->isDef(); ++I) {
+         E = BBI->operands_end(); I != E; ++I) {
       const MachineOperand& MO = *I;
 
+      // FIXME: This would be faster if it were possible to bail out of checking
+      // an instruction's operands after the explicit defs, but this is incorrect
+      // for variadic instructions, which may appear before register allocation
+      // in the future.
+      if (!MO.isReg() || !MO.isDef())
+        continue;
+
       unsigned DestReg = MO.getReg();
       if (!DestReg || !TargetRegisterInfo::isVirtualRegister(DestReg))
         continue;
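
For readers outside the LLVM tree, the following self-contained C++ sketch illustrates the trade-off the FIXME describes. It uses a hypothetical ToyOperand type rather than the real MachineOperand, so the names and structure here are illustrative assumptions, not LLVM API: the reverted loop stops at the first operand that is not a register def (fast, but only correct when every def precedes all other operands), while the restored loop scans every operand and skips non-defs, which stays correct even if a variadic instruction places a def after a use.

// Standalone sketch (hypothetical types, not LLVM code): why early exit after
// the leading defs can miss defs when the defs-first ordering does not hold.
#include <cstdio>
#include <vector>

struct ToyOperand {   // hypothetical stand-in for an instruction operand
  bool IsReg;
  bool IsDef;
  unsigned Reg;
};

// Reverted (fast) form: stop at the first operand that is not a register def.
// Correct only if every register def precedes all other operands.
unsigned countDefsEarlyExit(const std::vector<ToyOperand> &Ops) {
  unsigned N = 0;
  for (const ToyOperand &MO : Ops) {
    if (!MO.IsReg || !MO.IsDef)
      break;            // assumes defs-first operand ordering
    ++N;
  }
  return N;
}

// Restored (safe) form: scan every operand and skip non-defs, as in the patch.
unsigned countDefsFullScan(const std::vector<ToyOperand> &Ops) {
  unsigned N = 0;
  for (const ToyOperand &MO : Ops) {
    if (!MO.IsReg || !MO.IsDef)
      continue;         // keep scanning; a def may still appear later
    ++N;
  }
  return N;
}

int main() {
  // A variadic-style operand list where a def follows a use breaks the
  // defs-first assumption that the early-exit loop relies on.
  std::vector<ToyOperand> Ops = {
      {true, true, 1},   // def
      {true, false, 2},  // use
      {true, true, 3},   // def appearing after a use
  };
  std::printf("early exit: %u, full scan: %u\n",
              countDefsEarlyExit(Ops), countDefsFullScan(Ops));
  // prints "early exit: 1, full scan: 2" -- the fast loop misses a def.
  return 0;
}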