From da754b50159075802dc8ecf6e799468bcd8f6424 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Thu, 19 Nov 2015 16:37:10 +0000
Subject: [PATCH] [CGP] despeculate expensive cttz/ctlz intrinsics

This is another step towards allowing SimplifyCFG to speculate harder, but
then have CGP clean things up if the target doesn't like it.

Previous patches in this series:
http://reviews.llvm.org/D12882
http://reviews.llvm.org/D13297

D13297 should catch most expensive ops, but speculation of cttz/ctlz requires
special handling because of weirdness in the intrinsic definition for handling
a zero input (that definition can probably be blamed on x86).

For example, if we have the usual speculated-by-select expensive op pattern
like this:

  %tobool = icmp eq i64 %A, 0
  %0 = tail call i64 @llvm.cttz.i64(i64 %A, i1 true)   ; is_zero_undef == true
  %cond = select i1 %tobool, i64 64, i64 %0
  ret i64 %cond

There's an instcombine that will turn it into:

  %0 = tail call i64 @llvm.cttz.i64(i64 %A, i1 false)  ; is_zero_undef == false

This CGP patch is looking for that case and despeculating it back into:

  entry:
    %tobool = icmp eq i64 %A, 0
    br i1 %tobool, label %cond.end, label %cond.true

  cond.true:
    %0 = tail call i64 @llvm.cttz.i64(i64 %A, i1 true) ; is_zero_undef == true
    br label %cond.end

  cond.end:
    %cond = phi i64 [ %0, %cond.true ], [ 64, %entry ]
    ret i64 %cond

This unfortunately may lead to poorer codegen (see the changes in the existing
x86 test), but if we increase speculation in SimplifyCFG (the next step in
this patch series), then we should avoid those kinds of cases in the first
place.

The need for this patch was originally mentioned here:
http://reviews.llvm.org/D7506
with follow-up here:
http://reviews.llvm.org/D7554

Differential Revision: http://reviews.llvm.org/D14630

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@253573 91177308-0d34-0410-b5e6-96231b3b80d8
---
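Note for context, not part of the commit itself: the despeculation added below
is gated by the isCheapToSpeculateCttz()/isCheapToSpeculateCtlz()
TargetLowering hooks introduced by the earlier patches in this series. As a
rough sketch of how a backend answers those hooks, the x86 overrides look
something like the following; the exact subtarget predicates are recalled from
memory and should be treated as illustrative rather than authoritative:

  // In X86ISelLowering.cpp (approximate). Speculating a count-zeros op is
  // cheap only when the subtarget has an instruction that is fully defined
  // for a zero input, so no guard around the op is needed.
  bool X86TargetLowering::isCheapToSpeculateCttz() const {
    return Subtarget->hasBMI();   // BMI's TZCNT is defined at zero.
  }

  bool X86TargetLowering::isCheapToSpeculateCtlz() const {
    return Subtarget->hasLZCNT(); // LZCNT is defined at zero.
  }

Plain BSF/BSR leave the destination undefined for a zero input, which is why a
target without these features wants the compare-and-branch form produced here.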
 lib/CodeGen/CodeGenPrepare.cpp | 84 ++++++++++++++++++++++++++++++++++
 test/CodeGen/X86/clz.ll        | 55 ++++++++++++++--------
 2 files changed, 121 insertions(+), 18 deletions(-)

diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index de5c68f7767..894cc814a01 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -1606,6 +1606,85 @@ static void ScalarizeMaskedScatter(CallInst *CI) {
   CI->eraseFromParent();
 }
 
+/// If counting leading or trailing zeros is an expensive operation and a zero
+/// input is defined, add a check for zero to avoid calling the intrinsic.
+///
+/// We want to transform:
+///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
+///
+/// into:
+///   entry:
+///     %cmpz = icmp eq i64 %A, 0
+///     br i1 %cmpz, label %cond.end, label %cond.false
+///   cond.false:
+///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
+///     br label %cond.end
+///   cond.end:
+///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
+///
+/// If the transform is performed, return true and set ModifiedDT to true.
+static bool despeculateCountZeros(IntrinsicInst *CountZeros,
+                                  const TargetLowering *TLI,
+                                  const DataLayout *DL,
+                                  bool &ModifiedDT) {
+  if (!TLI || !DL)
+    return false;
+
+  // If a zero input is undefined, it doesn't make sense to despeculate that.
+  if (match(CountZeros->getOperand(1), m_One()))
+    return false;
+
+  // If it's cheap to speculate, there's nothing to do.
+  auto IntrinsicID = CountZeros->getIntrinsicID();
+  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
+      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
+    return false;
+
+  // Only handle legal scalar cases. Anything else requires too much work.
+  Type *Ty = CountZeros->getType();
+  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
+  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSize())
+    return false;
+
+  // The intrinsic will be sunk behind a compare against zero and branch.
+  BasicBlock *StartBlock = CountZeros->getParent();
+  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
+
+  // Create another block after the count zero intrinsic. A PHI will be added
+  // in this block to select the result of the intrinsic or the bit-width
+  // constant if the input to the intrinsic is zero.
+  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
+  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
+
+  // Set up a builder to create a compare, conditional branch, and PHI.
+  IRBuilder<> Builder(CountZeros->getContext());
+  Builder.SetInsertPoint(StartBlock->getTerminator());
+  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
+
+  // Replace the unconditional branch that was created by the first split with
+  // a compare against zero and a conditional branch.
+  Value *Zero = Constant::getNullValue(Ty);
+  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
+  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
+  StartBlock->getTerminator()->eraseFromParent();
+
+  // Create a PHI in the end block to select either the output of the intrinsic
+  // or the bit width of the operand.
+  Builder.SetInsertPoint(&EndBlock->front());
+  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
+  CountZeros->replaceAllUsesWith(PN);
+  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
+  PN->addIncoming(BitWidth, StartBlock);
+  PN->addIncoming(CountZeros, CallBlock);
+
+  // We are explicitly handling the zero case, so we can set the intrinsic's
+  // undefined zero argument to 'true'. This will also prevent reprocessing the
+  // intrinsic; we only despeculate when a zero input is defined.
+  CountZeros->setArgOperand(1, Builder.getTrue());
+  ModifiedDT = true;
+  return true;
+}
+
 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
   BasicBlock *BB = CI->getParent();
 
@@ -1746,6 +1825,11 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
     II->replaceAllUsesWith(II->getArgOperand(0));
     II->eraseFromParent();
     return true;
+
+  case Intrinsic::cttz:
+  case Intrinsic::ctlz:
+    // If counting zeros is expensive, try to avoid it.
+    return despeculateCountZeros(II, TLI, DL, ModifiedDT);
   }
 
   if (TLI) {
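A concrete illustration, not part of the patch: applied to the 32-bit ctlz
calls in the test file below, despeculateCountZeros produces IR shaped like
the following sketch (it mirrors the cttz.i64 example in the function comment
above, with the bit width 32 feeding the PHI):

  define i32 @ctlz_i32_zero_test(i32 %n) {
  entry:
    %cmpz = icmp eq i32 %n, 0
    br i1 %cmpz, label %cond.end, label %cond.false

  cond.false:                         ; %n is known non-zero here
    %z = call i32 @llvm.ctlz.i32(i32 %n, i1 true)
    br label %cond.end

  cond.end:                           ; a zero input yields the bit width
    %ctz = phi i32 [ 32, %entry ], [ %z, %cond.false ]
    ret i32 %ctz
  }

  declare i32 @llvm.ctlz.i32(i32, i1)

The movl $32 / testl / je sequences in the updated CHECK lines below are the
direct lowering of this compare-and-branch structure.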
diff --git a/test/CodeGen/X86/clz.ll b/test/CodeGen/X86/clz.ll
index e50d7ec437c..4a094480c93 100644
--- a/test/CodeGen/X86/clz.ll
+++ b/test/CodeGen/X86/clz.ll
@@ -87,55 +87,74 @@ define i64 @ctlz_i64(i64 %x) {
   ret i64 %tmp
 }
 
-define i32 @ctlz_i32_cmov(i32 %n) {
-; CHECK-LABEL: ctlz_i32_cmov:
+define i32 @ctlz_i32_zero_test(i32 %n) {
+; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
+
+; CHECK-LABEL: ctlz_i32_zero_test:
 ; CHECK: # BB#0:
-; CHECK-NEXT: bsrl %edi, %ecx
-; CHECK-NEXT: movl $63, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: movl $32, %eax
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: je .LBB8_2
+; CHECK-NEXT: # BB#1: # %cond.false
+; CHECK-NEXT: bsrl %edi, %eax
 ; CHECK-NEXT: xorl $31, %eax
+; CHECK-NEXT: .LBB8_2: # %cond.end
 ; CHECK-NEXT: retq
-; Generate a cmov to handle zero inputs when necessary.
   %tmp1 = call i32 @llvm.ctlz.i32(i32 %n, i1 false)
   ret i32 %tmp1
 }
 
 define i32 @ctlz_i32_fold_cmov(i32 %n) {
+; Don't generate the cmovne when the source is known non-zero (and bsr would
+; not set ZF).
+; rdar://9490949
+; FIXME: The compare and branch are produced late in IR (by CodeGenPrepare), and
+; codegen doesn't know how to delete the movl and je.
+
 ; CHECK-LABEL: ctlz_i32_fold_cmov:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: orl $1, %edi
+; CHECK-NEXT: movl $32, %eax
+; CHECK-NEXT: je .LBB9_2
+; CHECK-NEXT: # BB#1: # %cond.false
 ; CHECK-NEXT: bsrl %edi, %eax
 ; CHECK-NEXT: xorl $31, %eax
+; CHECK-NEXT: .LBB9_2: # %cond.end
 ; CHECK-NEXT: retq
-; Don't generate the cmovne when the source is known non-zero (and bsr would
-; not set ZF).
-; rdar://9490949
   %or = or i32 %n, 1
   %tmp1 = call i32 @llvm.ctlz.i32(i32 %or, i1 false)
   ret i32 %tmp1
 }
 
 define i32 @ctlz_bsr(i32 %n) {
+; Don't generate any xors when a 'ctlz' intrinsic is actually used to compute
+; the most significant bit, which is what 'bsr' does natively.
+
 ; CHECK-LABEL: ctlz_bsr:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: bsrl %edi, %eax
 ; CHECK-NEXT: retq
-; Don't generate any xors when a 'ctlz' intrinsic is actually used to compute
-; the most significant bit, which is what 'bsr' does natively.
   %ctlz = call i32 @llvm.ctlz.i32(i32 %n, i1 true)
   %bsr = xor i32 %ctlz, 31
   ret i32 %bsr
 }
 
-define i32 @ctlz_bsr_cmov(i32 %n) {
-; CHECK-LABEL: ctlz_bsr_cmov:
+define i32 @ctlz_bsr_zero_test(i32 %n) {
+; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
+; FIXME: The compare and branch are produced late in IR (by CodeGenPrepare), and
+; codegen doesn't know how to combine the $32 and $31 into $63.
+
+; CHECK-LABEL: ctlz_bsr_zero_test:
 ; CHECK: # BB#0:
-; CHECK-NEXT: bsrl %edi, %ecx
-; CHECK-NEXT: movl $63, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: movl $32, %eax
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: je .LBB11_2
+; CHECK-NEXT: # BB#1: # %cond.false
+; CHECK-NEXT: bsrl %edi, %eax
+; CHECK-NEXT: xorl $31, %eax
+; CHECK-NEXT: .LBB11_2: # %cond.end
+; CHECK-NEXT: xorl $31, %eax
 ; CHECK-NEXT: retq
-; Same as ctlz_bsr, but ensure this happens even when there is a potential
-; zero.
   %ctlz = call i32 @llvm.ctlz.i32(i32 %n, i1 false)
   %bsr = xor i32 %ctlz, 31
   ret i32 %bsr
 }
-- 
2.34.1
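Postscript, not part of the patch: to make the FIXME in ctlz_bsr_zero_test
concrete, on the zero path the PHI produces the bit width 32, and the trailing
xorl $31 then computes 32 ^ 31 = 63 (0b100000 ^ 0b011111 = 0b111111). A fold
that pushed the xor into both PHI arms could therefore materialize $63
directly on the zero path. A hypothetical post-fold IR shape (no current pass
produces this; it is shown only to spell out the missed combine):

  define i32 @ctlz_bsr_zero_test_folded(i32 %n) {
  entry:
    %cmpz = icmp eq i32 %n, 0
    br i1 %cmpz, label %cond.end, label %cond.false

  cond.false:
    %z = call i32 @llvm.ctlz.i32(i32 %n, i1 true)
    %bsr = xor i32 %z, 31               ; exactly what bsrl computes
    br label %cond.end

  cond.end:
    %r = phi i32 [ 63, %entry ], [ %bsr, %cond.false ]  ; 32 ^ 31 == 63
    ret i32 %r
  }

  declare i32 @llvm.ctlz.i32(i32, i1)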