//===- InstCombinePHI.cpp -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"

using namespace llvm;

#define DEBUG_TYPE "instcombine"

/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single use, turn this into a phi and a single binop.
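/// Illustrative sketch (made-up IR; the actual names come from the incoming
/// instructions):
///   %v = phi i32 [ %add1, %bb1 ], [ %add2, %bb2 ]
///        ; where %add1 = add i32 %a, %b and %add2 = add i32 %a, %c
/// becomes roughly
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %v    = add i32 %a, %b.pn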
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  bool isNUW = false, isNSW = false, isExact = false;
  if (OverflowingBinaryOperator *BO =
        dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
    isNUW = BO->hasNoUnsignedWrap();
    isNSW = BO->hasNoSignedWrap();
  } else if (PossiblyExactOperator *PEO =
               dyn_cast<PossiblyExactOperator>(FirstInst))
    isExact = PEO->isExact();

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
    BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
  if (isNUW) NewBinOp->setHasNoUnsignedWrap();
  if (isNSW) NewBinOp->setHasNoSignedWrap();
  if (isExact) NewBinOp->setIsExact();
  NewBinOp->setDebugLoc(FirstInst->getDebugLoc());
  return NewBinOp;
}
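
/// If all incoming values of the PHI are GEPs with the same shape, fold the
/// GEP through the PHI: only the operands that actually differ get a new PHI.
/// Illustrative sketch (made-up IR names):
///   %p = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
///        ; where %g1 = getelementptr i32, i32* %base, i64 %i
///        ;   and %g2 = getelementptr i32, i32* %base, i64 %j
/// becomes roughly
///   %i.pn = phi i64 [ %i, %bb1 ], [ %j, %bb2 ]
///   %p    = getelementptr i32, i32* %base, i64 %i.pn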
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());

  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return nullptr;

      if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
    GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                              makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  NewGEP->setDebugLoc(FirstInst->getDebugLoc());
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load is
/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is. We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when TD is around,
  // and if TD isn't around, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  LoadInst *NewLI = new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
  NewLI->setDebugLoc(FirstLI->getDebugLoc());
  return NewLI;
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
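/// Illustrative sketch (made-up IR; the same idea applies to casts, and to
/// binops/compares whose right-hand side is a constant):
///   %v = phi i64 [ %e1, %bb1 ], [ %e2, %bb2 ]
///        ; where %e1 = sext i32 %a to i64 and %e2 = sext i32 %b to i64
/// becomes roughly
///   %ab = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
///   %v  = sext i32 %ab to i64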
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;
  bool isNUW = false, isNSW = false, isExact = false;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!ShouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return FoldPHIArgBinOpIntoPHI(PN);

    if (OverflowingBinaryOperator *BO =
          dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
      isNUW = BO->hasNoUnsignedWrap();
      isNSW = BO->hasNoSignedWrap();
    } else if (PossiblyExactOperator *PEO =
                 dyn_cast<PossiblyExactOperator>(FirstInst))
      isExact = PEO->isExact();
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    if (isNUW) BinOp->setHasNoUnsignedWrap();
    if (isNSW) BinOp->setHasNoSignedWrap();
    if (isExact) BinOp->setIsExact();
    BinOp->setDebugLoc(FirstInst->getDebugLoc());
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  NewCI->setDebugLoc(FirstInst->getDebugLoc());
  return NewCI;
}

/// Return true if this PHI node is only used by a PHI node cycle that is dead.
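/// For example (illustrative): %x = phi [ %y, ... ] and %y = phi [ %x, ... ],
/// where neither %x nor %y has any use outside the cycle.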
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
} // end anonymous namespace

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
}

/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
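/// Illustrative sketch (made-up IR, assuming i64 is not a legal integer type
/// for the target):
///   %v  = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %lo = trunc i64 %v to i32
///   %s  = lshr i64 %v, 32
///   %hi = trunc i64 %s to i32
/// is split into two i32 PHIs, one feeding %lo and one feeding %hi, with the
/// lshr/trunc extraction rebuilt in the predecessors.
///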
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsInspected is a set we use to avoid
  // revisiting PHIs, and PHIsToSlice is an ordered list of PHIs that we need to
  // check the uses of (to ensure they are all extracts).
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we can't split the critical
    // edge which is required to insert a truncate. Because of this, we have to
    // bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must all be self uses; just nuke the PHI.
  if (PHIUsers.empty())
    return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
        );

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder->SetInsertPoint(Pred, Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                          Offset), "extract");
        Res = Builder->CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
                                          OldInVal)-PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
  return ReplaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AC))
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
          cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return nullptr;
}