//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
//   gep %a, 0, %x, %y; load
//   gep %a, 0, %x, %y + 1; load
//   gep %a, 0, %x + 1, %y; load
//   gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
// significant slowdown in targets with limited addressing modes. For instance,
// because the PTX target does not support the reg+reg addressing mode, the
// NVPTX backend emits PTX code that literally computes the pointer address of
// each GEP, wasting tons of registers. It emits the following PTX for the
// first load and similar PTX for other loads.
//
//   mov.u32         %r1, %tid.x;
//   mov.u32         %r2, %tid.y;
//   mul.wide.u32    %rl2, %r1, 128;
//   mov.u64         %rl3, a;
//   add.s64         %rl4, %rl3, %rl2;
//   mul.wide.u32    %rl5, %r2, 4;
//   add.s64         %rl6, %rl4, %rl5;
//   ld.global.f32   %f1, [%rl6];
//
// To reduce register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into the following code.
//
//   base = gep a, 0, x, y
//
//   load base
//   load base + 1 * sizeof(float)
//   load base + 32 * sizeof(float)
//   load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend can fold the pointer arithmetic into the
// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
//
//   mov.u32         %r1, %tid.x;
//   mov.u32         %r2, %tid.y;
//   mul.wide.u32    %rl2, %r1, 128;
//   mov.u64         %rl3, a;
//   add.s64         %rl4, %rl3, %rl2;
//   mul.wide.u32    %rl5, %r2, 4;
//   add.s64         %rl6, %rl4, %rl5;
//   ld.global.f32   %f1, [%rl6];     // so far the same as unoptimized PTX
//   ld.global.f32   %f2, [%rl6+4];   // much better
//   ld.global.f32   %f3, [%rl6+128]; // much better
//   ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such a transformation can have the following benefits:
// (1) It can always extract constants in the indices of structure type.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g. The following GEPs have multiple indices:
//
//   %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
//   load %p
//   ...
//   %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
//   load %p2
//   ...
//
// We cannot CSE the common part related to index "i64 %i" across the two
// GEPs. Lowering the GEPs can achieve this goal.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//
//   %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//   %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//   %3 = add i64 %1, %2                          ; CSE opportunity
//   %4 = mul i64 %j1, length_of_struct
//   %5 = add i64 %3, %4
//   %6 = add i64 %5, struct_field_3              ; Constant offset
//   %p = inttoptr i64 %6 to i32*
//   load %p
//   ...
//   %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//   %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//   %9 = add i64 %7, %8                          ; CSE opportunity
//   %10 = mul i64 %j2, length_of_struct
//   %11 = add i64 %9, %10
//   %12 = add i64 %11, struct_field_2            ; Constant offset
//   %p2 = inttoptr i64 %12 to i32*
//   load %p2
//   ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//
//   %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//   %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//   %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
//   %4 = mul i64 %j1, length_of_struct
//   %5 = getelementptr i8* %3, i64 %4
//   %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
//   %p = bitcast i8* %6 to i32*
//   load %p
//   ...
//   %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//   %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//   %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
//   %10 = mul i64 %j2, length_of_struct
//   %11 = getelementptr i8* %9, i64 %10
//   %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
//   %p2 = bitcast i8* %12 to i32*
//   load %p2
//   ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if one of the indices is variant. If we lower such a GEP into
// invariant parts and variant parts, LICM can hoist/sink those invariant
// parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and
// will not be sunk. If we lower such a GEP into smaller parts, CGP may sink
// some of them. So we end up with a better addressing mode.
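//
// A sketch of the LICM benefit (illustrative, not from an actual test case):
// in
//
//   loop:
//     %p = gep [32 x [32 x float]]* %a, i64 0, i64 %inv, i64 %i
//
// where %inv is loop-invariant and %i is the induction variable, the lowered
// form exposes the subexpression "%inv * 128" (the byte offset of row %inv),
// which LICM can hoist out of the loop; only the %i-dependent part stays
// inside.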
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

// Setting this flag may emit false positives when the input module already
// contains dead instructions. Therefore, we set it only in unit tests that are
// free of dead code.
static cl::opt<bool>
    VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
                     cl::desc("Verify this pass produces no dead code"),
                     cl::Hidden);

namespace {

/// \brief A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer that can be trivially split. For
/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index into (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5). However, in the
/// latter case, -instcombine has probably already optimized (3 * (a + 5)) to
/// (3 * a + 15).
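///
/// For example (an illustrative sketch, not a specific test case), given the
/// GEP index (a + (b + 8)), find locates the constant 8, and Extract rebuilds
/// the index as (a + b), leaving 8 to be folded into the GEP's byte offset:
///
///   gep inbounds float* %p, i64 (a + (b + 8))
///   =>
///   %sum = add i64 %a, %b
///   gep float* %p, i64 %sum    ; plus a constant byte offset of 8 * 4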
class ConstantOffsetExtractor {
public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx           The given GEP index
  /// \p GEP           The given GEP
  /// \p UserChainTail Outputs the tail of UserChain so that we can
  ///                  garbage-collect unused instructions in UserChain.
  static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
                        User *&UserChainTail, const DominatorTree *DT);
  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the constant offset it found (0 on
  /// failure). The meanings of the arguments are the same as in Extract.
  static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
                      const DominatorTree *DT);

private:
  ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
  }

  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search is successful,
  /// returns C and updates UserChain as a def-use chain from C to V;
  /// otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
  /// A helper function to look into both operands of a binary operator.
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);
  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression
  ///    tree that computes I;
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();
  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);
  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);
  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative  Whether BO is known to be non-negative, e.g., an index
  ///                 of an inbounds GEP
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);

  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)", then after running function find,
  /// UserChain[0] will be the constant 5, UserChain[1] will be the
  /// subexpression "c + 5", and UserChain[2] will be the entire expression
  /// "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;
  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;
  Instruction *IP;  /// Insertion position of cloned instructions.
  const DataLayout &DL;
  const DominatorTree *DT;
};

/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
  static char ID;

  SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
                             bool LowerGEP = false)
      : FunctionPass(ID), DL(nullptr), DT(nullptr), TM(TM), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;

private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);
  /// Lower a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic               The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);
  /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of arithmetic operations and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic               The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);
  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it finds in indices of both sequential and structure
  /// types, otherwise it only finds in sequential indices. The output
  /// NeedsExtraction indicates whether we successfully found such a non-zero
  /// constant offset.
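  ///
  /// A sketch of the accumulation (hypothetical values): for
  ///   gep [10 x i32]* %a, i64 0, i64 (%i + 2)
  /// the constant 2 found in the array index contributes
  /// 2 * sizeof(i32) = 8 bytes to AccumulativeByteOffset.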
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
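  ///
  /// As a sketch of why the index size matters (hypothetical i8 indices):
  /// if a = b = 100, then a + b wraps to -56 as an i8, so
  ///   gep base, (a + b)
  /// steps backwards after sign-extension, whereas
  ///   gep (gep base, a), b
  /// steps forward by 200 elements.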
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
  /// Verify F is free of dead code.
  void verifyNoDeadCode(Function &F);

  const DataLayout *DL;
  const DominatorTree *DT;
  const TargetMachine *TM;
  /// Whether to lower a GEP with multiple indices into arithmetic operations
  /// or multiple GEPs with a single index.
  bool LowerGEP;
};
}  // anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;

INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *
llvm::createSeparateConstOffsetFromGEPPass(const TargetMachine *TM,
                                           bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(TM, LowerGEP);
}

bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
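  // For instance (illustrative), (a << 2) | 3 equals (a << 2) + 3, because
  // the low two bits of a << 2 are known to be zero.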
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
    return false;

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //       0       |      0       | true because no s/zext exists
  //       0       |      1       | zext(BO) == zext(A) op zext(B)
  //       1       |      0       | sext(BO) == sext(A) op sext(B)
  //       1       |      1       | zext(sext(BO)) ==
  //               |              |   zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw.
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities of combining the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;
  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in the reversed order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (used to be s/zext) from UserChain.
  unsigned NewSize = 0;
  for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
    if (*I != nullptr) {
      UserChain[NewSize] = *I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
           "We only traced into two types of CastInst: sext and zext");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperator and CastInst.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert(BO->getNumUses() <= 1 &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  BinaryOperator::BinaryOps NewOp = BO->getOpcode();
  if (BO->getOpcode() == Instruction::Or) {
    // Rebuild "or" as "add", because "or" may be invalid for the new
    // expression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    NewOp = Instruction::Add;
  }

  BinaryOperator *NewBO;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
  } else {
    NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
  }
  NewBO->takeName(BO);
  return NewBO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        const DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Separates the constant offset from the GEP index.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      const DominatorTree *DT) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices, which must be i32.
    if (isa<SequentialType>(*GTI)) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices. We accumulate the extracted
        // constant offset to a byte offset, and later offset the remainder of
        // the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = cast<StructType>(*GTI);
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  }
  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetics for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;

  // If LowerGEP is disabled, before really splitting the GEP, check whether the
  // backend supports the addressing mode we are about to produce. If not, this
  // splitting probably won't be beneficial.
  // If LowerGEP is enabled, even when the extracted constant offset does not
  // match the addressing mode, we can still do optimizations on the other
  // lowered parts of the variable indices. Therefore, we don't check for
  // addressing modes in that case.
  if (!LowerGEP) {
    TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *GEP->getParent()->getParent());
    if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. lowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switches to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // After switching to the new index, we can garbage-collect UserChain
        // and the old index if they are not used.
        RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
        RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
        Changed = true;
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  //   b     = add i64 a, 5
  //   addr  = gep inbounds float* p, i64 b
  //
  // is transformed to:
  //
  //   b     = add i64 a, 5
  //   addr2 = gep float* p, i64 a
  //   addr  = gep float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  GEP->setIsInBounds(false);

  // Lowers a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
    // arithmetic operations if the target uses alias analysis in codegen.
    if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return true;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0       = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>

  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per ANSI C standard, signed / unsigned = unsigned and signed % unsigned =
  // unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed because it is
  // used with the signed AccumulativeByteOffset below, and a mixed-sign
  // division or modulo would otherwise be computed as unsigned.
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getType()->getElementType()));
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(IntPtrTy, Index, true),
                                       GEP->getName(), GEP);
  } else {
    // Unlikely but possible. For example,
    //
    //   #pragma pack(1)
    //   struct S {
    //     int a[3];
    //     int64 b[8];
    //   };
    //   #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        Type::getInt8Ty(GEP->getContext()), NewGEP,
        ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
        GEP);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  bool Changed = false;
  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
        Changed |= splitGEP(GEP);
      }
      // No need to split GEP ConstantExprs because all their indices are
      // already constant.
    }
  }

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);

  return Changed;
}

void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (BasicBlock &B : F) {
    for (Instruction &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}