//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ... = a[x + i][y + j];
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
// significant slowdown in targets with limited addressing modes. For instance,
// because the PTX target does not support the reg+reg addressing mode, the
// NVPTX backend emits PTX code that literally computes the pointer address of
// each GEP, wasting tons of registers. It emits the following PTX for the
// first load and similar PTX for other loads.
//
// mov.u32         %r1, %tid.x;
// mov.u32         %r2, %tid.y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];
//
// To reduce register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1  * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetics into the loads. For
// example, the NVPTX backend can easily fold the pointer arithmetics into the
// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
//
// mov.u32         %r1, %tid.x;
// mov.u32         %r2, %tid.y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];     // so far the same as unoptimized PTX
// ld.global.f32   %f2, [%rl6+4];   // much better
// ld.global.f32   %f3, [%rl6+128]; // much better
// ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such a transformation can have the following benefits:
// (1) It can always extract constants in the indices of structure type.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g. The following GEPs have multiple indices:
//
// %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
// load %p
// ...
// %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
// load %p2
// ...
//
// We cannot do CSE on the common part related to index "i64 %i". Lowering
// GEPs can achieve this goal.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//
// %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
// %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
// %3 = add i64 %1, %2                          ; CSE opportunity
// %4 = mul i64 %j1, length_of_struct
// %5 = add i64 %3, %4
// %6 = add i64 %5, struct_field_3              ; Constant offset
// %p = inttoptr i64 %6 to i32*
// load %p
// ...
// %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
// %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
// %9 = add i64 %7, %8                          ; CSE opportunity
// %10 = mul i64 %j2, length_of_struct
// %11 = add i64 %9, %10
// %12 = add i64 %11, struct_field_2            ; Constant offset
// %p2 = inttoptr i64 %12 to i32*
// load %p2
// ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//
// %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
// %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
// %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
// %4 = mul i64 %j1, length_of_struct
// %5 = getelementptr i8* %3, i64 %4
// %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
// %p = bitcast i8* %6 to i32*
// load %p
// ...
// %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
// %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
// %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
// %10 = mul i64 %j2, length_of_struct
// %11 = getelementptr i8* %9, i64 %10
// %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
// %p2 = bitcast i8* %12 to i32*
// load %p2
// ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if any one of the indices is variant. If we lower such a GEP into
// invariant parts and variant parts, LICM can hoist/sink the invariant parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and will
// not be sunk. If we lower such a GEP into smaller parts, CGP may sink some of
// them, so we end up with a better addressing mode.
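//
// As an illustrative sketch (hypothetical IR; the pass does not emit exactly
// this): inside a loop over %j with loop-invariant %i,
//
// %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j, i32 3
//
// cannot be hoisted as a whole, but after lowering, the single-index GEP that
// computes "%ptr + %i * length_of_10xstruct" no longer depends on %j, so LICM
// can hoist it out of the loop.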
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

namespace {

/// \brief A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer which can be trivially split. For
/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index to (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to
/// (b * a + 5); nor can we transform (3 * (a + 5)) to (3 * a + 5). In the
/// latter case, however, -instcombine has probably already optimized
/// (3 * (a + 5)) to (3 * a + 15).
class ConstantOffsetExtractor {
public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx The given GEP index
  /// \p DL  The data layout of the module
  /// \p GEP The given GEP
  static Value *Extract(Value *Idx, const DataLayout *DL,
                        GetElementPtrInst *GEP);
  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the extracted constant offset (0 if
  /// failed). The meanings of the arguments are the same as in Extract.
  static int64_t Find(Value *Idx, const DataLayout *DL, GetElementPtrInst *GEP);

private:
  ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
      : DL(Layout), IP(InsertionPt) {}
  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search is
  /// successful, returns C and updates UserChain as a def-use chain from C to
  /// V; otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
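  ///
  /// For example (a sketch of the intended behavior, mirroring the UserChain
  /// documentation below): for V = a * b + (c + 5), find returns 5 and leaves
  /// UserChain as [5, c + 5, a * b + (c + 5)].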
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
  /// A helper function to look into both operands of a binary operator.
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);
  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression
  ///    tree that computes I
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();
  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  ///   5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  ///   zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///     zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);
  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);
  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// Returns true if LHS and RHS have no bits in common, i.e., LHS & RHS == 0.
  bool NoCommonBits(Value *LHS, Value *RHS) const;
  /// Computes which bits are known to be one or zero.
  /// \p KnownOne  Mask of all bits that are known to be one.
  /// \p KnownZero Mask of all bits that are known to be zero.
  void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative  Whether BO is known to be non-negative, e.g., an index
  ///                 of an inbounds GEP
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);

  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)", then after running function find, UserChain[0]
  /// will be the constant 5, UserChain[1] will be the subexpression "c + 5",
  /// and UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;
  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;
  /// The data layout of the module. Used in ComputeKnownBits.
  const DataLayout *DL;
  Instruction *IP;  ///< Insertion position of cloned instructions.
};

/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
  static char ID;

  SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
                             bool LowerGEP = false)
      : FunctionPass(ID), TM(TM), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DataLayoutPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    if (DLP == nullptr)
      report_fatal_error("data layout missing");
    DL = &DLP->getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;

private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);
  /// Lower a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic               The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);
  /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of arithmetic operations and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic               The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);
  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it finds in indices of both sequential and structure
  /// types; otherwise it only finds in sequential indices. The output
  /// NeedsExtraction indicates whether we successfully found a non-zero
  /// constant offset.
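  ///
  /// For example (a sketch tied to the float a[32][32] example in the file
  /// header): for "gep %a, 0, %x + 1, %y + 1", the extracted constant offsets
  /// 1 and 1 accumulate to 1 * 32 * sizeof(float) + 1 * sizeof(float) = 132
  /// bytes.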
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);

  const DataLayout *DL;
  const TargetMachine *TM;
  /// Whether to lower a GEP with multiple indices into arithmetic operations
  /// or multiple GEPs with a single index.
  bool LowerGEP;
};
} // anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;
INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *
llvm::createSeparateConstOffsetFromGEPPass(const TargetMachine *TM,
                                           bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(TM, LowerGEP);
}

bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
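  // For example (with hypothetical values): if LHS is known to be a multiple
  // of 4 (its low two bits are zero), then LHS | 3 computes exactly LHS + 3.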
  if (BO->getOpcode() == Instruction::Or && !NoCommonBits(LHS, RHS))
    return false;

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //       0       |      0       | true because no s/zext exists
  //       0       |      1       | zext(BO) == zext(A) op zext(B)
  //       1       |      0       | sext(BO) == sext(A) op sext(B)
  //       1       |      1       | zext(sext(BO)) ==
  //               |              |   zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw. (If both operands were
    // non-negative, a wrapped sum would be negative, contradicting a + b >= 0;
    // if exactly one operand is negative, the sum lies between the two
    // operands and cannot wrap.)
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities of combining the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;
  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
    }
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in the reverse order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (used to be s/zext) from UserChain.
  unsigned NewSize = 0;
  for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
    if (*I != nullptr) {
      UserChain[NewSize] = *I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
           "We only traced into two types of CastInst: sext and zext");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperator and CastInst.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  if (BO->getOpcode() == Instruction::Or) {
    // Rebuild "or" as "add", because "or" may be invalid for the new
    // expression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    if (OpNo == 0) {
      return BinaryOperator::CreateAdd(NextInChain, TheOther, BO->getName(),
                                       IP);
    } else {
      return BinaryOperator::CreateAdd(TheOther, NextInChain, BO->getName(),
                                       IP);
    }
  }

  // We can reuse BO in this case, because the new expression shares the same
  // instruction type and BO is used at most once.
  assert(BO->getNumUses() <= 1 &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");
  BO->setOperand(OpNo, NextInChain);
  BO->setHasNoSignedWrap(false);
  BO->setHasNoUnsignedWrap(false);
  // Make sure it appears after all instructions we've inserted so far.
  BO->moveBefore(IP);
  return BO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, const DataLayout *DL,
                                        GetElementPtrInst *GEP) {
  ConstantOffsetExtractor Extractor(DL, GEP);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0)
    return nullptr;
  // Separate the constant offset from the GEP index.
  return Extractor.rebuildWithoutConstOffset();
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL,
                                      GetElementPtrInst *GEP) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(DL, GEP)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
                                               APInt &KnownZero) const {
  IntegerType *IT = cast<IntegerType>(V->getType());
  KnownOne = APInt(IT->getBitWidth(), 0);
  KnownZero = APInt(IT->getBitWidth(), 0);
  llvm::computeKnownBits(V, KnownZero, KnownOne, DL, 0);
}

bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
  ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
  ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices which must be i32.
    if (isa<SequentialType>(*GTI)) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), DL, GEP);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices. We accumulate the extracted
        // constant offset to a byte offset, and later offset the remainder of
        // the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = cast<StructType>(*GTI);
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr = Builder.CreateGEP(ResultPtr, Idx, "uglygep");
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr = Builder.CreateGEP(ResultPtr, Offset, "uglygep");
  }
  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetics for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;
  // If LowerGEP is disabled, before really splitting the GEP, check whether the
  // backend supports the addressing mode we are about to produce. If not, this
  // splitting probably won't be beneficial.
  // If LowerGEP is enabled, even if the extracted constant offset cannot match
  // the addressing mode, we can still do optimizations on the other lowered
  // parts of the variable indices. Therefore, we don't check for addressing
  // modes in that case.
  if (!LowerGEP) {
    TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI();
    if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. lowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(GEP->getOperand(I), DL, GEP);
      if (NewIdx != nullptr) {
        GEP->setOperand(I, NewIdx);
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  //   b     = add i64 a, 5
  //   addr  = gep inbounds float* p, i64 b
  //
  // is transformed to:
  //
  //   addr2 = gep float* p, i64 a
  //   addr  = gep float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  GEP->setIsInBounds(false);

  // Lower a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
    // arithmetic operations if the target uses alias analysis in codegen.
    if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return Changed;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // original type.
  //
  //   %gep2                       ; clone of %gep
  //   %0       = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per ANSI C standard, signed / unsigned = unsigned and signed % unsigned =
  // unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed because it is
  // used with signed integers later.
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getType()->getElementType()));
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
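    // E.g., tying back to the header's float a[32][32] example: an accumulated
    // offset of 132 bytes over float elements (sizeof(float) == 4) yields
    // index 33, matching "load base + 33 * sizeof(float)".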
    NewGEP = GetElementPtrInst::Create(
        NewGEP, ConstantInt::get(IntPtrTy, Index, true), GEP->getName(), GEP);
  } else {
    // Unlikely but possible. For example,
    //
    // #pragma pack(1)
    // struct S {
    //   int a[3];
    //   int64 b[8];
    // };
    // #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        NewGEP, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true),
        "uglygep", GEP);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  bool Changed = false;
  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
        Changed |= splitGEP(GEP);
      }
      // No need to split GEP ConstantExprs because all their indices are
      // constant