1 //===- NVPTXLowerAggrCopies.cpp - ------------------------------*- C++ -*--===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 // Lower aggregate copies, memset, memcpy, memmove intrinsics into loops when
12 // the size is large or is not a compile-time constant.
14 //===----------------------------------------------------------------------===//
16 #include "NVPTXLowerAggrCopies.h"
17 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
18 #include "llvm/CodeGen/StackProtector.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/Function.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/LLVMContext.h"
27 #include "llvm/IR/Module.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
31 #define DEBUG_TYPE "nvptx"
37 // The actual lowering pass, implemented as a FunctionPass.
// FunctionPass that lowers aggregate copies and large/variable-size mem*
// intrinsics into explicit byte loops (see runOnFunction below).
// NOTE(review): the embedded source numbering has gaps (e.g. no
// `static char ID;` declaration or closing braces are visible here), so
// some member lines appear to be elided from this listing.
38 struct NVPTXLowerAggrCopies : public FunctionPass {
41 NVPTXLowerAggrCopies() : FunctionPass(ID) {}
// Preserve analyses this IR-level pass does not invalidate.
43 void getAnalysisUsage(AnalysisUsage &AU) const override {
44 AU.addPreserved<MachineFunctionAnalysis>();
45 AU.addPreserved<StackProtector>();
48 bool runOnFunction(Function &F) override;
// Copies at or above this byte size (and all variable-size mem* calls)
// are lowered into loops; smaller constant-size ones are left alone.
50 static const unsigned MaxAggrCopySize = 128;
52 const char *getPassName() const override {
53 return "Lower aggregate copies/intrinsics into loops";
// Pass identification token; its address identifies the pass.
57 char NVPTXLowerAggrCopies::ID = 0;
59 // Lower memcpy to loop.
// Replaces ConvertedInst with a byte-at-a-time copy loop:
//   for (i = 0; i < CopyLen; ++i) DstAddr[i] = SrcAddr[i];
// The original block is split at ConvertedInst and a new "loadstoreloop"
// block is spliced in between the two halves.
// NOTE(review): numbering gaps show elided lines here — the `Function &F`
// parameter line, the `BasicBlock *NewBB =` left-hand side, the volatile
// flags on the load/store, and the `Value *NewIndex =` LHS are not visible
// in this listing.
60 void convertMemCpyToLoop(Instruction *ConvertedInst, Value *SrcAddr,
61 Value *DstAddr, Value *CopyLen, bool SrcIsVolatile,
62 bool DstIsVolatile, LLVMContext &Context,
// The loop induction variable uses the same integer type as the length.
64 Type *TypeOfCopyLen = CopyLen->getType();
66 BasicBlock *OrigBB = ConvertedInst->getParent();
// Split so everything from ConvertedInst onward lands in a "split" block.
68 ConvertedInst->getParent()->splitBasicBlock(ConvertedInst, "split");
69 BasicBlock *LoopBB = BasicBlock::Create(Context, "loadstoreloop", &F, NewBB);
// Redirect the fallthrough branch created by the split into the loop.
71 OrigBB->getTerminator()->setSuccessor(0, LoopBB);
72 IRBuilder<> Builder(OrigBB, OrigBB->getTerminator());
74 // SrcAddr and DstAddr are expected to be pointer types,
75 // so no check is made here.
76 unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
77 unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();
79 // Cast pointers to (char *), preserving each pointer's address space.
80 SrcAddr = Builder.CreateBitCast(SrcAddr, Builder.getInt8PtrTy(SrcAS));
81 DstAddr = Builder.CreateBitCast(DstAddr, Builder.getInt8PtrTy(DstAS));
83 IRBuilder<> LoopBuilder(LoopBB);
// Induction variable: starts at 0 coming from OrigBB.
84 PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
85 LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);
87 // load from SrcAddr+LoopIndex
88 // TODO: we can leverage the align parameter of llvm.memcpy for more efficient
89 // word-sized loads and stores.
90 Value *Element = LoopBuilder.CreateLoad(
91 LoopBuilder.CreateGEP(LoopBuilder.getInt8Ty(), SrcAddr, LoopIndex),
93 // store at DstAddr+LoopIndex
94 LoopBuilder.CreateStore(
96 LoopBuilder.CreateGEP(LoopBuilder.getInt8Ty(), DstAddr, LoopIndex),
99 // The value for LoopIndex coming from backedge is (LoopIndex + 1)
101 LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
102 LoopIndex->addIncoming(NewIndex, LoopBB);
// Back-edge while NewIndex < CopyLen (unsigned); otherwise fall out.
// NOTE(review): this tests the post-increment index, so a CopyLen of 0
// still executes one iteration — presumably callers guarantee CopyLen > 0
// or the elided lines guard it; verify against callers.
104 LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
108 // Lower memmove to IR. memmove is required to correctly copy overlapping memory
109 // regions; therefore, it has to check the relative positions of the source and
110 // destination pointers and choose the copy direction accordingly.
112 // The code below is an IR rendition of this C function:
114 // void* memmove(void* dst, const void* src, size_t n) {
115 // unsigned char* d = dst;
116 // const unsigned char* s = src;
124 // for (size_t i = 0; i < n; ++i) {
// NOTE(review): numbering gaps show elided lines in this listing — the
// `Function &F` parameter, the `ICmpInst *CompareN =` and
// `BasicBlock *LoopBB =` left-hand sides, and the false-successor
// arguments of the CreateCondBr calls are not visible here.
130 void convertMemMoveToLoop(Instruction *ConvertedInst, Value *SrcAddr,
131 Value *DstAddr, Value *CopyLen, bool SrcIsVolatile,
132 bool DstIsVolatile, LLVMContext &Context,
134 Type *TypeOfCopyLen = CopyLen->getType();
135 BasicBlock *OrigBB = ConvertedInst->getParent();
137 // Create a comparison of src and dst, based on which we jump to either
138 // the forward-copy part of the function (if src >= dst) or the backwards-copy
139 // part (if src < dst).
140 // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
141 // structure. Its block terminators (unconditional branches) are replaced by
142 // the appropriate conditional branches when the loop is built.
143 ICmpInst *PtrCompare = new ICmpInst(ConvertedInst, ICmpInst::ICMP_ULT,
144 SrcAddr, DstAddr, "compare_src_dst");
145 TerminatorInst *ThenTerm, *ElseTerm;
146 SplitBlockAndInsertIfThenElse(PtrCompare, ConvertedInst, &ThenTerm,
149 // Each part of the function consists of two blocks:
150 // copy_backwards: used to skip the loop when n == 0
151 // copy_backwards_loop: the actual backwards loop BB
152 // copy_forward: used to skip the loop when n == 0
153 // copy_forward_loop: the actual forward loop BB
154 BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
155 CopyBackwardsBB->setName("copy_backwards");
156 BasicBlock *CopyForwardBB = ElseTerm->getParent();
157 CopyForwardBB->setName("copy_forward");
// After the split, ConvertedInst lives in the tail block, which becomes
// the common exit.
158 BasicBlock *ExitBB = ConvertedInst->getParent();
159 ExitBB->setName("memmove_done");
161 // Initial comparison of n == 0 that lets us skip the loops altogether. Shared
162 // between both backwards and forward copy clauses.
164 new ICmpInst(OrigBB->getTerminator(), ICmpInst::ICMP_EQ, CopyLen,
165 ConstantInt::get(TypeOfCopyLen, 0), "compare_n_to_0");
167 // Copying backwards: index counts down from CopyLen to 0 so overlapping
// ranges with src < dst are copied correctly.
169 BasicBlock::Create(Context, "copy_backwards_loop", &F, CopyForwardBB);
170 IRBuilder<> LoopBuilder(LoopBB);
171 PHINode *LoopPhi = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
// The element copied this iteration is at (phi - 1).
172 Value *IndexPtr = LoopBuilder.CreateSub(
173 LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
174 Value *Element = LoopBuilder.CreateLoad(
175 LoopBuilder.CreateInBoundsGEP(SrcAddr, IndexPtr), "element");
176 LoopBuilder.CreateStore(Element,
177 LoopBuilder.CreateInBoundsGEP(DstAddr, IndexPtr));
// Exit once the index has reached 0; otherwise loop again.
178 LoopBuilder.CreateCondBr(
179 LoopBuilder.CreateICmpEQ(IndexPtr, ConstantInt::get(TypeOfCopyLen, 0)),
181 LoopPhi->addIncoming(IndexPtr, LoopBB);
182 LoopPhi->addIncoming(CopyLen, CopyBackwardsBB);
// Replace the placeholder terminator: skip the loop entirely when n == 0.
183 BranchInst::Create(ExitBB, LoopBB, CompareN, ThenTerm);
184 ThenTerm->eraseFromParent();
// Copying forwards: index counts up from 0 to CopyLen.
187 BasicBlock *FwdLoopBB =
188 BasicBlock::Create(Context, "copy_forward_loop", &F, ExitBB);
189 IRBuilder<> FwdLoopBuilder(FwdLoopBB);
190 PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
191 Value *FwdElement = FwdLoopBuilder.CreateLoad(
192 FwdLoopBuilder.CreateInBoundsGEP(SrcAddr, FwdCopyPhi), "element");
193 FwdLoopBuilder.CreateStore(
194 FwdElement, FwdLoopBuilder.CreateInBoundsGEP(DstAddr, FwdCopyPhi));
195 Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
196 FwdCopyPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_increment");
// Exit once the incremented index equals CopyLen.
197 FwdLoopBuilder.CreateCondBr(FwdLoopBuilder.CreateICmpEQ(FwdIndexPtr, CopyLen),
199 FwdCopyPhi->addIncoming(FwdIndexPtr, FwdLoopBB);
200 FwdCopyPhi->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), CopyForwardBB);
// Same n == 0 guard for the forward clause.
202 BranchInst::Create(ExitBB, FwdLoopBB, CompareN, ElseTerm);
203 ElseTerm->eraseFromParent();
206 // Lower memset to loop.
// Replaces ConvertedInst with a store loop:
//   for (i = 0; i < CopyLen; ++i) DstAddr[i] = SetValue;
// Mirrors convertMemCpyToLoop's split-block structure.
// NOTE(review): numbering gaps show elided lines — the `Function &F`
// parameter, the `BasicBlock *NewBB =` and `Value *NewIndex =` left-hand
// sides, and the CreateCondBr false successor are not visible here.
207 void convertMemSetToLoop(Instruction *ConvertedInst, Value *DstAddr,
208 Value *CopyLen, Value *SetValue, LLVMContext &Context,
210 BasicBlock *OrigBB = ConvertedInst->getParent();
// Split so the remainder of the block follows the new loop.
212 ConvertedInst->getParent()->splitBasicBlock(ConvertedInst, "split");
213 BasicBlock *LoopBB = BasicBlock::Create(Context, "loadstoreloop", &F, NewBB);
215 OrigBB->getTerminator()->setSuccessor(0, LoopBB);
216 IRBuilder<> Builder(OrigBB, OrigBB->getTerminator());
218 // Cast pointer to the type of value getting stored
// (keeps the destination's original address space).
219 unsigned dstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();
220 DstAddr = Builder.CreateBitCast(DstAddr,
221 PointerType::get(SetValue->getType(), dstAS));
223 IRBuilder<> LoopBuilder(LoopBB);
// Induction variable starts at 0 on entry from OrigBB.
224 PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLen->getType(), 0);
225 LoopIndex->addIncoming(ConstantInt::get(CopyLen->getType(), 0), OrigBB);
// Store SetValue at DstAddr + LoopIndex (element-typed GEP).
227 LoopBuilder.CreateStore(
228 SetValue, LoopBuilder.CreateGEP(SetValue->getType(), DstAddr, LoopIndex),
232 LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLen->getType(), 1));
233 LoopIndex->addIncoming(NewIndex, LoopBB);
// Loop while NewIndex < CopyLen (unsigned); then fall through to NewBB.
235 LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
// Entry point: scans F for (a) single-use aggregate loads that feed a
// store and are >= MaxAggrCopySize bytes, and (b) mem* intrinsics with a
// variable length or a constant length >= MaxAggrCopySize, then lowers
// each into an explicit loop via the convert* helpers above.
// Returns true when the function was modified (NOTE(review): the return
// statements themselves are among the lines elided from this listing).
239 bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
240 SmallVector<LoadInst *, 4> AggrLoads;
241 SmallVector<MemIntrinsic *, 4> MemCalls;
243 const DataLayout &DL = F.getParent()->getDataLayout();
244 LLVMContext &Context = F.getParent()->getContext();
246 // Collect all aggregate loads and mem* calls.
// Collection happens first so lowering (which splits blocks and erases
// instructions) does not invalidate the iterators used here.
247 for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
248 for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
250 if (LoadInst *LI = dyn_cast<LoadInst>(II)) {
// Only handle loads whose single user is a store of the loaded value
// (a plain aggregate copy). NOTE(review): the `continue;` statements
// for the rejecting branches are elided from this listing.
251 if (!LI->hasOneUse())
254 if (DL.getTypeStoreSize(LI->getType()) < MaxAggrCopySize)
257 if (StoreInst *SI = dyn_cast<StoreInst>(LI->user_back())) {
// The load must be the stored VALUE, not the store's address operand.
258 if (SI->getOperand(0) != LI)
260 AggrLoads.push_back(LI);
262 } else if (MemIntrinsic *IntrCall = dyn_cast<MemIntrinsic>(II)) {
263 // Convert intrinsic calls with variable size or with constant size
264 // larger than the MaxAggrCopySize threshold.
265 if (ConstantInt *LenCI = dyn_cast<ConstantInt>(IntrCall->getLength())) {
266 if (LenCI->getZExtValue() >= MaxAggrCopySize) {
267 MemCalls.push_back(IntrCall);
// Variable-length mem* call: always lower.
270 MemCalls.push_back(IntrCall);
// Nothing to do: leave the function untouched.
276 if (AggrLoads.size() == 0 && MemCalls.size() == 0) {
281 // Do the transformation of an aggr load/copy/set to a loop
283 for (LoadInst *LI : AggrLoads) {
// Safe by construction: collection guaranteed the single user is a store.
284 StoreInst *SI = dyn_cast<StoreInst>(*LI->user_begin());
285 Value *SrcAddr = LI->getOperand(0);
286 Value *DstAddr = SI->getOperand(1);
// Byte count of the copied aggregate, per the module's DataLayout.
287 unsigned NumLoads = DL.getTypeStoreSize(LI->getType());
288 Value *CopyLen = ConstantInt::get(Type::getInt32Ty(Context), NumLoads);
290 convertMemCpyToLoop(/* ConvertedInst */ SI,
291 /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
292 /* CopyLen */ CopyLen,
293 /* SrcIsVolatile */ LI->isVolatile(),
294 /* DstIsVolatile */ SI->isVolatile(),
295 /* Context */ Context,
// The originals are now redundant; the loop replaces them.
298 SI->eraseFromParent();
299 LI->eraseFromParent();
302 // Transform mem* intrinsic calls.
303 for (MemIntrinsic *MemCall : MemCalls) {
304 if (MemCpyInst *Memcpy = dyn_cast<MemCpyInst>(MemCall)) {
305 convertMemCpyToLoop(/* ConvertedInst */ Memcpy,
306 /* SrcAddr */ Memcpy->getRawSource(),
307 /* DstAddr */ Memcpy->getRawDest(),
308 /* CopyLen */ Memcpy->getLength(),
309 /* SrcIsVolatile */ Memcpy->isVolatile(),
310 /* DstIsVolatile */ Memcpy->isVolatile(),
311 /* Context */ Context,
313 } else if (MemMoveInst *Memmove = dyn_cast<MemMoveInst>(MemCall)) {
314 convertMemMoveToLoop(/* ConvertedInst */ Memmove,
315 /* SrcAddr */ Memmove->getRawSource(),
316 /* DstAddr */ Memmove->getRawDest(),
317 /* CopyLen */ Memmove->getLength(),
318 /* SrcIsVolatile */ Memmove->isVolatile(),
319 /* DstIsVolatile */ Memmove->isVolatile(),
320 /* Context */ Context,
323 } else if (MemSetInst *Memset = dyn_cast<MemSetInst>(MemCall)) {
324 convertMemSetToLoop(/* ConvertedInst */ Memset,
325 /* DstAddr */ Memset->getRawDest(),
326 /* CopyLen */ Memset->getLength(),
327 /* SetValue */ Memset->getValue(),
328 /* Context */ Context,
// The intrinsic call is fully replaced by the emitted loop.
331 MemCall->eraseFromParent();
// Forward declaration used by INITIALIZE_PASS below.
340 void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
// Register the pass with LLVM's pass registry under the CLI name
// "nvptx-lower-aggr-copies".
343 INITIALIZE_PASS(NVPTXLowerAggrCopies, "nvptx-lower-aggr-copies",
344 "Lower aggregate copies, and llvm.mem* intrinsics into loops",
// Factory used by the NVPTX target to add this pass to its pipeline.
// NOTE(review): the closing brace falls past the end of this listing.
347 FunctionPass *llvm::createLowerAggrCopies() {
348 return new NVPTXLowerAggrCopies();