1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the auto-upgrade helper functions
12 //===----------------------------------------------------------------------===//
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/IRBuilder.h"
// UpgradeIntrinsicFunction1 - Detect whether F is an old-style intrinsic
// and, if so, upgrade it.  Two upgrade strategies are visible here:
//  (1) rename F in place (F->setName(...)) when only the name changed, or
//  (2) leave F in place and set NewFn to a freshly inserted declaration with
//      the corrected name/type so the caller can rewrite each call site.
// NOTE(review): this listing is an elided dump -- the leading integer on each
// line is the original file's line number, and lines whose numbers are
// skipped (early "return" statements, "else" arms, closing braces, trailing
// arguments of multi-line calls) are not visible.  Comments below describe
// only what the visible lines establish; elisions are flagged.
28 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
29 assert(F && "Illegal to upgrade a non-existent Function.");
31 // Get the Function's name.
32 const std::string& Name = F->getName();
35 const FunctionType *FTy = F->getFunctionType();
37 // Quickly eliminate it, if it's not a candidate: every upgradable
37 // intrinsic name starts with the "llvm." prefix.
38 if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
39 Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
// NOTE(review): the early-exit body of this guard is elided in this dump.
42 Module *M = F->getParent();
46 // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss
47 // short forms, and atomics that lack an address-space qualifier, to the
48 // new function names (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
49 if (Name.compare(5,7,"atomic.",7) == 0) {
// "lcs" (load/compare/swap) becomes the cmp.swap intrinsic; the ".iNN"
// suffix is kept and a default ".p0" address-space qualifier is appended.
50 if (Name.compare(12,3,"lcs",3) == 0) {
51 std::string::size_type delim = Name.find('.',12);
52 F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
53 ".p0" + Name.substr(delim+1));
57 else if (Name.compare(12,3,"las",3) == 0) {
58 std::string::size_type delim = Name.find('.',12);
59 F->setName("llvm.atomic.load.add"+Name.substr(delim)
60 + ".p0" + Name.substr(delim+1));
64 else if (Name.compare(12,3,"lss",3) == 0) {
65 std::string::size_type delim = Name.find('.',12);
66 F->setName("llvm.atomic.load.sub"+Name.substr(delim)
67 + ".p0" + Name.substr(delim+1));
71 else if (Name.rfind(".p") == std::string::npos) {
72 // We don't have an address space qualifier so this has to be upgraded
73 // to the new name. Copy the type name at the end of the intrinsic
73 // and append the default ".p0" address-space qualifier plus that type.
75 std::string::size_type delim = Name.find_last_of('.');
76 assert(delim != std::string::npos && "can not find type");
77 F->setName(Name + ".p0" + Name.substr(delim+1));
81 } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
// Signed/unsigned NEON widening/narrowing move, add and sub intrinsics
// are expanded into plain IR (sext/zext/trunc + add/sub) by
// UpgradeIntrinsicCall below, so here they only need to be recognized.
82 if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
83 Name.compare(14, 5, "vaddl", 5) == 0 ||
84 Name.compare(14, 5, "vsubl", 5) == 0) &&
85 (Name.compare(19, 2, "s.", 2) == 0 ||
86 Name.compare(19, 2, "u.", 2) == 0)) ||
88 ((Name.compare(14, 5, "vaddw", 5) == 0 ||
89 Name.compare(14, 5, "vsubw", 5) == 0) &&
90 (Name.compare(19, 2, "s.", 2) == 0 ||
91 Name.compare(19, 2, "u.", 2) == 0)) ||
93 (Name.compare(14, 6, "vmovn.", 6) == 0)) {
95 // Calls to these are transformed into IR without intrinsics.
99 // Old versions of NEON ld/st intrinsics are missing alignment arguments.
100 bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
101 bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
102 if (isVLd || isVSt) {
// The digit after "vld"/"vst" encodes how many vectors are transferred.
103 unsigned NumVecs = Name.at(17) - '0';
104 if (NumVecs == 0 || NumVecs > 4)
// NOTE(review): the early-exit body of this guard is elided in this dump.
106 bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
107 if (!isLaneOp && Name.at(18) != '.')
109 unsigned ExpectedArgs = 2; // for the address and alignment
110 if (isVSt || isLaneOp)
111 ExpectedArgs += NumVecs;
113 ExpectedArgs += 1; // for the lane number
114 unsigned NumP = FTy->getNumParams();
// An old-style declaration has one parameter fewer (no alignment arg);
// anything else is already up to date or not ours to touch.
115 if (NumP != ExpectedArgs - 1)
118 // Change the name of the old (bad) intrinsic, because
119 // its type is incorrect, but we cannot overload that name.
122 // One argument is missing: add the alignment argument.
123 std::vector<const Type*> NewParams;
124 for (unsigned p = 0; p < NumP; ++p)
125 NewParams.push_back(FTy->getParamType(p));
126 NewParams.push_back(Type::getInt32Ty(F->getContext()));
127 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
// NOTE(review): the remaining FunctionType::get(...) arguments are elided.
129 NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));
135 // This upgrades the name of the llvm.bswap intrinsic function to only use
136 // a single type name for overloading. We only care about the old format
137 // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
138 // a '.' after 'bswap.'
139 if (Name.compare(5,6,"bswap.",6) == 0) {
140 std::string::size_type delim = Name.find('.',11);
142 if (delim != std::string::npos) {
143 // Construct the new name as 'llvm.bswap' + '.i*'
144 F->setName(Name.substr(0,10)+Name.substr(delim));
152 // We only want to fix the 'llvm.ct*' intrinsics which do not have the
153 // correct return type, so we check for the name, and then check if the
154 // return type does not match the parameter type.
155 if ( (Name.compare(5,5,"ctpop",5) == 0 ||
156 Name.compare(5,4,"ctlz",4) == 0 ||
157 Name.compare(5,4,"cttz",4) == 0) &&
158 FTy->getReturnType() != FTy->getParamType(0)) {
159 // We first need to change the name of the old (bad) intrinsic, because
160 // its type is incorrect, but we cannot overload that name. We
161 // arbitrarily unique it here allowing us to construct a correctly named
162 // and typed function below.
165 // Now construct the new intrinsic with the correct name and type. We
166 // leave the old function around in order to query its type, whatever it
167 // may be, and correctly convert up to the new type.
168 NewFn = cast<Function>(M->getOrInsertFunction(Name,
169 FTy->getParamType(0),
170 FTy->getParamType(0),
// NOTE(review): the terminating argument of this call is elided.
177 // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
178 if (Name.compare("llvm.eh.selector.i32") == 0) {
179 F->setName("llvm.eh.selector");
183 // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
184 if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
185 F->setName("llvm.eh.typeid.for");
189 // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector
189 // (return type changed, so calls must be rewritten via NewFn).
190 if (Name.compare("llvm.eh.selector.i64") == 0) {
191 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);
194 // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
195 if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
196 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);
202 // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
203 // new format that allows overloading the pointer for different address
204 // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
205 const char* NewFnName = NULL;
206 if (Name.compare(5,8,"memcpy.i",8) == 0) {
// NOTE(review): the "i8" length-type comparison guarding this assignment
// is elided in this dump (same pattern as the visible "16"/"32"/"64" arms).
208 NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
209 else if (Name.compare(13,2,"16") == 0)
210 NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
211 else if (Name.compare(13,2,"32") == 0)
212 NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
213 else if (Name.compare(13,2,"64") == 0)
214 NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
215 } else if (Name.compare(5,9,"memmove.i",9) == 0) {
217 NewFnName = "llvm.memmove.p0i8.p0i8.i8";
218 else if (Name.compare(14,2,"16") == 0)
219 NewFnName = "llvm.memmove.p0i8.p0i8.i16";
220 else if (Name.compare(14,2,"32") == 0)
221 NewFnName = "llvm.memmove.p0i8.p0i8.i32";
222 else if (Name.compare(14,2,"64") == 0)
223 NewFnName = "llvm.memmove.p0i8.p0i8.i64";
225 else if (Name.compare(5,8,"memset.i",8) == 0) {
227 NewFnName = "llvm.memset.p0i8.i8";
228 else if (Name.compare(13,2,"16") == 0)
229 NewFnName = "llvm.memset.p0i8.i16";
230 else if (Name.compare(13,2,"32") == 0)
231 NewFnName = "llvm.memset.p0i8.i32";
232 else if (Name.compare(13,2,"64") == 0)
233 NewFnName = "llvm.memset.p0i8.i64";
// Declare the new mem* intrinsic: same return and first four parameter
// types, plus a trailing i1 (the new "isvolatile" flag slot).
236 NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
237 FTy->getReturnType(),
238 FTy->getParamType(0),
239 FTy->getParamType(1),
240 FTy->getParamType(2),
241 FTy->getParamType(3),
242 Type::getInt1Ty(F->getContext()),
249 // This upgrades the llvm.part.select overloaded intrinsic names to only
250 // use one type specifier in the name. We only care about the old format
251 // 'llvm.part.select.i*.i*', and solve as above with bswap.
252 if (Name.compare(5,12,"part.select.",12) == 0) {
253 std::string::size_type delim = Name.find('.',17);
255 if (delim != std::string::npos) {
256 // Construct a new name as 'llvm.part.select' + '.i*'
257 F->setName(Name.substr(0,16)+Name.substr(delim));
264 // This upgrades the llvm.part.set intrinsics similarly as above, however
265 // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
266 // must match. There is an additional type specifier after these two
267 // matching types that we must retain when upgrading. Thus, we require
268 // finding 2 periods, not just one, after the intrinsic name.
269 if (Name.compare(5,9,"part.set.",9) == 0) {
270 std::string::size_type delim = Name.find('.',14);
272 if (delim != std::string::npos &&
273 Name.find('.',delim+1) != std::string::npos) {
274 // Construct a new name as 'llvm.part.set' + '.i*.i*'
275 F->setName(Name.substr(0,13)+Name.substr(delim));
284 // This fixes all MMX shift intrinsic instructions to take a
285 // v1i64 instead of a v2i32 as the second parameter.
286 if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
287 (Name.compare(13,4,"psll", 4) == 0 ||
288 Name.compare(13,4,"psra", 4) == 0 ||
289 Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {
291 const llvm::Type *VT =
292 VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);
294 // We don't have to do anything if the parameter already has
294 // the correct (v1i64) type.
296 if (FTy->getParamType(1) == VT)
299 // We first need to change the name of the old (bad) intrinsic, because
300 // its type is incorrect, but we cannot overload that name. We
301 // arbitrarily unique it here allowing us to construct a correctly named
302 // and typed function below.
305 assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");
307 // Now construct the new intrinsic with the correct name and type. We
308 // leave the old function around in order to query its type, whatever it
309 // may be, and correctly convert up to the new type.
310 NewFn = cast<Function>(M->getOrInsertFunction(Name,
311 FTy->getReturnType(),
312 FTy->getParamType(0),
// NOTE(review): the v1i64 second-parameter argument and the terminator of
// this getOrInsertFunction call are elided in this dump.
316 } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
317 Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
318 Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
319 Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
320 Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
321 Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
322 Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
323 Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
324 Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
325 // Calls to these intrinsics are transformed into ShuffleVector's.
328 } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
329 // Calls to these intrinsics are transformed into vector multiplies.
332 } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
333 Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
334 // Calls to these intrinsics are transformed into vector shuffles, shifts,
343 // This may not belong here. This function is effectively being overloaded
344 // to both detect an intrinsic which needs upgrading, and to provide the
345 // upgraded form of the intrinsic. We should perhaps have two separate
346 // functions for this.
// UpgradeIntrinsicFunction - Public entry point: runs the name/type upgrade
// via UpgradeIntrinsicFunction1, then refreshes the intrinsic's attributes.
// NOTE(review): the trailing "return Upgraded;" and the closing brace are
// elided from this dump (the leading integers are original line numbers).
350 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
352 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
354 // Upgrade intrinsic attributes. This does not change the function.
357 if (unsigned id = F->getIntrinsicID())
358 F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
362 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
363 // the upgraded intrinsic. All argument and return casting must be provided
364 // in order to seamlessly integrate with existing context.
// NOTE(review): elided dump -- the leading integer on each line is the
// original file's line number; skipped numbers mean elided lines ("else"
// arms, local declarations such as "Instruction *NewI" / "Value *Rep" /
// "unsigned MaskVal", early returns and closing braces).  Two regimes below:
// the first half handles intrinsics that are expanded into plain IR
// (NewFn == 0 path); the trailing switch handles intrinsics that were
// redeclared as NewFn and only need their call sites rewritten.
365 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
366 Function *F = CI->getCalledFunction();
367 LLVMContext &C = CI->getContext();
368 ImmutableCallSite CS(CI);
370 assert(F && "CallInst has no function associated with it.");
373 // Get the Function's name.
374 const std::string& Name = F->getName();
376 // Upgrade ARM NEON intrinsics that expand to plain IR.  NewI receives the
376 // replacement instruction (its declaration line is elided in this dump).
377 if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
379 if (Name.compare(14, 7, "vmovls.", 7) == 0) {
380 NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
381 "upgraded." + CI->getName(), CI);
382 } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
383 NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
384 "upgraded." + CI->getName(), CI);
386 } else if (Name.compare(14, 4, "vadd", 4) == 0 ||
387 Name.compare(14, 4, "vsub", 4) == 0) {
388 // Extend one (vaddw/vsubw) or both (vaddl/vsubl) operands.
389 Value *V0 = CI->getArgOperand(0);
390 Value *V1 = CI->getArgOperand(1);
// Name position 19 carries the signedness ('s'/'u'); position 18
// distinguishes the long ('l') form, which also extends operand 0.
391 if (Name.at(19) == 's') {
392 if (Name.at(18) == 'l')
393 V0 = new SExtInst(CI->getArgOperand(0), CI->getType(), "", CI);
394 V1 = new SExtInst(CI->getArgOperand(1), CI->getType(), "", CI);
396 assert(Name.at(19) == 'u' && "unexpected vadd/vsub intrinsic");
397 if (Name.at(18) == 'l')
398 V0 = new ZExtInst(CI->getArgOperand(0), CI->getType(), "", CI);
399 V1 = new ZExtInst(CI->getArgOperand(1), CI->getType(), "", CI);
401 if (Name.compare(14, 4, "vadd", 4) == 0)
402 NewI = BinaryOperator::CreateAdd(V0, V1,"upgraded."+CI->getName(),CI);
404 NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
406 } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
407 NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
408 "upgraded." + CI->getName(), CI);
410 llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
412 // Replace any uses of the old CallInst.
413 if (!CI->use_empty())
414 CI->replaceAllUsesWith(NewI);
415 CI->eraseFromParent();
// Classify which SSE2 shuffle-style intrinsic this call is; all of these
// are lowered to an IR ShuffleVectorInst below.  (The "isX = true;"
// assignment lines are elided in this dump.)
419 bool isLoadH = false, isLoadL = false, isMovL = false;
420 bool isMovSD = false, isShufPD = false;
421 bool isUnpckhPD = false, isUnpcklPD = false;
422 bool isPunpckhQPD = false, isPunpcklQPD = false;
423 if (F->getName() == "llvm.x86.sse2.loadh.pd")
425 else if (F->getName() == "llvm.x86.sse2.loadl.pd")
427 else if (F->getName() == "llvm.x86.sse2.movl.dq")
429 else if (F->getName() == "llvm.x86.sse2.movs.d")
431 else if (F->getName() == "llvm.x86.sse2.shuf.pd")
433 else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
435 else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
437 else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
439 else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")
442 if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
443 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
444 std::vector<Constant*> Idxs;
445 Value *Op0 = CI->getArgOperand(0);
446 ShuffleVectorInst *SI = NULL;
447 if (isLoadH || isLoadL) {
// loadh/loadl: load one double through the pointer operand and insert
// it into element 0 of a fresh vector, then shuffle it into the high
// (loadh) or low (loadl) lane of Op0.
448 Value *Op1 = UndefValue::get(Op0->getType());
449 Value *Addr = new BitCastInst(CI->getArgOperand(1),
450 Type::getDoublePtrTy(C),
452 Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
453 Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
454 Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);
457 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
458 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
460 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
461 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
463 Value *Mask = ConstantVector::get(Idxs);
464 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
// movl.dq: build an all-zero i32 vector and shuffle the low half of Op0
// over it.  (The "else if (isMovL)" header line is elided in this dump.)
466 Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
467 Idxs.push_back(Zero);
468 Idxs.push_back(Zero);
469 Idxs.push_back(Zero);
470 Idxs.push_back(Zero);
471 Value *ZeroV = ConstantVector::get(Idxs);
474 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
475 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
476 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
477 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
478 Value *Mask = ConstantVector::get(Idxs);
479 SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
480 } else if (isMovSD ||
481 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
482 Value *Op1 = CI->getArgOperand(1);
// movsd takes the low element of Op1 and the high element of Op0;
// unpckh takes the two high elements, unpckl the two low ones.
// (The "if (isMovSD) {" header line is elided in this dump.)
484 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
485 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
486 } else if (isUnpckhPD || isPunpckhQPD) {
487 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
488 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
490 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
491 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
493 Value *Mask = ConstantVector::get(Idxs);
494 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
495 } else if (isShufPD) {
// shuf.pd: decode the two mask bits of the immediate (arg 2) into
// shuffle indices.  (The "unsigned MaskVal =" line is elided.)
496 Value *Op1 = CI->getArgOperand(1);
498 cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
499 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
500 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
501 ((MaskVal >> 1) & 1)+2));
502 Value *Mask = ConstantVector::get(Idxs);
503 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
506 assert(SI && "Unexpected!");
508 // Handle any uses of the old CallInst.
509 if (!CI->use_empty())
510 // Replace all uses of the old call with the new cast which has the
510 // correct type.
512 CI->replaceAllUsesWith(SI);
514 // Clean up the old call now that it has been completely upgraded.
515 CI->eraseFromParent();
516 } else if (F->getName() == "llvm.x86.sse41.pmulld") {
517 // Upgrade this set of intrinsics into vector multiplies.
518 Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
519 CI->getArgOperand(1),
522 // Fix up all the uses with our new multiply.
523 if (!CI->use_empty())
524 CI->replaceAllUsesWith(Mul);
526 // Remove upgraded multiply.
527 CI->eraseFromParent();
528 } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
// 64-bit (MMX) palignr.  Rep receives the replacement value; its
// declaration line is elided in this dump.
529 Value *Op1 = CI->getArgOperand(0);
530 Value *Op2 = CI->getArgOperand(1);
531 Value *Op3 = CI->getArgOperand(2);
532 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
534 IRBuilder<> Builder(C);
535 Builder.SetInsertPoint(CI->getParent(), CI);
537 // If palignr is shifting the pair of input vectors less than 9 bytes,
538 // emit a shuffle instruction.  (The "if (shiftVal <= 8) {" header line
538 // is elided in this dump.)
540 const Type *IntTy = Type::getInt32Ty(C);
541 const Type *EltTy = Type::getInt8Ty(C);
542 const Type *VecTy = VectorType::get(EltTy, 8);
544 Op2 = Builder.CreateBitCast(Op2, VecTy);
545 Op1 = Builder.CreateBitCast(Op1, VecTy);
547 llvm::SmallVector<llvm::Constant*, 8> Indices;
548 for (unsigned i = 0; i != 8; ++i)
549 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
551 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
552 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
553 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
556 // If palignr is shifting the pair of input vectors more than 8 but less
557 // than 16 bytes, emit a logical right shift of the destination.
558 else if (shiftVal < 16) {
559 // MMX has these as 1 x i64 vectors for some odd optimization reasons.
560 const Type *EltTy = Type::getInt64Ty(C);
561 const Type *VecTy = VectorType::get(EltTy, 1);
563 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
564 Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
566 // create i32 constant
568 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
569 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
572 // If palignr is shifting the pair of vectors 16 or more bytes, emit zero.
574 Rep = Constant::getNullValue(F->getReturnType());
577 // Replace any uses with our new instruction.
578 if (!CI->use_empty())
579 CI->replaceAllUsesWith(Rep);
581 // Remove upgraded instruction.
582 CI->eraseFromParent();
584 } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
// 128-bit (SSE) palignr -- same scheme as above with 16-byte lanes.
585 Value *Op1 = CI->getArgOperand(0);
586 Value *Op2 = CI->getArgOperand(1);
587 Value *Op3 = CI->getArgOperand(2);
588 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
590 IRBuilder<> Builder(C);
591 Builder.SetInsertPoint(CI->getParent(), CI);
593 // If palignr is shifting the pair of input vectors less than 17 bytes,
594 // emit a shuffle instruction.
595 if (shiftVal <= 16) {
596 const Type *IntTy = Type::getInt32Ty(C);
597 const Type *EltTy = Type::getInt8Ty(C);
598 const Type *VecTy = VectorType::get(EltTy, 16);
600 Op2 = Builder.CreateBitCast(Op2, VecTy);
601 Op1 = Builder.CreateBitCast(Op1, VecTy);
603 llvm::SmallVector<llvm::Constant*, 16> Indices;
604 for (unsigned i = 0; i != 16; ++i)
605 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
607 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
608 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
609 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
612 // If palignr is shifting the pair of input vectors more than 16 but less
613 // than 32 bytes, emit a logical right shift of the destination.
614 else if (shiftVal < 32) {
615 const Type *EltTy = Type::getInt64Ty(C);
616 const Type *VecTy = VectorType::get(EltTy, 2);
617 const Type *IntTy = Type::getInt32Ty(C);
619 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
620 Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
622 // create i32 constant
624 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
625 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
628 // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
630 Rep = Constant::getNullValue(F->getReturnType());
633 // Replace any uses with our new instruction.
634 if (!CI->use_empty())
635 CI->replaceAllUsesWith(Rep);
637 // Remove upgraded instruction.
638 CI->eraseFromParent();
641 llvm_unreachable("Unknown function for CallInst upgrade.");
// From here on NewFn is a valid redeclared intrinsic: rewrite the call
// site to target it, casting arguments/results as needed.
646 switch (NewFn->getIntrinsicID()) {
647 default: llvm_unreachable("Unknown function for CallInst upgrade.");
648 case Intrinsic::arm_neon_vld1:
649 case Intrinsic::arm_neon_vld2:
650 case Intrinsic::arm_neon_vld3:
651 case Intrinsic::arm_neon_vld4:
652 case Intrinsic::arm_neon_vst1:
653 case Intrinsic::arm_neon_vst2:
654 case Intrinsic::arm_neon_vst3:
655 case Intrinsic::arm_neon_vst4:
656 case Intrinsic::arm_neon_vld2lane:
657 case Intrinsic::arm_neon_vld3lane:
658 case Intrinsic::arm_neon_vld4lane:
659 case Intrinsic::arm_neon_vst2lane:
660 case Intrinsic::arm_neon_vst3lane:
661 case Intrinsic::arm_neon_vst4lane: {
662 // Add a default alignment argument of 1.
663 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
664 Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
665 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
667 NewCI->setTailCall(CI->isTailCall());
668 NewCI->setCallingConv(CI->getCallingConv());
670 // Handle any uses of the old CallInst.
671 if (!CI->use_empty())
672 // Replace all uses of the old call with the new cast which has the
672 // correct type.
674 CI->replaceAllUsesWith(NewCI);
676 // Clean up the old call now that it has been completely upgraded.
677 CI->eraseFromParent();
681 case Intrinsic::x86_mmx_psll_d:
682 case Intrinsic::x86_mmx_psll_q:
683 case Intrinsic::x86_mmx_psll_w:
684 case Intrinsic::x86_mmx_psra_d:
685 case Intrinsic::x86_mmx_psra_w:
686 case Intrinsic::x86_mmx_psrl_d:
687 case Intrinsic::x86_mmx_psrl_q:
688 case Intrinsic::x86_mmx_psrl_w: {
// (The "Value *Operands[2];" declaration line is elided in this dump.)
691 Operands[0] = CI->getArgOperand(0);
693 // Cast the second parameter to the correct type.
694 BitCastInst *BC = new BitCastInst(CI->getArgOperand(1),
695 NewFn->getFunctionType()->getParamType(1),
699 // Construct a new CallInst
700 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2,
701 "upgraded."+CI->getName(), CI);
702 NewCI->setTailCall(CI->isTailCall());
703 NewCI->setCallingConv(CI->getCallingConv());
705 // Handle any uses of the old CallInst.
706 if (!CI->use_empty())
707 // Replace all uses of the old call with the new cast which has the
707 // correct type.
709 CI->replaceAllUsesWith(NewCI);
711 // Clean up the old call now that it has been completely upgraded.
712 CI->eraseFromParent();
715 case Intrinsic::ctlz:
716 case Intrinsic::ctpop:
717 case Intrinsic::cttz: {
718 // Build a small vector of the original arguments.
719 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
721 // Construct a new CallInst
722 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
723 "upgraded."+CI->getName(), CI);
724 NewCI->setTailCall(CI->isTailCall());
725 NewCI->setCallingConv(CI->getCallingConv());
727 // Handle any uses of the old CallInst.
728 if (!CI->use_empty()) {
729 // Check for sign extend parameter attributes on the return values.
730 bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
731 bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
733 // Construct an appropriate cast from the new return type to the old.
734 CastInst *RetCast = CastInst::Create(
735 CastInst::getCastOpcode(NewCI, SrcSExt,
738 NewCI, F->getReturnType(),
739 NewCI->getName(), CI);
740 NewCI->moveBefore(RetCast);
742 // Replace all uses of the old call with the new cast which has the
742 // correct type.
744 CI->replaceAllUsesWith(RetCast);
747 // Clean up the old call now that it has been completely upgraded.
748 CI->eraseFromParent();
751 case Intrinsic::eh_selector:
752 case Intrinsic::eh_typeid_for: {
753 // Only the return type changed.
754 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
755 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
756 "upgraded." + CI->getName(), CI);
757 NewCI->setTailCall(CI->isTailCall());
758 NewCI->setCallingConv(CI->getCallingConv());
760 // Handle any uses of the old CallInst.
761 if (!CI->use_empty()) {
762 // Construct an appropriate cast from the new return type to the old.
764 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
765 F->getReturnType(), true),
766 NewCI, F->getReturnType(), NewCI->getName(), CI);
767 CI->replaceAllUsesWith(RetCast);
769 CI->eraseFromParent();
772 case Intrinsic::memcpy:
773 case Intrinsic::memmove:
774 case Intrinsic::memset: {
// Append the new i1 "isvolatile" operand, defaulted to false.
776 const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
777 Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
778 CI->getArgOperand(2), CI->getArgOperand(3),
779 llvm::ConstantInt::get(I1Ty, 0) };
780 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
782 NewCI->setTailCall(CI->isTailCall());
783 NewCI->setCallingConv(CI->getCallingConv());
784 // Handle any uses of the old CallInst.
785 if (!CI->use_empty())
786 // Replace all uses of the old call with the new cast which has the
786 // correct type.
788 CI->replaceAllUsesWith(NewCI);
790 // Clean up the old call now that it has been completely upgraded.
791 CI->eraseFromParent();
797 // This tests each Function to determine if it needs upgrading. When we find
798 // one we are interested in, we then upgrade all calls to reflect the new
798 // function, and delete the old declaration.
// NOTE(review): the "Function *NewFn;" declaration, the loop-condition tail,
// and the closing braces are elided in this dump (the leading integers are
// original line numbers).
800 void llvm::UpgradeCallsToIntrinsic(Function* F) {
801 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
803 // Upgrade the function and check if it is a totally new function.
805 if (UpgradeIntrinsicFunction(F, NewFn)) {
807 // Replace all uses to the old function with the new one if necessary.
// The iterator is advanced inside dyn_cast's argument (*UI++) because
// UpgradeIntrinsicCall erases the visited call, invalidating UI.
808 for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
810 if (CallInst* CI = dyn_cast<CallInst>(*UI++))
811 UpgradeIntrinsicCall(CI, NewFn);
813 // Remove old function, no longer used, from the module.
814 F->eraseFromParent();
819 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
820 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
820 /// strips that too.
// NOTE(review): elided dump -- closing braces of the while loops, the outer
// ifs, and the function itself fall past the visible lines.  Each block
// below removes every call to one retired debug intrinsic and then deletes
// the declaration itself.
822 void llvm::CheckDebugInfoIntrinsics(Module *M) {
825 if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
826 while (!FuncStart->use_empty()) {
827 CallInst *CI = cast<CallInst>(FuncStart->use_back());
828 CI->eraseFromParent();
830 FuncStart->eraseFromParent();
833 if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
834 while (!StopPoint->use_empty()) {
835 CallInst *CI = cast<CallInst>(StopPoint->use_back());
836 CI->eraseFromParent();
838 StopPoint->eraseFromParent();
841 if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
842 while (!RegionStart->use_empty()) {
843 CallInst *CI = cast<CallInst>(RegionStart->use_back());
844 CI->eraseFromParent();
846 RegionStart->eraseFromParent();
849 if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
850 while (!RegionEnd->use_empty()) {
851 CallInst *CI = cast<CallInst>(RegionEnd->use_back());
852 CI->eraseFromParent();
854 RegionEnd->eraseFromParent();
857 if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
858 if (!Declare->use_empty()) {
// llvm.dbg.declare is kept, but only if its first two operands are the
// MDNodes the verifier expects; otherwise all declares are stripped too.
859 DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
860 if (!isa<MDNode>(DDI->getArgOperand(0)) ||
861 !isa<MDNode>(DDI->getArgOperand(1))) {
862 while (!Declare->use_empty()) {
863 CallInst *CI = cast<CallInst>(Declare->use_back());
864 CI->eraseFromParent();
866 Declare->eraseFromParent();