Renamed llvm.x86.sse42.crc32 intrinsics; crc64 doesn't exist.
[oota-llvm.git] / lib / VMCore / AutoUpgrade.cpp
1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the auto-upgrade helper functions.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/IRBuilder.h"
24 #include <cstring>
25 using namespace llvm;
26
27
28 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
29   assert(F && "Illegal to upgrade a non-existent Function.");
30
31   // Get the Function's name.
32   const std::string& Name = F->getName();
33
34   // Convenience
35   const FunctionType *FTy = F->getFunctionType();
36
37   // Quickly eliminate it, if it's not a candidate.
38   if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' || 
39       Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
40     return false;
41
42   Module *M = F->getParent();
43   switch (Name[5]) {
44   default: break;
45   case 'a':
46     // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
47     // and atomics without an address-space qualifier to their new function
48     // names (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32).
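    // The lcs/las/lss forms are also renamed, for example:
    //   llvm.atomic.lcs.i32 => llvm.atomic.cmp.swap.i32.p0i32
    //   llvm.atomic.las.i32 => llvm.atomic.load.add.i32.p0i32
    //   llvm.atomic.lss.i32 => llvm.atomic.load.sub.i32.p0i32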
49     if (Name.compare(5,7,"atomic.",7) == 0) {
50       if (Name.compare(12,3,"lcs",3) == 0) {
51         std::string::size_type delim = Name.find('.',12);
52         F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
53                    ".p0" + Name.substr(delim+1));
54         NewFn = F;
55         return true;
56       }
57       else if (Name.compare(12,3,"las",3) == 0) {
58         std::string::size_type delim = Name.find('.',12);
59         F->setName("llvm.atomic.load.add"+Name.substr(delim)
60                    + ".p0" + Name.substr(delim+1));
61         NewFn = F;
62         return true;
63       }
64       else if (Name.compare(12,3,"lss",3) == 0) {
65         std::string::size_type delim = Name.find('.',12);
66         F->setName("llvm.atomic.load.sub"+Name.substr(delim)
67                    + ".p0" + Name.substr(delim+1));
68         NewFn = F;
69         return true;
70       }
71       else if (Name.rfind(".p") == std::string::npos) {
72         // There is no address space qualifier, so this has to be upgraded to
73         // the new name: copy the type name from the end of the intrinsic and
74         // append it again behind a default ".p0" address space qualifier.
75         std::string::size_type delim = Name.find_last_of('.');
76         assert(delim != std::string::npos && "can not find type");
77         F->setName(Name + ".p0" + Name.substr(delim+1));
78         NewFn = F;
79         return true;
80       }
81     } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
82       if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
83             Name.compare(14, 5, "vaddl", 5) == 0 ||
84             Name.compare(14, 5, "vsubl", 5) == 0 ||
85             Name.compare(14, 5, "vaddw", 5) == 0 ||
86             Name.compare(14, 5, "vsubw", 5) == 0 ||
87             Name.compare(14, 5, "vmlal", 5) == 0 ||
88             Name.compare(14, 5, "vmlsl", 5) == 0 ||
89             Name.compare(14, 5, "vabdl", 5) == 0 ||
90             Name.compare(14, 5, "vabal", 5) == 0) &&
91            (Name.compare(19, 2, "s.", 2) == 0 ||
92             Name.compare(19, 2, "u.", 2) == 0)) ||
93
94           (Name.compare(14, 4, "vaba", 4) == 0 &&
95            (Name.compare(18, 2, "s.", 2) == 0 ||
96             Name.compare(18, 2, "u.", 2) == 0)) ||
97
98           (Name.compare(14, 6, "vmovn.", 6) == 0)) {
99
100         // Calls to these are transformed into IR without intrinsics.
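        // For example, a vaddls call becomes sign extensions of both operands
        // feeding a plain add; see UpgradeIntrinsicCall below.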
101         NewFn = 0;
102         return true;
103       }
104       // Old versions of NEON ld/st intrinsics are missing alignment arguments.
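      // A replacement declaration with a trailing i32 alignment parameter is
      // created here; UpgradeIntrinsicCall later supplies a default alignment
      // of 1 at each call site.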
105       bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
106       bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
107       if (isVLd || isVSt) {
108         unsigned NumVecs = Name.at(17) - '0';
109         if (NumVecs == 0 || NumVecs > 4)
110           return false;
111         bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
112         if (!isLaneOp && Name.at(18) != '.')
113           return false;
114         unsigned ExpectedArgs = 2; // for the address and alignment
115         if (isVSt || isLaneOp)
116           ExpectedArgs += NumVecs;
117         if (isLaneOp)
118           ExpectedArgs += 1; // for the lane number
119         unsigned NumP = FTy->getNumParams();
120         if (NumP != ExpectedArgs - 1)
121           return false;
122
123         // Change the name of the old (bad) intrinsic, because 
124         // its type is incorrect, but we cannot overload that name.
125         F->setName("");
126
127         // One argument is missing: add the alignment argument.
128         std::vector<const Type*> NewParams;
129         for (unsigned p = 0; p < NumP; ++p)
130           NewParams.push_back(FTy->getParamType(p));
131         NewParams.push_back(Type::getInt32Ty(F->getContext()));
132         FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
133                                                  NewParams, false);
134         NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));
135         return true;
136       }
137     }
138     break;
139   case 'b':
140     //  This upgrades the name of the llvm.bswap intrinsic function to only use 
141     //  a single type name for overloading. We only care about the old format
142     //  'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being 
143     //  a '.' after 'bswap.'
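    //  e.g. llvm.bswap.i32.i32 => llvm.bswap.i32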
144     if (Name.compare(5,6,"bswap.",6) == 0) {
145       std::string::size_type delim = Name.find('.',11);
146       
147       if (delim != std::string::npos) {
148         //  Construct the new name as 'llvm.bswap' + '.i*'
149         F->setName(Name.substr(0,10)+Name.substr(delim));
150         NewFn = F;
151         return true;
152       }
153     }
154     break;
155
156   case 'c':
157     //  We only want to fix the 'llvm.ct*' intrinsics which do not have the 
158     //  correct return type, so we check for the name, and then check if the 
159     //  return type does not match the parameter type.
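    //  e.g. a declaration of i32 @llvm.ctpop.i8(i8) is replaced with one of
    //  i8 @llvm.ctpop.i8(i8); UpgradeIntrinsicCall then casts the result back
    //  to the old return type at each call site.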
160     if ( (Name.compare(5,5,"ctpop",5) == 0 ||
161           Name.compare(5,4,"ctlz",4) == 0 ||
162           Name.compare(5,4,"cttz",4) == 0) &&
163         FTy->getReturnType() != FTy->getParamType(0)) {
164       //  We first need to change the name of the old (bad) intrinsic, because 
165       //  its type is incorrect, but we cannot overload that name. We 
166       //  arbitrarily unique it here allowing us to construct a correctly named 
167       //  and typed function below.
168       F->setName("");
169
170       //  Now construct the new intrinsic with the correct name and type. We 
171       //  leave the old function around in order to query its type, whatever it 
172       //  may be, and correctly convert up to the new type.
173       NewFn = cast<Function>(M->getOrInsertFunction(Name, 
174                                                     FTy->getParamType(0),
175                                                     FTy->getParamType(0),
176                                                     (Type *)0));
177       return true;
178     }
179     break;
180
181   case 'e':
182     //  The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
183     if (Name.compare("llvm.eh.selector.i32") == 0) {
184       F->setName("llvm.eh.selector");
185       NewFn = F;
186       return true;
187     }
188     //  The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
189     if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
190       F->setName("llvm.eh.typeid.for");
191       NewFn = F;
192       return true;
193     }
194     //  Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
195     if (Name.compare("llvm.eh.selector.i64") == 0) {
196       NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);
197       return true;
198     }
199     //  Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
200     if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
201       NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);
202       return true;
203     }
204     break;
205
206   case 'm': {
207     // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
208     // new format that allows overloading the pointer for different address
209     // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
210     const char* NewFnName = NULL;
211     if (Name.compare(5,8,"memcpy.i",8) == 0) {
212       if (Name[13] == '8')
213         NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
214       else if (Name.compare(13,2,"16") == 0)
215         NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
216       else if (Name.compare(13,2,"32") == 0)
217         NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
218       else if (Name.compare(13,2,"64") == 0)
219         NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
220     } else if (Name.compare(5,9,"memmove.i",9) == 0) {
221       if (Name[14] == '8')
222         NewFnName = "llvm.memmove.p0i8.p0i8.i8";
223       else if (Name.compare(14,2,"16") == 0)
224         NewFnName = "llvm.memmove.p0i8.p0i8.i16";
225       else if (Name.compare(14,2,"32") == 0)
226         NewFnName = "llvm.memmove.p0i8.p0i8.i32";
227       else if (Name.compare(14,2,"64") == 0)
228         NewFnName = "llvm.memmove.p0i8.p0i8.i64";
229     }
230     else if (Name.compare(5,8,"memset.i",8) == 0) {
231       if (Name[13] == '8')
232         NewFnName = "llvm.memset.p0i8.i8";
233       else if (Name.compare(13,2,"16") == 0)
234         NewFnName = "llvm.memset.p0i8.i16";
235       else if (Name.compare(13,2,"32") == 0)
236         NewFnName = "llvm.memset.p0i8.i32";
237       else if (Name.compare(13,2,"64") == 0)
238         NewFnName = "llvm.memset.p0i8.i64";
239     }
240     if (NewFnName) {
241       NewFn = cast<Function>(M->getOrInsertFunction(NewFnName, 
242                                             FTy->getReturnType(),
243                                             FTy->getParamType(0),
244                                             FTy->getParamType(1),
245                                             FTy->getParamType(2),
246                                             FTy->getParamType(3),
247                                             Type::getInt1Ty(F->getContext()),
248                                             (Type *)0));
249       return true;
250     }
251     break;
252   }
253   case 'p':
254     //  This upgrades the llvm.part.select overloaded intrinsic names to only 
255     //  use one type specifier in the name. We only care about the old format
256     //  'llvm.part.select.i*.i*', which is handled the same way as bswap above.
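    //  e.g. llvm.part.select.i32.i32 => llvm.part.select.i32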
257     if (Name.compare(5,12,"part.select.",12) == 0) {
258       std::string::size_type delim = Name.find('.',17);
259       
260       if (delim != std::string::npos) {
261         //  Construct a new name as 'llvm.part.select' + '.i*'
262         F->setName(Name.substr(0,16)+Name.substr(delim));
263         NewFn = F;
264         return true;
265       }
266       break;
267     }
268
269     //  This upgrades the llvm.part.set intrinsics similarly as above, however 
270     //  we care about 'llvm.part.set.i*.i*.i*', but only the first two types 
271     //  must match. There is an additional type specifier after these two 
272     //  matching types that we must retain when upgrading.  Thus, we require 
273     //  finding 2 periods, not just one, after the intrinsic name.
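    //  e.g. llvm.part.set.i32.i32.i8 => llvm.part.set.i32.i8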
274     if (Name.compare(5,9,"part.set.",9) == 0) {
275       std::string::size_type delim = Name.find('.',14);
276
277       if (delim != std::string::npos &&
278           Name.find('.',delim+1) != std::string::npos) {
279         //  Construct a new name as 'llvm.part.set' + '.i*.i*'
280         F->setName(Name.substr(0,13)+Name.substr(delim));
281         NewFn = F;
282         return true;
283       }
284       break;
285     }
286
287     break;
288   case 'x':
289     // This fixes the poorly named crc32 intrinsics
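    // e.g. llvm.x86.sse42.crc32.8  => llvm.x86.sse42.crc32.32.8
    //      llvm.x86.sse42.crc64.64 => llvm.x86.sse42.crc32.64.64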
290     if (Name.compare(5, 13, "x86.sse42.crc", 13) == 0) {
291       const char* NewFnName = NULL;
292       if (Name.compare(18, 2, "32", 2) == 0) {
293         if (Name.compare(20, 2, ".8") == 0) {
294           NewFnName = "llvm.x86.sse42.crc32.32.8";
295         } else if (Name.compare(20, 2, ".16") == 0) {
296           NewFnName = "llvm.x86.sse42.crc32.32.16";
297         } else if (Name.compare(20, 2, ".32") == 0) {
298           NewFnName = "llvm.x86.sse42.crc32.32.32";
299         }
300       }
301       else if (Name.compare(18, 2, "64", 2) == 0) {
302         if (Name.compare(20, 2, ".8") == 0) {
303           NewFnName = "llvm.x86.sse42.crc32.64.8";
304         } else if (Name.compare(20, 2, ".64") == 0) {
305           NewFnName = "llvm.x86.sse42.crc32.64.64";
306         }
307       }
308       if (NewFnName) {
309         F->setName(NewFnName);
310         NewFn = F;
311         return true;
312       }
313     }
314
315     // This fixes all MMX intrinsics to take an x86_mmx operand instead
316     // of a v1i64, v2i32, v4i16, or v8i8 vector.
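    // e.g. <1 x i64> @llvm.x86.mmx.padd.q(<1 x i64>, <1 x i64>) becomes
    //      x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx).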
317     if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
318       const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
319
320       if (Name.compare(13, 4, "padd", 4) == 0   ||
321           Name.compare(13, 4, "psub", 4) == 0   ||
322           Name.compare(13, 4, "pmul", 4) == 0   ||
323           Name.compare(13, 5, "pmadd", 5) == 0  ||
324           Name.compare(13, 4, "pand", 4) == 0   ||
325           Name.compare(13, 3, "por", 3) == 0    ||
326           Name.compare(13, 4, "pxor", 4) == 0   ||
327           Name.compare(13, 4, "pavg", 4) == 0   ||
328           Name.compare(13, 4, "pmax", 4) == 0   ||
329           Name.compare(13, 4, "pmin", 4) == 0   ||
330           Name.compare(13, 4, "psad", 4) == 0   ||
331           Name.compare(13, 4, "psll", 4) == 0   ||
332           Name.compare(13, 4, "psrl", 4) == 0   ||
333           Name.compare(13, 4, "psra", 4) == 0   ||
334           Name.compare(13, 4, "pack", 4) == 0   ||
335           Name.compare(13, 6, "punpck", 6) == 0 ||
336           Name.compare(13, 4, "pcmp", 4) == 0) {
337         assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
338         const Type *SecondParamTy = X86_MMXTy;
339
340         if (Name.compare(13, 5, "pslli", 5) == 0 ||
341             Name.compare(13, 5, "psrli", 5) == 0 ||
342             Name.compare(13, 5, "psrai", 5) == 0)
343           SecondParamTy = FTy->getParamType(1);
344
345         // Don't do anything if it has the correct types.
346         if (FTy->getReturnType() == X86_MMXTy &&
347             FTy->getParamType(0) == X86_MMXTy &&
348             FTy->getParamType(1) == SecondParamTy)
349           break;
350
351         // We first need to change the name of the old (bad) intrinsic, because
352         // its type is incorrect, but we cannot overload that name. We
353         // arbitrarily unique it here allowing us to construct a correctly named
354         // and typed function below.
355         F->setName("");
356
357         // Now construct the new intrinsic with the correct name and type. We
358         // leave the old function around in order to query its type, whatever it
359         // may be, and correctly convert up to the new type.
360         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
361                                                       X86_MMXTy, X86_MMXTy,
362                                                       SecondParamTy, (Type*)0));
363         return true;
364       }
365
366       if (Name.compare(13, 8, "maskmovq", 8) == 0) {
367         // Don't do anything if it has the correct types.
368         if (FTy->getParamType(0) == X86_MMXTy &&
369             FTy->getParamType(1) == X86_MMXTy)
370           break;
371
372         F->setName("");
373         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
374                                                       FTy->getReturnType(),
375                                                       X86_MMXTy,
376                                                       X86_MMXTy,
377                                                       FTy->getParamType(2),
378                                                       (Type*)0));
379         return true;
380       }
381
382       if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
383         if (FTy->getParamType(0) == X86_MMXTy)
384           break;
385
386         F->setName("");
387         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
388                                                       FTy->getReturnType(),
389                                                       X86_MMXTy,
390                                                       (Type*)0));
391         return true;
392       }
393
394       if (Name.compare(13, 5, "movnt", 5) == 0) {
395         if (FTy->getParamType(1) == X86_MMXTy)
396           break;
397
398         F->setName("");
399         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
400                                                       FTy->getReturnType(),
401                                                       FTy->getParamType(0),
402                                                       X86_MMXTy,
403                                                       (Type*)0));
404         return true;
405       }
406
407       if (Name.compare(13, 7, "palignr", 7) == 0) {
408         if (FTy->getReturnType() == X86_MMXTy &&
409             FTy->getParamType(0) == X86_MMXTy &&
410             FTy->getParamType(1) == X86_MMXTy)
411           break;
412
413         F->setName("");
414         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
415                                                       X86_MMXTy,
416                                                       X86_MMXTy,
417                                                       X86_MMXTy,
418                                                       FTy->getParamType(2),
419                                                       (Type*)0));
420         return true;
421       }
422
423       if (Name.compare(13, 5, "pextr", 5) == 0) {
424         if (FTy->getParamType(0) == X86_MMXTy)
425           break;
426
427         F->setName("");
428         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
429                                                       FTy->getReturnType(),
430                                                       X86_MMXTy,
431                                                       FTy->getParamType(1),
432                                                       (Type*)0));
433         return true;
434       }
435
436       if (Name.compare(13, 5, "pinsr", 5) == 0) {
437         if (FTy->getReturnType() == X86_MMXTy &&
438             FTy->getParamType(0) == X86_MMXTy)
439           break;
440
441         F->setName("");
442         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
443                                                       X86_MMXTy,
444                                                       X86_MMXTy,
445                                                       FTy->getParamType(1),
446                                                       FTy->getParamType(2),
447                                                       (Type*)0));
448         return true;
449       }
450
451       if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
452         if (FTy->getReturnType() == X86_MMXTy)
453           break;
454
455         F->setName("");
456         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
457                                                       X86_MMXTy,
458                                                       FTy->getParamType(0),
459                                                       (Type*)0));
460         return true;
461       }
462
463       if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
464         if (FTy->getParamType(0) == X86_MMXTy)
465           break;
466
467         F->setName("");
468         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
469                                                       FTy->getReturnType(),
470                                                       X86_MMXTy,
471                                                       (Type*)0));
472         return true;
473       }
474
475       if (Name.compare(13, 8, "vec.init", 8) == 0) {
476         if (FTy->getReturnType() == X86_MMXTy)
477           break;
478
479         F->setName("");
480
481         if (Name.compare(21, 2, ".b", 2) == 0)
482           NewFn = cast<Function>(M->getOrInsertFunction(Name, 
483                                                         X86_MMXTy,
484                                                         FTy->getParamType(0),
485                                                         FTy->getParamType(1),
486                                                         FTy->getParamType(2),
487                                                         FTy->getParamType(3),
488                                                         FTy->getParamType(4),
489                                                         FTy->getParamType(5),
490                                                         FTy->getParamType(6),
491                                                         FTy->getParamType(7),
492                                                         (Type*)0));
493         else if (Name.compare(21, 2, ".w", 2) == 0)
494           NewFn = cast<Function>(M->getOrInsertFunction(Name, 
495                                                         X86_MMXTy,
496                                                         FTy->getParamType(0),
497                                                         FTy->getParamType(1),
498                                                         FTy->getParamType(2),
499                                                         FTy->getParamType(3),
500                                                         (Type*)0));
501         else if (Name.compare(21, 2, ".d", 2) == 0)
502           NewFn = cast<Function>(M->getOrInsertFunction(Name, 
503                                                         X86_MMXTy,
504                                                         FTy->getParamType(0),
505                                                         FTy->getParamType(1),
506                                                         (Type*)0));
507         return true;
508       }
509
510
511       if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
512         if (FTy->getReturnType() == X86_MMXTy &&
513             FTy->getParamType(0) == X86_MMXTy)
514           break;
515
516         F->setName("");
517         NewFn = cast<Function>(M->getOrInsertFunction(Name, 
518                                                       X86_MMXTy,
519                                                       X86_MMXTy,
520                                                       FTy->getParamType(1),
521                                                       (Type*)0));
522         return true;
523       }
524
525       if (Name.compare(13, 9, "emms", 4) == 0 ||
526           Name.compare(13, 9, "femms", 5) == 0) {
527         NewFn = 0;
528         break;
529       }
530
531       // We really shouldn't get here ever.
532       assert(0 && "Invalid MMX intrinsic!");
533       break;
534     } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
535                Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
536                Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
537                Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
538                Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
539                Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
540                Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
541                Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
542                Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
543       // Calls to these intrinsics are transformed into ShuffleVector's.
544       NewFn = 0;
545       return true;
546     } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
547       // Calls to these intrinsics are transformed into vector multiplies.
548       NewFn = 0;
549       return true;
550     } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
551                Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
552       // Calls to these intrinsics are transformed into vector shuffles, shifts,
553       // or 0.
554       NewFn = 0;
555       return true;           
556     } else if (Name.compare(5, 16, "x86.sse.loadu.ps", 16) == 0 ||
557                Name.compare(5, 17, "x86.sse2.loadu.dq", 17) == 0 ||
558                Name.compare(5, 17, "x86.sse2.loadu.pd", 17) == 0) {
559       // Calls to these intrinsics are transformed into unaligned loads.
560       NewFn = 0;
561       return true;
562     } else if (Name.compare(5, 16, "x86.sse.movnt.ps", 16) == 0 ||
563                Name.compare(5, 17, "x86.sse2.movnt.dq", 17) == 0 ||
564                Name.compare(5, 17, "x86.sse2.movnt.pd", 17) == 0 ||
565                Name.compare(5, 17, "x86.sse2.movnt.i", 16) == 0) {
566       // Calls to these intrinsics are transformed into nontemporal stores.
567       NewFn = 0;
568       return true;
569     } else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
570       // This is an SSE/MMX instruction.
571       const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
572       NewFn =
573         cast<Function>(M->getOrInsertFunction("llvm.x86.sse.pshuf.w",
574                                               X86_MMXTy,
575                                               X86_MMXTy,
576                                               Type::getInt8Ty(F->getContext()),
577                                               (Type*)0));
578       return true;
579     }
580
581     break;
582   }
583
584   //  This may not belong here. This function is effectively being overloaded 
585   //  to both detect an intrinsic which needs upgrading, and to provide the 
586   //  upgraded form of the intrinsic. We should perhaps have two separate 
587   //  functions for this.
588   return false;
589 }
590
591 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
592   NewFn = 0;
593   bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
594
595   // Upgrade intrinsic attributes.  This does not change the function.
596   if (NewFn)
597     F = NewFn;
598   if (unsigned id = F->getIntrinsicID())
599     F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
600   return Upgraded;
601 }
602
603 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
604   StringRef Name(GV->getName());
605
606   // We are only upgrading one symbol here.
607   if (Name == ".llvm.eh.catch.all.value") {
608     GV->setName("llvm.eh.catch.all.value");
609     return true;
610   }
611
612   return false;
613 }
614
615 /// ExtendNEONArgs - For NEON "long" and "wide" operations, where the results
616 /// have vector elements twice as big as one or both source operands, do the
617 /// sign- or zero-extension that used to be handled by intrinsics.  The
618 /// extended values are returned via V0 and V1.
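/// In a name such as "llvm.arm.neon.vaddls.v4i32", position 18 holds the
/// 'l' (long) or 'w' (wide) marker and position 19 the 's'/'u' signedness
/// marker consulted below.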
619 static void ExtendNEONArgs(CallInst *CI, Value *Arg0, Value *Arg1,
620                            Value *&V0, Value *&V1) {
621   Function *F = CI->getCalledFunction();
622   const std::string& Name = F->getName();
623   bool isLong = (Name.at(18) == 'l');
624   bool isSigned = (Name.at(19) == 's');
625
626   if (isSigned) {
627     if (isLong)
628       V0 = new SExtInst(Arg0, CI->getType(), "", CI);
629     else
630       V0 = Arg0;
631     V1 = new SExtInst(Arg1, CI->getType(), "", CI);
632   } else {
633     if (isLong)
634       V0 = new ZExtInst(Arg0, CI->getType(), "", CI);
635     else
636       V0 = Arg0;
637     V1 = new ZExtInst(Arg1, CI->getType(), "", CI);
638   }
639 }
640
641 /// CallVABD - As part of expanding a call to one of the old NEON vabdl, vaba,
642 /// or vabal intrinsics, construct a call to a vabd intrinsic.  Examine the
643 /// name of the old intrinsic to determine whether to use a signed or unsigned
644 /// vabd intrinsic.  Get the type from the old call instruction, adjusted for
645 /// half-size vector elements if the old intrinsic was vabdl or vabal.
646 static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
647   Function *F = CI->getCalledFunction();
648   const std::string& Name = F->getName();
649   bool isLong = (Name.at(18) == 'l');
650   bool isSigned = (Name.at(isLong ? 19 : 18) == 's');
651
652   Intrinsic::ID intID;
653   if (isSigned)
654     intID = Intrinsic::arm_neon_vabds;
655   else
656     intID = Intrinsic::arm_neon_vabdu;
657
658   const Type *Ty = CI->getType();
659   if (isLong)
660     Ty = VectorType::getTruncatedElementVectorType(cast<const VectorType>(Ty));
661
662   Function *VABD = Intrinsic::getDeclaration(F->getParent(), intID, &Ty, 1);
663   Value *Operands[2];
664   Operands[0] = Arg0;
665   Operands[1] = Arg1;
666   return CallInst::Create(VABD, Operands, Operands+2, 
667                           "upgraded."+CI->getName(), CI);
668 }
669
670 /// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
671 static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
672                                  Value **Operands, unsigned NumOps,
673                                  bool AssignName = true) {
674   // Construct a new CallInst.
675   CallInst *NewCI =
676     CallInst::Create(NewFn, Operands, Operands + NumOps,
677                      AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
678
679   NewCI->setTailCall(OldCI->isTailCall());
680   NewCI->setCallingConv(OldCI->getCallingConv());
681
682   // Handle any uses of the old CallInst. If the type has changed, add a cast.
683   if (!OldCI->use_empty()) {
684     if (OldCI->getType() != NewCI->getType()) {
685       Function *OldFn = OldCI->getCalledFunction();
686       CastInst *RetCast =
687         CastInst::Create(CastInst::getCastOpcode(NewCI, true,
688                                                  OldFn->getReturnType(), true),
689                          NewCI, OldFn->getReturnType(), NewCI->getName(),OldCI);
690
691       // Replace all uses of the old call with the new cast which has the
692       // correct type.
693       OldCI->replaceAllUsesWith(RetCast);
694     } else {
695       OldCI->replaceAllUsesWith(NewCI);
696     }
697   }
698
699   // Clean up the old call now that it has been completely upgraded.
700   OldCI->eraseFromParent();
701 }
702
703 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to the
704 // upgraded intrinsic. All argument and return casting must be provided in
705 // order to integrate seamlessly with the existing context.
706 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
707   Function *F = CI->getCalledFunction();
708   LLVMContext &C = CI->getContext();
709   ImmutableCallSite CS(CI);
710
711   assert(F && "CallInst has no function associated with it.");
712
713   if (!NewFn) {
714     // Get the Function's name.
715     const std::string& Name = F->getName();
716
717     // Upgrade ARM NEON intrinsics.
718     if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
719       Instruction *NewI;
720       Value *V0, *V1;
721       if (Name.compare(14, 7, "vmovls.", 7) == 0) {
722         NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
723                             "upgraded." + CI->getName(), CI);
724       } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
725         NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
726                             "upgraded." + CI->getName(), CI);
727       } else if (Name.compare(14, 4, "vadd", 4) == 0) {
728         ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
729         NewI = BinaryOperator::CreateAdd(V0, V1, "upgraded."+CI->getName(), CI);
730       } else if (Name.compare(14, 4, "vsub", 4) == 0) {
731         ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
732         NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
733       } else if (Name.compare(14, 4, "vmul", 4) == 0) {
734         ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
735         NewI = BinaryOperator::CreateMul(V0, V1,"upgraded."+CI->getName(),CI);
736       } else if (Name.compare(14, 4, "vmla", 4) == 0) {
737         ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
738         Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
739         NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), MulI,
740                                          "upgraded."+CI->getName(), CI);
741       } else if (Name.compare(14, 4, "vmls", 4) == 0) {
742         ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
743         Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
744         NewI = BinaryOperator::CreateSub(CI->getArgOperand(0), MulI,
745                                          "upgraded."+CI->getName(), CI);
746       } else if (Name.compare(14, 4, "vabd", 4) == 0) {
747         NewI = CallVABD(CI, CI->getArgOperand(0), CI->getArgOperand(1));
748         NewI = new ZExtInst(NewI, CI->getType(), "upgraded."+CI->getName(), CI);
749       } else if (Name.compare(14, 4, "vaba", 4) == 0) {
750         NewI = CallVABD(CI, CI->getArgOperand(1), CI->getArgOperand(2));
751         if (Name.at(18) == 'l')
752           NewI = new ZExtInst(NewI, CI->getType(), "", CI);
753         NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), NewI,
754                                          "upgraded."+CI->getName(), CI);
755       } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
756         NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
757                              "upgraded." + CI->getName(), CI);
758       } else {
759         llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
760       }
761       // Replace any uses of the old CallInst.
762       if (!CI->use_empty())
763         CI->replaceAllUsesWith(NewI);
764       CI->eraseFromParent();
765       return;
766     }
767
768     bool isLoadH = false, isLoadL = false, isMovL = false;
769     bool isMovSD = false, isShufPD = false;
770     bool isUnpckhPD = false, isUnpcklPD = false;
771     bool isPunpckhQPD = false, isPunpcklQPD = false;
772     if (F->getName() == "llvm.x86.sse2.loadh.pd")
773       isLoadH = true;
774     else if (F->getName() == "llvm.x86.sse2.loadl.pd")
775       isLoadL = true;
776     else if (F->getName() == "llvm.x86.sse2.movl.dq")
777       isMovL = true;
778     else if (F->getName() == "llvm.x86.sse2.movs.d")
779       isMovSD = true;
780     else if (F->getName() == "llvm.x86.sse2.shuf.pd")
781       isShufPD = true;
782     else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
783       isUnpckhPD = true;
784     else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
785       isUnpcklPD = true;
786     else if (F->getName() ==  "llvm.x86.sse2.punpckh.qdq")
787       isPunpckhQPD = true;
788     else if (F->getName() ==  "llvm.x86.sse2.punpckl.qdq")
789       isPunpcklQPD = true;
790
791     if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
792         isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
793       std::vector<Constant*> Idxs;
794       Value *Op0 = CI->getArgOperand(0);
795       ShuffleVectorInst *SI = NULL;
796       if (isLoadH || isLoadL) {
797         Value *Op1 = UndefValue::get(Op0->getType());
798         Value *Addr = new BitCastInst(CI->getArgOperand(1), 
799                                   Type::getDoublePtrTy(C),
800                                       "upgraded.", CI);
801         Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
802         Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
803         Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);
804
805         if (isLoadH) {
806           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
807           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
808         } else {
809           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
810           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
811         }
812         Value *Mask = ConstantVector::get(Idxs);
813         SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
814       } else if (isMovL) {
815         Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
816         Idxs.push_back(Zero);
817         Idxs.push_back(Zero);
818         Idxs.push_back(Zero);
819         Idxs.push_back(Zero);
820         Value *ZeroV = ConstantVector::get(Idxs);
821
822         Idxs.clear(); 
823         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
824         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
825         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
826         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
827         Value *Mask = ConstantVector::get(Idxs);
828         SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
829       } else if (isMovSD ||
830                  isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
831         Value *Op1 = CI->getArgOperand(1);
832         if (isMovSD) {
833           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
834           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
835         } else if (isUnpckhPD || isPunpckhQPD) {
836           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
837           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
838         } else {
839           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
840           Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
841         }
842         Value *Mask = ConstantVector::get(Idxs);
843         SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
844       } else if (isShufPD) {
845         Value *Op1 = CI->getArgOperand(1);
846         unsigned MaskVal =
847                         cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
848         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
849         Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
850                                                ((MaskVal >> 1) & 1)+2));
851         Value *Mask = ConstantVector::get(Idxs);
852         SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
853       }
854
855       assert(SI && "Unexpected!");
856
857       // Handle any uses of the old CallInst.
858       if (!CI->use_empty())
859         //  Replace all uses of the old call with the new shufflevector,
860         //  which has the correct type.
861         CI->replaceAllUsesWith(SI);
862       
863       //  Clean up the old call now that it has been completely upgraded.
864       CI->eraseFromParent();
865     } else if (F->getName() == "llvm.x86.sse41.pmulld") {
866       // Upgrade this set of intrinsics into vector multiplies.
867       Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
868                                                    CI->getArgOperand(1),
869                                                    CI->getName(),
870                                                    CI);
871       // Fix up all the uses with our new multiply.
872       if (!CI->use_empty())
873         CI->replaceAllUsesWith(Mul);
874         
875       // Remove upgraded multiply.
876       CI->eraseFromParent();
877     } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
878       Value *Op1 = CI->getArgOperand(0);
879       Value *Op2 = CI->getArgOperand(1);
880       Value *Op3 = CI->getArgOperand(2);
881       unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
882       Value *Rep;
883       IRBuilder<> Builder(C);
884       Builder.SetInsertPoint(CI->getParent(), CI);
885
886       // If palignr is shifting the pair of input vectors less than 9 bytes,
887       // emit a shuffle instruction.
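      // (The two 8-byte inputs are concatenated as Op2 followed by Op1, and
      // bytes shiftVal..shiftVal+7 of that concatenation form the result.)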
888       if (shiftVal <= 8) {
889         const Type *IntTy = Type::getInt32Ty(C);
890         const Type *EltTy = Type::getInt8Ty(C);
891         const Type *VecTy = VectorType::get(EltTy, 8);
892         
893         Op2 = Builder.CreateBitCast(Op2, VecTy);
894         Op1 = Builder.CreateBitCast(Op1, VecTy);
895
896         llvm::SmallVector<llvm::Constant*, 8> Indices;
897         for (unsigned i = 0; i != 8; ++i)
898           Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
899
900         Value *SV = ConstantVector::get(Indices);
901         Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
902         Rep = Builder.CreateBitCast(Rep, F->getReturnType());
903       }
904
905       // If palignr is shifting the pair of input vectors more than 8 but less
906       // than 16 bytes, emit a logical right shift of the destination.
907       else if (shiftVal < 16) {
908         // MMX has these as 1 x i64 vectors for some odd optimization reasons.
909         const Type *EltTy = Type::getInt64Ty(C);
910         const Type *VecTy = VectorType::get(EltTy, 1);
911
912         Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
913         Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
914
915         // Get the MMX logical shift right intrinsic declaration.
916         Function *I =
917           Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
918         Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
919       }
920
921       // If palignr is shifting the pair of vectors 16 or more bytes, emit zero.
922       else {
923         Rep = Constant::getNullValue(F->getReturnType());
924       }
925       
926       // Replace any uses with our new instruction.
927       if (!CI->use_empty())
928         CI->replaceAllUsesWith(Rep);
929         
930       // Remove upgraded instruction.
931       CI->eraseFromParent();
932       
933     } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
934       Value *Op1 = CI->getArgOperand(0);
935       Value *Op2 = CI->getArgOperand(1);
936       Value *Op3 = CI->getArgOperand(2);
937       unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
938       Value *Rep;
939       IRBuilder<> Builder(C);
940       Builder.SetInsertPoint(CI->getParent(), CI);
941
942       // If palignr is shifting the pair of input vectors less than 17 bytes,
943       // emit a shuffle instruction.
944       if (shiftVal <= 16) {
945         const Type *IntTy = Type::getInt32Ty(C);
946         const Type *EltTy = Type::getInt8Ty(C);
947         const Type *VecTy = VectorType::get(EltTy, 16);
948         
949         Op2 = Builder.CreateBitCast(Op2, VecTy);
950         Op1 = Builder.CreateBitCast(Op1, VecTy);
951
952         llvm::SmallVector<llvm::Constant*, 16> Indices;
953         for (unsigned i = 0; i != 16; ++i)
954           Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
955
956         Value *SV = ConstantVector::get(Indices);
957         Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
958         Rep = Builder.CreateBitCast(Rep, F->getReturnType());
959       }
960
961       // If palignr is shifting the pair of input vectors more than 16 but less
962       // than 32 bytes, emit a logical right shift of the destination.
963       else if (shiftVal < 32) {
964         const Type *EltTy = Type::getInt64Ty(C);
965         const Type *VecTy = VectorType::get(EltTy, 2);
966         const Type *IntTy = Type::getInt32Ty(C);
967
968         Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
969         Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
970
971         // Get the SSE2 logical shift right intrinsic declaration.
972         Function *I =
973           Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
974         Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
975       }
976
977       // If palignr is shifting the pair of vectors 32 or more bytes, emit zero.
978       else {
979         Rep = Constant::getNullValue(F->getReturnType());
980       }
981       
982       // Replace any uses with our new instruction.
983       if (!CI->use_empty())
984         CI->replaceAllUsesWith(Rep);
985         
986       // Remove upgraded instruction.
987       CI->eraseFromParent();
988     
989     } else if (F->getName() == "llvm.x86.sse.loadu.ps" ||
990                F->getName() == "llvm.x86.sse2.loadu.dq" ||
991                F->getName() == "llvm.x86.sse2.loadu.pd") {
992       // Convert to a native, unaligned load.
993       const Type *VecTy = CI->getType();
994       const Type *IntTy = IntegerType::get(C, 128);
995       IRBuilder<> Builder(C);
996       Builder.SetInsertPoint(CI->getParent(), CI);
997
998       Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
999                                         PointerType::getUnqual(IntTy),
1000                                         "cast");
1001       LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
1002       LI->setAlignment(1);      // Unaligned load.
1003       BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
1004
1005       // Fix up all the uses with our new load.
1006       if (!CI->use_empty())
1007         CI->replaceAllUsesWith(BC);
1008
1009       // Remove intrinsic.
1010       CI->eraseFromParent();
1011     } else if (F->getName() == "llvm.x86.sse.movnt.ps" ||
1012                F->getName() == "llvm.x86.sse2.movnt.dq" ||
1013                F->getName() == "llvm.x86.sse2.movnt.pd" ||
1014                F->getName() == "llvm.x86.sse2.movnt.i") {
1015       IRBuilder<> Builder(C);
1016       Builder.SetInsertPoint(CI->getParent(), CI);
1017
1018       Module *M = F->getParent();
1019       SmallVector<Value *, 1> Elts;
1020       Elts.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
1021       MDNode *Node = MDNode::get(C, Elts);
1022
1023       Value *Arg0 = CI->getArgOperand(0);
1024       Value *Arg1 = CI->getArgOperand(1);
1025
1026       // Convert the type of the pointer to a pointer to the stored type.
1027       Value *BC = Builder.CreateBitCast(Arg0,
1028                                         PointerType::getUnqual(Arg1->getType()),
1029                                         "cast");
1030       StoreInst *SI = Builder.CreateStore(Arg1, BC);
1031       SI->setMetadata(M->getMDKindID("nontemporal"), Node);
1032       SI->setAlignment(16);
1033
1034       // Remove intrinsic.
1035       CI->eraseFromParent();
1036     } else {
1037       llvm_unreachable("Unknown function for CallInst upgrade.");
1038     }
1039     return;
1040   }
1041
1042   switch (NewFn->getIntrinsicID()) {
1043   default: llvm_unreachable("Unknown function for CallInst upgrade.");
1044   case Intrinsic::arm_neon_vld1:
1045   case Intrinsic::arm_neon_vld2:
1046   case Intrinsic::arm_neon_vld3:
1047   case Intrinsic::arm_neon_vld4:
1048   case Intrinsic::arm_neon_vst1:
1049   case Intrinsic::arm_neon_vst2:
1050   case Intrinsic::arm_neon_vst3:
1051   case Intrinsic::arm_neon_vst4:
1052   case Intrinsic::arm_neon_vld2lane:
1053   case Intrinsic::arm_neon_vld3lane:
1054   case Intrinsic::arm_neon_vld4lane:
1055   case Intrinsic::arm_neon_vst2lane:
1056   case Intrinsic::arm_neon_vst3lane:
1057   case Intrinsic::arm_neon_vst4lane: {
1058     // Add a default alignment argument of 1.
1059     SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1060     Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
1061     CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1062                                        CI->getName(), CI);
1063     NewCI->setTailCall(CI->isTailCall());
1064     NewCI->setCallingConv(CI->getCallingConv());
1065
1066     //  Handle any uses of the old CallInst.
1067     if (!CI->use_empty())
1068       //  Replace all uses of the old call with the new call, which has the
1069       //  correct type.
1070       CI->replaceAllUsesWith(NewCI);
1071     
1072     //  Clean up the old call now that it has been completely upgraded.
1073     CI->eraseFromParent();
1074     break;
1075   }        
1076
1077   case Intrinsic::x86_mmx_padd_b:
1078   case Intrinsic::x86_mmx_padd_w:
1079   case Intrinsic::x86_mmx_padd_d:
1080   case Intrinsic::x86_mmx_padd_q:
1081   case Intrinsic::x86_mmx_padds_b:
1082   case Intrinsic::x86_mmx_padds_w:
1083   case Intrinsic::x86_mmx_paddus_b:
1084   case Intrinsic::x86_mmx_paddus_w:
1085   case Intrinsic::x86_mmx_psub_b:
1086   case Intrinsic::x86_mmx_psub_w:
1087   case Intrinsic::x86_mmx_psub_d:
1088   case Intrinsic::x86_mmx_psub_q:
1089   case Intrinsic::x86_mmx_psubs_b:
1090   case Intrinsic::x86_mmx_psubs_w:
1091   case Intrinsic::x86_mmx_psubus_b:
1092   case Intrinsic::x86_mmx_psubus_w:
1093   case Intrinsic::x86_mmx_pmulh_w:
1094   case Intrinsic::x86_mmx_pmull_w:
1095   case Intrinsic::x86_mmx_pmulhu_w:
1096   case Intrinsic::x86_mmx_pmulu_dq:
1097   case Intrinsic::x86_mmx_pmadd_wd:
1098   case Intrinsic::x86_mmx_pand:
1099   case Intrinsic::x86_mmx_pandn:
1100   case Intrinsic::x86_mmx_por:
1101   case Intrinsic::x86_mmx_pxor:
1102   case Intrinsic::x86_mmx_pavg_b:
1103   case Intrinsic::x86_mmx_pavg_w:
1104   case Intrinsic::x86_mmx_pmaxu_b:
1105   case Intrinsic::x86_mmx_pmaxs_w:
1106   case Intrinsic::x86_mmx_pminu_b:
1107   case Intrinsic::x86_mmx_pmins_w:
1108   case Intrinsic::x86_mmx_psad_bw:
1109   case Intrinsic::x86_mmx_psll_w:
1110   case Intrinsic::x86_mmx_psll_d:
1111   case Intrinsic::x86_mmx_psll_q:
1112   case Intrinsic::x86_mmx_pslli_w:
1113   case Intrinsic::x86_mmx_pslli_d:
1114   case Intrinsic::x86_mmx_pslli_q:
1115   case Intrinsic::x86_mmx_psrl_w:
1116   case Intrinsic::x86_mmx_psrl_d:
1117   case Intrinsic::x86_mmx_psrl_q:
1118   case Intrinsic::x86_mmx_psrli_w:
1119   case Intrinsic::x86_mmx_psrli_d:
1120   case Intrinsic::x86_mmx_psrli_q:
1121   case Intrinsic::x86_mmx_psra_w:
1122   case Intrinsic::x86_mmx_psra_d:
1123   case Intrinsic::x86_mmx_psrai_w:
1124   case Intrinsic::x86_mmx_psrai_d:
1125   case Intrinsic::x86_mmx_packsswb:
1126   case Intrinsic::x86_mmx_packssdw:
1127   case Intrinsic::x86_mmx_packuswb:
1128   case Intrinsic::x86_mmx_punpckhbw:
1129   case Intrinsic::x86_mmx_punpckhwd:
1130   case Intrinsic::x86_mmx_punpckhdq:
1131   case Intrinsic::x86_mmx_punpcklbw:
1132   case Intrinsic::x86_mmx_punpcklwd:
1133   case Intrinsic::x86_mmx_punpckldq:
1134   case Intrinsic::x86_mmx_pcmpeq_b:
1135   case Intrinsic::x86_mmx_pcmpeq_w:
1136   case Intrinsic::x86_mmx_pcmpeq_d:
1137   case Intrinsic::x86_mmx_pcmpgt_b:
1138   case Intrinsic::x86_mmx_pcmpgt_w:
1139   case Intrinsic::x86_mmx_pcmpgt_d: {
1140     Value *Operands[2];
1141     
1142     // Cast the operand to the X86 MMX type.
1143     Operands[0] = new BitCastInst(CI->getArgOperand(0), 
1144                                   NewFn->getFunctionType()->getParamType(0),
1145                                   "upgraded.", CI);
1146
1147     switch (NewFn->getIntrinsicID()) {
1148     default:
1149       // Cast to the X86 MMX type.
1150       Operands[1] = new BitCastInst(CI->getArgOperand(1), 
1151                                     NewFn->getFunctionType()->getParamType(1),
1152                                     "upgraded.", CI);
1153       break;
1154     case Intrinsic::x86_mmx_pslli_w:
1155     case Intrinsic::x86_mmx_pslli_d:
1156     case Intrinsic::x86_mmx_pslli_q:
1157     case Intrinsic::x86_mmx_psrli_w:
1158     case Intrinsic::x86_mmx_psrli_d:
1159     case Intrinsic::x86_mmx_psrli_q:
1160     case Intrinsic::x86_mmx_psrai_w:
1161     case Intrinsic::x86_mmx_psrai_d:
1162       // These take an i32 as their second parameter.
1163       Operands[1] = CI->getArgOperand(1);
1164       break;
1165     }
1166
1167     ConstructNewCallInst(NewFn, CI, Operands, 2);
1168     break;
1169   }
1170   case Intrinsic::x86_mmx_maskmovq: {
1171     Value *Operands[3];
1172
1173     // Cast the operands to the X86 MMX type.
1174     Operands[0] = new BitCastInst(CI->getArgOperand(0), 
1175                                   NewFn->getFunctionType()->getParamType(0),
1176                                   "upgraded.", CI);
1177     Operands[1] = new BitCastInst(CI->getArgOperand(1), 
1178                                   NewFn->getFunctionType()->getParamType(1),
1179                                   "upgraded.", CI);
1180     Operands[2] = CI->getArgOperand(2);
1181
1182     ConstructNewCallInst(NewFn, CI, Operands, 3, false);
1183     break;
1184   }
1185   case Intrinsic::x86_mmx_pmovmskb: {
1186     Value *Operands[1];
1187
1188     // Cast the operand to the X86 MMX type.
1189     Operands[0] = new BitCastInst(CI->getArgOperand(0), 
1190                                   NewFn->getFunctionType()->getParamType(0),
1191                                   "upgraded.", CI);
1192
1193     ConstructNewCallInst(NewFn, CI, Operands, 1);
1194     break;
1195   }
1196   case Intrinsic::x86_mmx_movnt_dq: {
1197     Value *Operands[2];
1198
1199     Operands[0] = CI->getArgOperand(0);
1200
1201     // Cast the operand to the X86 MMX type.
1202     Operands[1] = new BitCastInst(CI->getArgOperand(1),
1203                                   NewFn->getFunctionType()->getParamType(1),
1204                                   "upgraded.", CI);
1205
1206     ConstructNewCallInst(NewFn, CI, Operands, 2, false);
1207     break;
1208   }
1209   case Intrinsic::x86_mmx_palignr_b: {
1210     Value *Operands[3];
1211
1212     // Cast the operands to the X86 MMX type.
1213     Operands[0] = new BitCastInst(CI->getArgOperand(0),
1214                                   NewFn->getFunctionType()->getParamType(0),
1215                                   "upgraded.", CI);
1216     Operands[1] = new BitCastInst(CI->getArgOperand(1),
1217                                   NewFn->getFunctionType()->getParamType(1),
1218                                   "upgraded.", CI);
1219     Operands[2] = CI->getArgOperand(2);
1220
1221     ConstructNewCallInst(NewFn, CI, Operands, 3);
1222     break;
1223   }
1224   case Intrinsic::x86_mmx_pextr_w: {
1225     Value *Operands[2];
1226
1227     // Cast the operands to the X86 MMX type.
1228     Operands[0] = new BitCastInst(CI->getArgOperand(0),
1229                                   NewFn->getFunctionType()->getParamType(0),
1230                                   "upgraded.", CI);
1231     Operands[1] = CI->getArgOperand(1);
1232
1233     ConstructNewCallInst(NewFn, CI, Operands, 2);
1234     break;
1235   }
1236   case Intrinsic::x86_mmx_pinsr_w: {
1237     Value *Operands[3];
1238
1239     // Cast the operands to the X86 MMX type.
1240     Operands[0] = new BitCastInst(CI->getArgOperand(0),
1241                                   NewFn->getFunctionType()->getParamType(0),
1242                                   "upgraded.", CI);
1243     Operands[1] = CI->getArgOperand(1);
1244     Operands[2] = CI->getArgOperand(2);
1245
1246     ConstructNewCallInst(NewFn, CI, Operands, 3);
1247     break;
1248   }
1249   case Intrinsic::x86_sse_pshuf_w: {
1250     IRBuilder<> Builder(C);
1251     Builder.SetInsertPoint(CI->getParent(), CI);
1252
1253     // Cast the operand to the X86 MMX type.
1254     Value *Operands[2];
1255     Operands[0] =
1256       Builder.CreateBitCast(CI->getArgOperand(0), 
1257                             NewFn->getFunctionType()->getParamType(0),
1258                             "upgraded.");
1259     Operands[1] =
1260       Builder.CreateTrunc(CI->getArgOperand(1),
1261                           Type::getInt8Ty(C),
1262                           "upgraded.");
1263
1264     ConstructNewCallInst(NewFn, CI, Operands, 2);
1265     break;
1266   }
1267
1268   case Intrinsic::ctlz:
1269   case Intrinsic::ctpop:
1270   case Intrinsic::cttz: {
1271     //  Build a small vector of the original arguments.
1272     SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1273
1274     //  Construct a new CallInst
1275     CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1276                                        "upgraded."+CI->getName(), CI);
1277     NewCI->setTailCall(CI->isTailCall());
1278     NewCI->setCallingConv(CI->getCallingConv());
1279
1280     //  Handle any uses of the old CallInst.
1281     if (!CI->use_empty()) {
1282       //  Check for sign extend parameter attributes on the return values.
1283       bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
1284       bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
1285       
1286       //  Construct an appropriate cast from the new return type to the old.
1287       CastInst *RetCast = CastInst::Create(
1288                             CastInst::getCastOpcode(NewCI, SrcSExt,
1289                                                     F->getReturnType(),
1290                                                     DestSExt),
1291                             NewCI, F->getReturnType(),
1292                             NewCI->getName(), CI);
1293       NewCI->moveBefore(RetCast);
1294
1295       //  Replace all uses of the old call with the new cast which has the 
1296       //  correct type.
1297       CI->replaceAllUsesWith(RetCast);
1298     }
1299
1300     //  Clean up the old call now that it has been completely upgraded.
1301     CI->eraseFromParent();
1302   }
1303   break;
1304   case Intrinsic::eh_selector:
1305   case Intrinsic::eh_typeid_for: {
1306     // Only the return type changed.
1307     SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1308     CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1309                                        "upgraded." + CI->getName(), CI);
1310     NewCI->setTailCall(CI->isTailCall());
1311     NewCI->setCallingConv(CI->getCallingConv());
1312
1313     //  Handle any uses of the old CallInst.
1314     if (!CI->use_empty()) {
1315       //  Construct an appropriate cast from the new return type to the old.
1316       CastInst *RetCast =
1317         CastInst::Create(CastInst::getCastOpcode(NewCI, true,
1318                                                  F->getReturnType(), true),
1319                          NewCI, F->getReturnType(), NewCI->getName(), CI);
1320       CI->replaceAllUsesWith(RetCast);
1321     }
1322     CI->eraseFromParent();
1323   }
1324   break;
1325   case Intrinsic::memcpy:
1326   case Intrinsic::memmove:
1327   case Intrinsic::memset: {
1328     // Add isVolatile
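    // The old memory intrinsics took (dest, src/val, len, align); the new
    // overloads add a fifth i1 isVolatile operand, defaulted to false here.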
1329     const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
1330     Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
1331                            CI->getArgOperand(2), CI->getArgOperand(3),
1332                            llvm::ConstantInt::get(I1Ty, 0) };
1333     CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
1334                                        CI->getName(), CI);
1335     NewCI->setTailCall(CI->isTailCall());
1336     NewCI->setCallingConv(CI->getCallingConv());
1337     //  Handle any uses of the old CallInst.
1338     if (!CI->use_empty())
1339       //  Replace all uses of the old call with the new call, which has the
1340       //  correct type.
1341       CI->replaceAllUsesWith(NewCI);
1342     
1343     //  Clean up the old call now that it has been completely upgraded.
1344     CI->eraseFromParent();
1345     break;
1346   }
1347   }
1348 }
1349
1350 // This tests each Function to determine if it needs upgrading. When we find 
1351 // one we are interested in, we then upgrade all calls to reflect the new 
1352 // function.
1353 void llvm::UpgradeCallsToIntrinsic(Function* F) {
1354   assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
1355
1356   // Upgrade the function and check if it is a totally new function.
1357   Function* NewFn;
1358   if (UpgradeIntrinsicFunction(F, NewFn)) {
1359     if (NewFn != F) {
1360       // Replace all uses to the old function with the new one if necessary.
1361       for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
1362            UI != UE; ) {
1363         if (CallInst* CI = dyn_cast<CallInst>(*UI++))
1364           UpgradeIntrinsicCall(CI, NewFn);
1365       }
1366       // Remove old function, no longer used, from the module.
1367       F->eraseFromParent();
1368     }
1369   }
1370 }
1371
1372 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
1373 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
1374 /// strips that use.
1375 void llvm::CheckDebugInfoIntrinsics(Module *M) {
1376
1377
1378   if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
1379     while (!FuncStart->use_empty()) {
1380       CallInst *CI = cast<CallInst>(FuncStart->use_back());
1381       CI->eraseFromParent();
1382     }
1383     FuncStart->eraseFromParent();
1384   }
1385   
1386   if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
1387     while (!StopPoint->use_empty()) {
1388       CallInst *CI = cast<CallInst>(StopPoint->use_back());
1389       CI->eraseFromParent();
1390     }
1391     StopPoint->eraseFromParent();
1392   }
1393
1394   if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
1395     while (!RegionStart->use_empty()) {
1396       CallInst *CI = cast<CallInst>(RegionStart->use_back());
1397       CI->eraseFromParent();
1398     }
1399     RegionStart->eraseFromParent();
1400   }
1401
1402   if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
1403     while (!RegionEnd->use_empty()) {
1404       CallInst *CI = cast<CallInst>(RegionEnd->use_back());
1405       CI->eraseFromParent();
1406     }
1407     RegionEnd->eraseFromParent();
1408   }
1409   
1410   if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
1411     if (!Declare->use_empty()) {
1412       DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
1413       if (!isa<MDNode>(DDI->getArgOperand(0)) ||
1414           !isa<MDNode>(DDI->getArgOperand(1))) {
1415         while (!Declare->use_empty()) {
1416           CallInst *CI = cast<CallInst>(Declare->use_back());
1417           CI->eraseFromParent();
1418         }
1419         Declare->eraseFromParent();
1420       }
1421     }
1422   }
1423 }