1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the PPCISelLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "PPCISelLowering.h"
15 #include "PPCTargetMachine.h"
16 #include "PPCPerfectShuffle.h"
17 #include "llvm/ADT/VectorExtras.h"
18 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SSARegMap.h"
24 #include "llvm/Constants.h"
25 #include "llvm/Function.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/Target/TargetOptions.h"
31 PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
32 : TargetLowering(TM) {
34 // Fold away setcc operations if possible.
35 setSetCCIsExpensive();
38 // Use _setjmp/_longjmp instead of setjmp/longjmp.
39 setUseUnderscoreSetJmpLongJmp(true);
41 // Set up the register classes.
42 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
43 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
44 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
46 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
47 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
49 // PowerPC has no intrinsics for these particular operations
50 setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
51 setOperationAction(ISD::MEMSET, MVT::Other, Expand);
52 setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
54 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
55 setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
56 setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);
58 // PowerPC has no SREM/UREM instructions
59 setOperationAction(ISD::SREM, MVT::i32, Expand);
60 setOperationAction(ISD::UREM, MVT::i32, Expand);
62 // We don't support sin/cos/sqrt/fmod
63 setOperationAction(ISD::FSIN , MVT::f64, Expand);
64 setOperationAction(ISD::FCOS , MVT::f64, Expand);
65 setOperationAction(ISD::FREM , MVT::f64, Expand);
66 setOperationAction(ISD::FSIN , MVT::f32, Expand);
67 setOperationAction(ISD::FCOS , MVT::f32, Expand);
68 setOperationAction(ISD::FREM , MVT::f32, Expand);
  // If the subtarget has no hardware square root (only available when GP
  // optimizations are enabled), expand FSQRT.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }
76 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
77 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
79 // PowerPC does not have BSWAP, CTPOP or CTTZ
80 setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
81 setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
82 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
84 // PowerPC does not have ROTR
85 setOperationAction(ISD::ROTR, MVT::i32 , Expand);
87 // PowerPC does not have Select
88 setOperationAction(ISD::SELECT, MVT::i32, Expand);
89 setOperationAction(ISD::SELECT, MVT::f32, Expand);
90 setOperationAction(ISD::SELECT, MVT::f64, Expand);
92 // PowerPC wants to turn select_cc of FP into fsel when possible.
93 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
94 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
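  // e.g. (select_cc lhs, 0.0, tv, fv, setge) can become a single fsel, which
  // picks tv when lhs >= 0.0 and fv otherwise.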
96 // PowerPC wants to optimize integer setcc a bit
97 setOperationAction(ISD::SETCC, MVT::i32, Custom);
99 // PowerPC does not have BRCOND which requires SetCC
100 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
102 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
103 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
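  // The custom lowering emits an FCTIWZ node and then moves the result out of
  // the FPR, typically via a store/reload through a stack slot.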
105 // PowerPC does not have [U|S]INT_TO_FP
106 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
107 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
109 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
110 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
112 // PowerPC does not have truncstore for i1.
113 setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);
115 // We cannot sextinreg(i1). Expand to shifts.
116 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
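  // i.e. sign_extend_inreg(x, i1) is expanded to (sra (shl x, 31), 31) for
  // 32-bit values.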
119 // Support label based line numbers.
120 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
121 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
122 // FIXME - use subtarget debug flags
123 if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
124 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
126 // We want to legalize GlobalAddress and ConstantPool nodes into the
127 // appropriate instructions to materialize the address.
128 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
129 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
130 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
131 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
132 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
133 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
135 // RET must be custom lowered, to meet ABI requirements
136 setOperationAction(ISD::RET , MVT::Other, Custom);
138 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
139 setOperationAction(ISD::VASTART , MVT::Other, Custom);
141 // Use the default implementation.
142 setOperationAction(ISD::VAARG , MVT::Other, Expand);
143 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
144 setOperationAction(ISD::VAEND , MVT::Other, Expand);
145 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
146 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
147 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
149 // We want to custom lower some of our intrinsics.
150 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
152 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
153 // They also have instructions for converting between i64 and fp.
154 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
155 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
157 // FIXME: disable this lowered code. This generates 64-bit register values,
158 // and we don't model the fact that the top part is clobbered by calls. We
159 // need to flag these together so that the value isn't live across a call.
160 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }
169 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
170 // 64 bit PowerPC implementations can support i64 types directly
171 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }
181 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
182 // First set operation action for all vector types to expand. Then we
183 // will selectively turn on ones that can be effectively codegen'd.
184 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
185 VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
186 // add/sub are legal for all supported vector VT's.
187 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
188 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
190 // We promote all shuffles to v16i8.
191 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
192 AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
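      // e.g. a v4i32 shuffle becomes the equivalent v16i8 (byte) shuffle, so a
      // single set of vperm-based patterns covers every element size.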
194 // We promote all non-typed operations to v4i32.
195 setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote);
196 AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32);
197 setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote);
198 AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32);
199 setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote);
200 AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32);
201 setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote);
202 AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32);
203 setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
204 AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
205 setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
206 AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);
208 // No other operations are legal.
209 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
210 setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
211 setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
212 setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
213 setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
214 setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
215 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
216 setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
217 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
219 setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
222 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
223 // with merges, splats, etc.
224 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
226 setOperationAction(ISD::AND , MVT::v4i32, Legal);
227 setOperationAction(ISD::OR , MVT::v4i32, Legal);
228 setOperationAction(ISD::XOR , MVT::v4i32, Legal);
229 setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
230 setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
231 setOperationAction(ISD::STORE , MVT::v4i32, Legal);
233 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
234 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
235 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
236 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
238 setOperationAction(ISD::MUL, MVT::v4f32, Legal);
239 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
240 setOperationAction(ISD::MUL, MVT::v8i16, Custom);
241 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
243 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
244 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
246 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
247 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
248 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
249 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
252 setSetCCResultType(MVT::i32);
253 setSetCCResultContents(ZeroOrOneSetCCResult);
254 setStackPointerRegisterToSaveRestore(PPC::R1);
256 // We have target-specific dag combine patterns for the following nodes:
257 setTargetDAGCombine(ISD::SINT_TO_FP);
258 setTargetDAGCombine(ISD::STORE);
259 setTargetDAGCombine(ISD::BR_CC);
  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
267 case PPCISD::FSEL: return "PPCISD::FSEL";
268 case PPCISD::FCFID: return "PPCISD::FCFID";
269 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
270 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
271 case PPCISD::STFIWX: return "PPCISD::STFIWX";
272 case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
273 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
274 case PPCISD::VPERM: return "PPCISD::VPERM";
275 case PPCISD::Hi: return "PPCISD::Hi";
276 case PPCISD::Lo: return "PPCISD::Lo";
277 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
278 case PPCISD::SRL: return "PPCISD::SRL";
279 case PPCISD::SRA: return "PPCISD::SRA";
280 case PPCISD::SHL: return "PPCISD::SHL";
281 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
282 case PPCISD::STD_32: return "PPCISD::STD_32";
283 case PPCISD::CALL: return "PPCISD::CALL";
284 case PPCISD::MTCTR: return "PPCISD::MTCTR";
285 case PPCISD::BCTRL: return "PPCISD::BCTRL";
286 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
287 case PPCISD::MFCR: return "PPCISD::MFCR";
288 case PPCISD::VCMP: return "PPCISD::VCMP";
289 case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  }
}
294 //===----------------------------------------------------------------------===//
295 // Node matching predicates, for use by the tblgen matching code.
296 //===----------------------------------------------------------------------===//
298 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
299 static bool isFloatingPointZero(SDOperand Op) {
300 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
301 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
302 else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
303 // Maybe this has already been legalized into the constant pool?
304 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
305 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
306 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
311 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
312 /// true if Op is undef or if it matches the specified value.
313 static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
314 return Op.getOpcode() == ISD::UNDEF ||
315 cast<ConstantSDNode>(Op)->getValue() == Val;
318 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
319 /// VPKUHUM instruction.
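/// For the non-unary case this is the byte mask {1,3,5,...,31}: the odd bytes
/// of the two source vectors, i.e. the low-order byte of every halfword.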
320 bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
322 for (unsigned i = 0; i != 16; ++i)
323 if (!isConstantOrUndef(N->getOperand(i), i*2+1))
326 for (unsigned i = 0; i != 8; ++i)
327 if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
328 !isConstantOrUndef(N->getOperand(i+8), i*2+1))
334 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
335 /// VPKUWUM instruction.
336 bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
338 for (unsigned i = 0; i != 16; i += 2)
339 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
340 !isConstantOrUndef(N->getOperand(i+1), i*2+3))
343 for (unsigned i = 0; i != 8; i += 2)
344 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
345 !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
346 !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
347 !isConstantOrUndef(N->getOperand(i+9), i*2+3))
353 /// isVMerge - Common function, used to match vmrg* shuffles.
355 static bool isVMerge(SDNode *N, unsigned UnitSize,
356 unsigned LHSStart, unsigned RHSStart) {
357 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
358 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
359 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
360 "Unsupported merge size!");
362 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
363 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
364 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
365 LHSStart+j+i*UnitSize) ||
366 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
367 RHSStart+j+i*UnitSize))
373 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
374 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}
381 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
382 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}
390 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
391 /// amount, otherwise return -1.
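/// For example, the byte mask {3,4,5,...,18} is "vsldoi vD, vA, vB, 3": take
/// 16 consecutive bytes starting at offset 3 of the concatenated inputs.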
392 int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
393 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
394 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
395 // Find the first non-undef value in the shuffle mask.
397 for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
400 if (i == 16) return -1; // all undef.
  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
404 unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
405 if (ShiftAmt < i) return -1;
  // Check the rest of the elements to see if they are consecutive.
410 for (++i; i != 16; ++i)
411 if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
  // Check the rest of the elements to see if they are consecutive.
415 for (++i; i != 16; ++i)
416 if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
423 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
424 /// specifies a splat of a single element that is suitable for input to
425 /// VSPLTB/VSPLTH/VSPLTW.
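/// For example, with EltSize == 4 the mask {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7}
/// splats word element 1 and is emitted as "vspltw vD, vB, 1".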
426 bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
427 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
428 N->getNumOperands() == 16 &&
429 (EltSize == 1 || EltSize == 2 || EltSize == 4));
431 // This is a splat operation if each element of the permute is the same, and
432 // if the value doesn't reference the second vector.
433 unsigned ElementBase = 0;
434 SDOperand Elt = N->getOperand(0);
435 if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
436 ElementBase = EltV->getValue();
438 return false; // FIXME: Handle UNDEF elements too!
440 if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
  // Check that they are consecutive.
444 for (unsigned i = 1; i != EltSize; ++i) {
445 if (!isa<ConstantSDNode>(N->getOperand(i)) ||
446 cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
450 assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
451 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
452 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
453 assert(isa<ConstantSDNode>(N->getOperand(i)) &&
454 "Invalid VECTOR_SHUFFLE mask!");
455 for (unsigned j = 0; j != EltSize; ++j)
456 if (N->getOperand(i+j) != N->getOperand(j))
463 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
464 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
465 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
466 assert(isSplatShuffleMask(N, EltSize));
467 return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
470 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
471 /// by using a vspltis[bhw] instruction of the specified element size, return
472 /// the constant being splatted. The ByteSize field indicates the number of
473 /// bytes of each element [124] -> [bhw].
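/// For example, a v8i16 build_vector of eight 3's queried with ByteSize == 2
/// returns the constant 3, which the caller can emit as "vspltish vD, 3".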
474 SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
475 SDOperand OpVal(0, 0);
477 // If ByteSize of the splat is bigger than the element size of the
478 // build_vector, then we have a case where we are checking for a splat where
479 // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
481 unsigned EltSize = 16/N->getNumOperands();
482 if (EltSize < ByteSize) {
483 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
484 SDOperand UniquedVals[4];
485 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
487 // See if all of the elements in the buildvector agree across.
488 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
489 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
490 // If the element isn't a constant, bail fully out.
491 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();
494 if (UniquedVals[i&(Multiple-1)].Val == 0)
495 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
496 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
497 return SDOperand(); // no match.
500 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
501 // either constant or undef values that are identical for each chunk. See
502 // if these chunks can form into a larger vspltis*.
504 // Check to see if all of the leading entries are either 0 or -1. If
505 // neither, then this won't fit into the immediate field.
506 bool LeadingZero = true;
507 bool LeadingOnes = true;
508 for (unsigned i = 0; i != Multiple-1; ++i) {
509 if (UniquedVals[i].Val == 0) continue; // Must have been undefs.
511 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
512 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
514 // Finally, check the least significant entry.
516 if (UniquedVals[Multiple-1].Val == 0)
517 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef
518 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
520 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
523 if (UniquedVals[Multiple-1].Val == 0)
524 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
525 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
526 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
527 return DAG.getTargetConstant(Val, MVT::i32);
533 // Check to see if this buildvec has a single non-undef value in its elements.
534 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
535 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
537 OpVal = N->getOperand(i);
538 else if (OpVal != N->getOperand(i))
542 if (OpVal.Val == 0) return SDOperand(); // All UNDEF: use implicit def.
544 unsigned ValSizeInBytes = 0;
546 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
547 Value = CN->getValue();
548 ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
549 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
550 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
551 Value = FloatToBits(CN->getValue());
555 // If the splat value is larger than the element value, then we can never do
  // this splat.  The only replicated value that would fit in our immediate
  // field is zero, and we prefer to use vxor for it.
558 if (ValSizeInBytes < ByteSize) return SDOperand();
560 // If the element value is larger than the splat value, cut it in half and
561 // check to see if the two halves are equal. Continue doing this until we
562 // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
563 while (ValSizeInBytes > ByteSize) {
564 ValSizeInBytes >>= 1;
566 // If the top half equals the bottom half, we're still ok.
567 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
568 (Value & ((1 << (8*ValSizeInBytes))-1)))
572 // Properly sign extend the value.
573 int ShAmt = (4-ByteSize)*8;
574 int MaskVal = ((int)Value << ShAmt) >> ShAmt;
576 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
577 if (MaskVal == 0) return SDOperand();
579 // Finally, if this value fits in a 5 bit sext field, return it
580 if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
581 return DAG.getTargetConstant(MaskVal, MVT::i32);
585 //===----------------------------------------------------------------------===//
586 // LowerOperation implementation
587 //===----------------------------------------------------------------------===//
589 static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
590 MVT::ValueType PtrVT = Op.getValueType();
591 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
592 Constant *C = CP->get();
593 SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
594 SDOperand Zero = DAG.getConstant(0, PtrVT);
596 const TargetMachine &TM = DAG.getTarget();
598 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
599 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);
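  // These PPCISD::Hi/Lo nodes typically select to an addis/addi (lis/la) pair
  // that materializes the high and low halves of the symbol's address.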
601 // If this is a non-darwin platform, we don't support non-static relo models
603 if (TM.getRelocationModel() == Reloc::Static ||
604 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the constant pool entry is just (hi(&cp)+lo(&cp)).
607 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
610 if (TM.getRelocationModel() == Reloc::PIC) {
611 // With PIC, the first instruction is actually "GR+hi(&G)".
612 Hi = DAG.getNode(ISD::ADD, PtrVT,
613 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
616 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
620 static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
621 MVT::ValueType PtrVT = Op.getValueType();
622 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
623 SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
624 SDOperand Zero = DAG.getConstant(0, PtrVT);
626 const TargetMachine &TM = DAG.getTarget();
628 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
629 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);
631 // If this is a non-darwin platform, we don't support non-static relo models
633 if (TM.getRelocationModel() == Reloc::Static ||
634 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the jump table.
    // The address of the jump table is just (hi(&jt)+lo(&jt)).
637 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
640 if (TM.getRelocationModel() == Reloc::PIC) {
641 // With PIC, the first instruction is actually "GR+hi(&G)".
642 Hi = DAG.getNode(ISD::ADD, PtrVT,
643 DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
646 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
650 static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
651 MVT::ValueType PtrVT = Op.getValueType();
652 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
653 GlobalValue *GV = GSDN->getGlobal();
654 SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
655 SDOperand Zero = DAG.getConstant(0, PtrVT);
657 const TargetMachine &TM = DAG.getTarget();
659 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
660 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);
662 // If this is a non-darwin platform, we don't support non-static relo models
664 if (TM.getRelocationModel() == Reloc::Static ||
665 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
666 // Generate non-pic code that has direct accesses to globals.
667 // The address of the global is just (hi(&g)+lo(&g)).
668 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
671 if (TM.getRelocationModel() == Reloc::PIC) {
672 // With PIC, the first instruction is actually "GR+hi(&G)".
673 Hi = DAG.getNode(ISD::ADD, PtrVT,
674 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
677 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}
688 static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
689 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
691 // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
693 // fold the new nodes.
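  // i.e. (x == 0) becomes (ctlz(x) >> log2(bitwidth)), a cntlzw/srwi pair for
  // 32-bit values.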
694 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
695 if (C->isNullValue() && CC == ISD::SETEQ) {
696 MVT::ValueType VT = Op.getOperand(0).getValueType();
697 SDOperand Zext = Op.getOperand(0);
700 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
702 unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
703 SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
704 SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
705 DAG.getConstant(Log2b, MVT::i32));
706 return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
708 // Leave comparisons against 0 and -1 alone for now, since they're usually
709 // optimized. FIXME: revisit this when we can custom lower all setcc
711 if (C->isAllOnesValue() || C->isNullValue())
715 // If we have an integer seteq/setne, turn it into a compare against zero
716 // by subtracting the rhs from the lhs, which is faster than setting a
717 // condition register, reading it back out, and masking the correct bit.
718 MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
719 if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
720 MVT::ValueType VT = Op.getValueType();
721 SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
723 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
728 static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
729 unsigned VarArgsFrameIndex) {
730 // vastart just stores the address of the VarArgsFrameIndex slot into the
731 // memory location argument.
732 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
733 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
734 Op.getOperand(1), Op.getOperand(2));
737 static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
738 int &VarArgsFrameIndex) {
739 // TODO: add description of PPC stack frame format, or at least some docs.
741 MachineFunction &MF = DAG.getMachineFunction();
742 MachineFrameInfo *MFI = MF.getFrameInfo();
743 SSARegMap *RegMap = MF.getSSARegMap();
744 std::vector<SDOperand> ArgValues;
745 SDOperand Root = Op.getOperand(0);
747 unsigned ArgOffset = 24;
748 const unsigned Num_GPR_Regs = 8;
749 const unsigned Num_FPR_Regs = 13;
750 const unsigned Num_VR_Regs = 12;
751 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
753 static const unsigned GPR_32[] = { // 32-bit registers.
754 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
755 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
757 static const unsigned GPR_64[] = { // 64-bit registers.
758 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
759 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
761 static const unsigned FPR[] = {
762 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
763 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
765 static const unsigned VR[] = {
766 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
767 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
770 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
771 bool isPPC64 = PtrVT == MVT::i64;
772 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
774 // Add DAG nodes to load the arguments or copy them out of registers. On
775 // entry to a function on PPC, the arguments start at offset 24, although the
776 // first ones are often in registers.
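  // Those 24 bytes are the linkage area ([SP][CR][LR][3 x unused] words); the
  // first GPR argument (r3) shadows the parameter slot at offset 24.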
777 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
779 bool needsLoad = false;
780 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
781 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
783 unsigned CurArgOffset = ArgOffset;
785 default: assert(0 && "Unhandled argument type!");
787 // All int arguments reserve stack space.
788 ArgOffset += isPPC64 ? 8 : 4;
790 if (GPR_idx != Num_GPR_Regs) {
791 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
792 MF.addLiveIn(GPR[GPR_idx], VReg);
793 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
799 case MVT::i64: // PPC64
800 // All int arguments reserve stack space.
803 if (GPR_idx != Num_GPR_Regs) {
804 unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
805 MF.addLiveIn(GPR[GPR_idx], VReg);
806 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
814 // All FP arguments reserve stack space.
815 ArgOffset += ObjSize;
817 // Every 4 bytes of argument space consumes one of the GPRs available for
819 if (GPR_idx != Num_GPR_Regs) {
821 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs)
824 if (FPR_idx != Num_FPR_Regs) {
826 if (ObjectVT == MVT::f32)
827 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
829 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
830 MF.addLiveIn(FPR[FPR_idx], VReg);
831 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
841 // Note that vector arguments in registers don't reserve stack space.
842 if (VR_idx != Num_VR_Regs) {
843 unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
844 MF.addLiveIn(VR[VR_idx], VReg);
845 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
848 // This should be simple, but requires getting 16-byte aligned stack
850 assert(0 && "Loading VR argument not implemented yet!");
856 // We need to load the argument to a virtual register if we determined above
857 // that we ran out of physical registers of the appropriate type
859 // If the argument is actually used, emit a load from the right stack
861 if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
862 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
863 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
864 ArgVal = DAG.getLoad(ObjectVT, Root, FIN,
865 DAG.getSrcValue(NULL));
867 // Don't emit a dead load.
868 ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
872 ArgValues.push_back(ArgVal);
875 // If the function takes variable number of arguments, make a frame index for
876 // the start of the first vararg value... for expansion of llvm.va_start.
877 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
879 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
881 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
882 // If this function is vararg, store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing the
884 // result of va_next.
885 std::vector<SDOperand> MemOps;
886 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
887 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
888 MF.addLiveIn(GPR[GPR_idx], VReg);
889 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
890 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
891 Val, FIN, DAG.getSrcValue(NULL));
892 MemOps.push_back(Store);
    // Increment the address by the pointer size for the next argument to store.
894 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
895 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
898 Root = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
901 ArgValues.push_back(Root);
903 // Return the new list of results.
904 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
905 Op.Val->value_end());
906 return DAG.getNode(ISD::MERGE_VALUES, RetVT, ArgValues);
/// isBLACompatibleAddress - Return the immediate to use if the specified
910 /// 32-bit value is representable in the immediate field of a BxA instruction.
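/// The LI field is a 26-bit sign-extended, word-aligned offset, so for example
/// the absolute address 0x1000 is encoded as the immediate 0x400.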
911 static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
912 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
915 int Addr = C->getValue();
916 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
917 (Addr << 6 >> 6) != Addr)
918 return 0; // Top 6 bits have to be sext of immediate.
920 return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
924 static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
925 SDOperand Chain = Op.getOperand(0);
926 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
927 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
928 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
929 SDOperand Callee = Op.getOperand(4);
930 unsigned NumOps = (Op.getNumOperands() - 5) / 2;
932 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
933 bool isPPC64 = PtrVT == MVT::i64;
934 unsigned PtrByteSize = isPPC64 ? 8 : 4;
937 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
938 // SelectExpr to use to put the arguments in the appropriate registers.
939 std::vector<SDOperand> args_to_use;
941 // Count how many bytes are to be pushed on the stack, including the linkage
942 // area, and parameter passing area. We start with 24/48 bytes, which is
943 // prereserved space for [SP][CR][LR][3 x unused].
944 unsigned NumBytes = 6*PtrByteSize;
946 // Add up all the space actually used.
947 for (unsigned i = 0; i != NumOps; ++i)
948 NumBytes += MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
950 // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee is
  // varargs.
952 // Because we cannot tell if this is needed on the caller side, we have to
953 // conservatively assume that it is needed. As such, make sure we have at
954 // least enough stack space for the caller to store the 8 GPRs.
955 if (NumBytes < 6*PtrByteSize+8*PtrByteSize)
956 NumBytes = 6*PtrByteSize+8*PtrByteSize;
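  // For a 32-bit target this floor is 24 + 32 = 56 bytes of outgoing argument
  // space, even when every argument is actually passed in registers.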
958 // Adjust the stack pointer for the new arguments...
959 // These operations are automatically eliminated by the prolog/epilog pass
960 Chain = DAG.getCALLSEQ_START(Chain,
961 DAG.getConstant(NumBytes, PtrVT));
  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDOperand StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
972 // Figure out which arguments are going to go in registers, and which in
973 // memory. Also, if this is a vararg function, floating point operations
974 // must be stored to our stack, and loaded into integer regs as well, if
975 // any integer regs are available for argument passing.
976 unsigned ArgOffset = 6*PtrByteSize;
977 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
978 static const unsigned GPR_32[] = { // 32-bit registers.
979 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
980 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
982 static const unsigned GPR_64[] = { // 64-bit registers.
983 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
984 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
986 static const unsigned FPR[] = {
987 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
988 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
990 static const unsigned VR[] = {
991 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
992 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
994 const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
995 const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]);
996 const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]);
998 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
1000 std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1001 std::vector<SDOperand> MemOpChains;
1002 for (unsigned i = 0; i != NumOps; ++i) {
1003 SDOperand Arg = Op.getOperand(5+2*i);
1005 // PtrOff will be used to store the current argument to the stack if a
1006 // register cannot be found for it.
1007 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1008 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
1010 // On PPC64, promote integers to 64-bit values.
1011 if (isPPC64 && Arg.getValueType() == MVT::i32) {
1012 unsigned ExtOp = ISD::ZERO_EXTEND;
1013 if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue())
1014 ExtOp = ISD::SIGN_EXTEND;
1015 Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
1018 switch (Arg.getValueType()) {
1019 default: assert(0 && "Unexpected ValueType for argument!");
1022 if (GPR_idx != NumGPRs) {
1023 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
1025 MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1026 Arg, PtrOff, DAG.getSrcValue(NULL)));
1028 ArgOffset += PtrByteSize;
1032 if (FPR_idx != NumFPRs) {
1033 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
1036 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
1038 DAG.getSrcValue(NULL));
1039 MemOpChains.push_back(Store);
1041 // Float varargs are always shadowed in available integer registers
1042 if (GPR_idx != NumGPRs) {
1043 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff,
1044 DAG.getSrcValue(NULL));
1045 MemOpChains.push_back(Load.getValue(1));
1046 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
1048 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64) {
1049 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
1050 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
1051 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff,
1052 DAG.getSrcValue(NULL));
1053 MemOpChains.push_back(Load.getValue(1));
1054 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
1057 // If we have any FPRs remaining, we may also have GPRs remaining.
1058 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
1060 if (GPR_idx != NumGPRs)
1062 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64)
1066 MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1067 Arg, PtrOff, DAG.getSrcValue(NULL)));
1072 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
1078 assert(!isVarArg && "Don't support passing vectors to varargs yet!");
1079 assert(VR_idx != NumVRs &&
1080 "Don't support passing more than 12 vector args yet!");
1081 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
1085 if (!MemOpChains.empty())
1086 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOpChains);
1088 // Build a sequence of copy-to-reg nodes chained together with token chain
1089 // and flag operands which copy the outgoing args into the appropriate regs.
1091 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1092 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1094 InFlag = Chain.getValue(1);
1097 std::vector<MVT::ValueType> NodeTys;
1098 NodeTys.push_back(MVT::Other); // Returns a chain
1099 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1101 std::vector<SDOperand> Ops;
1102 unsigned CallOpc = PPCISD::CALL;
1104 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1105 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1106 // node so that legalize doesn't hack it.
1107 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1108 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
1109 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1110 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
1111 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
1112 // If this is an absolute destination address, use the munged value.
1113 Callee = SDOperand(Dest, 0);
1115 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
1116 // to do the call, we can't use PPCISD::CALL.
1117 Ops.push_back(Chain);
1118 Ops.push_back(Callee);
1121 Ops.push_back(InFlag);
1122 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, Ops);
1123 InFlag = Chain.getValue(1);
1125 // Copy the callee address into R12 on darwin.
1126 Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag);
1127 InFlag = Chain.getValue(1);
1130 NodeTys.push_back(MVT::Other);
1131 NodeTys.push_back(MVT::Flag);
1133 Ops.push_back(Chain);
1134 CallOpc = PPCISD::BCTRL;
1138 // If this is a direct call, pass the chain and the callee.
1140 Ops.push_back(Chain);
1141 Ops.push_back(Callee);
1144 // Add argument registers to the end of the list so that they are known live
1146 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1147 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1148 RegsToPass[i].second.getValueType()));
1151 Ops.push_back(InFlag);
1152 Chain = DAG.getNode(CallOpc, NodeTys, Ops);
1153 InFlag = Chain.getValue(1);
1155 std::vector<SDOperand> ResultVals;
1158 // If the call has results, copy the values out of the ret val registers.
1159 switch (Op.Val->getValueType(0)) {
1160 default: assert(0 && "Unexpected ret value!");
1161 case MVT::Other: break;
1163 if (Op.Val->getValueType(1) == MVT::i32) {
1164 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
1165 ResultVals.push_back(Chain.getValue(0));
1166 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
1167 Chain.getValue(2)).getValue(1);
1168 ResultVals.push_back(Chain.getValue(0));
1169 NodeTys.push_back(MVT::i32);
1171 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
1172 ResultVals.push_back(Chain.getValue(0));
1174 NodeTys.push_back(MVT::i32);
1177 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
1178 ResultVals.push_back(Chain.getValue(0));
1179 NodeTys.push_back(MVT::i64);
1183 Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
1184 InFlag).getValue(1);
1185 ResultVals.push_back(Chain.getValue(0));
1186 NodeTys.push_back(Op.Val->getValueType(0));
1192 Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
1193 InFlag).getValue(1);
1194 ResultVals.push_back(Chain.getValue(0));
1195 NodeTys.push_back(Op.Val->getValueType(0));
1199 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
1200 DAG.getConstant(NumBytes, PtrVT));
1201 NodeTys.push_back(MVT::Other);
1203 // If the function returns void, just return the chain.
1204 if (ResultVals.empty())
1207 // Otherwise, merge everything together with a MERGE_VALUES node.
1208 ResultVals.push_back(Chain);
1209 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, ResultVals);
1210 return Res.getValue(Op.ResNo);
1213 static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
1215 switch(Op.getNumOperands()) {
1217 assert(0 && "Do not know how to return this many arguments!");
1220 return SDOperand(); // ret void is legal
1222 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
1224 if (ArgVT == MVT::i32) {
1226 } else if (ArgVT == MVT::i64) {
1228 } else if (MVT::isFloatingPoint(ArgVT)) {
1231 assert(MVT::isVector(ArgVT));
1235 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
    // If we haven't noted that R3/F1 is live out, do so now.
1239 if (DAG.getMachineFunction().liveout_empty())
1240 DAG.getMachineFunction().addLiveOut(ArgReg);
1244 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3),
1246 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
    // If we haven't noted that R3 and R4 are live out, do so now.
1248 if (DAG.getMachineFunction().liveout_empty()) {
1249 DAG.getMachineFunction().addLiveOut(PPC::R3);
1250 DAG.getMachineFunction().addLiveOut(PPC::R4);
1254 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instructions
/// when it is possible to do so.
1259 static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
1260 // Not FP? Not a fsel.
1261 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
1262 !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
1265 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1267 // Cannot handle SETEQ/SETNE.
1268 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
1270 MVT::ValueType ResVT = Op.getValueType();
1271 MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
1272 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1273 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
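  // PPCISD::FSEL picks its second operand when its first operand is >= 0.0 and
  // its third otherwise, so an ordered FP select_cc can be rewritten as an fsel
  // of either the comparison operand itself (when RHS is 0.0) or of the
  // difference LHS-RHS / RHS-LHS computed below.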
1275 // If the RHS of the comparison is a 0.0, we don't need to do the
1276 // subtraction at all.
1277 if (isFloatingPointZero(RHS))
1279 default: break; // SETUO etc aren't handled by fsel.
1283 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
1287 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
1288 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1289 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
1293 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
1297 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
1298 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1299 return DAG.getNode(PPCISD::FSEL, ResVT,
1300 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
1305 default: break; // SETUO etc aren't handled by fsel.
1309 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1310 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1311 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1312 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1316 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1317 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1318 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1319 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
1323 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
1324 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1325 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1326 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1330 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
1331 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1332 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1333 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
1338 static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
1339 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
1340 SDOperand Src = Op.getOperand(0);
1341 if (Src.getValueType() == MVT::f32)
1342 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
1345 switch (Op.getValueType()) {
1346 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
1348 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
1351 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
1355 // Convert the FP value to an int value through memory.
1356 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
1357 if (Op.getValueType() == MVT::i32)
1358 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
1362 static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
1363 if (Op.getOperand(0).getValueType() == MVT::i64) {
1364 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
1365 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
1366 if (Op.getValueType() == MVT::f32)
1367 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
1371 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
1372 "Unhandled SINT_TO_FP type in custom expander!");
1373 // Since we only generate this in 64-bit mode, we can take advantage of
1374 // 64-bit registers. In particular, sign extend the input value into the
1375 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
1376 // then lfd it and fcfid it.
1377 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
1378 int FrameIdx = FrameInfo->CreateStackObject(8, 8);
1379 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
1381 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
1384 // STD the extended value into the stack slot.
1385 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
1386 DAG.getEntryNode(), Ext64, FIdx,
1387 DAG.getSrcValue(NULL));
1388 // Load the value as a double.
1389 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));
1391 // FCFID it and return it.
1392 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
1393 if (Op.getValueType() == MVT::f32)
1394 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
1398 static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG,
1399 MVT::ValueType PtrVT) {
1400 assert(Op.getValueType() == MVT::i64 &&
1401 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
1402 // The generic code does a fine job expanding shift by a constant.
1403 if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
1405 // Otherwise, expand into a bunch of logical ops. Note that these ops
1406 // depend on the PPC behavior for oversized shift amounts.
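  // Concretely: OutHi = (Hi << Amt) | (Lo >> (32-Amt)) | (Lo << (Amt-32)) and
  // OutLo = Lo << Amt, relying on PPC shifts producing 0 for amounts of 32-63
  // rather than being undefined.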
1407 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1408 DAG.getConstant(0, PtrVT));
1409 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1410 DAG.getConstant(1, PtrVT));
1411 SDOperand Amt = Op.getOperand(1);
1413 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1414 DAG.getConstant(32, MVT::i32), Amt);
1415 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
1416 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
1417 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1418 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1419 DAG.getConstant(-32U, MVT::i32));
1420 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
1421 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
1422 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
1423 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
1426 static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG,
1427 MVT::ValueType PtrVT) {
1428 assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
1430 // The generic code does a fine job expanding shift by a constant.
1431 if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
1433 // Otherwise, expand into a bunch of logical ops. Note that these ops
1434 // depend on the PPC behavior for oversized shift amounts.
1435 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1436 DAG.getConstant(0, PtrVT));
1437 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1438 DAG.getConstant(1, PtrVT));
1439 SDOperand Amt = Op.getOperand(1);
1441 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1442 DAG.getConstant(32, MVT::i32), Amt);
1443 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
1444 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
1445 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1446 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1447 DAG.getConstant(-32U, MVT::i32));
1448 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
1449 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
1450 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
1451 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
1454 static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG,
1455 MVT::ValueType PtrVT) {
1456 assert(Op.getValueType() == MVT::i64 &&
1457 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
1458 // The generic code does a fine job expanding shift by a constant.
1459 if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
1461 // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
1462 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1463 DAG.getConstant(0, PtrVT));
1464 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
1465 DAG.getConstant(1, PtrVT));
1466 SDOperand Amt = Op.getOperand(1);
1468 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1469 DAG.getConstant(32, MVT::i32), Amt);
1470 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
1471 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
1472 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1473 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1474 DAG.getConstant(-32U, MVT::i32));
1475 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
1476 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
1477 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
1478 Tmp4, Tmp6, ISD::SETLE);
1479 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
1482 //===----------------------------------------------------------------------===//
1483 // Vector related lowering.
1486 // If this is a vector of constants or undefs, get the bits. A bit in
1487 // UndefBits is set if the corresponding element of the vector is an
1488 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
1489 // zero. Return true if this is not an array of constants, false if it is.
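// For example the v8i16 constant vector {1,2,3,4,5,6,7,8} produces
// VectorBits[0] = 0x0001000200030004 and VectorBits[1] = 0x0005000600070008,
// with both UndefBits words equal to 0.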
1491 static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
1492 uint64_t UndefBits[2]) {
1493 // Start with zero'd results.
1494 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
1496 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
1497 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
1498 SDOperand OpVal = BV->getOperand(i);
    unsigned PartNo = i >= e/2;     // In the upper half of the vector?
1501 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
1503 uint64_t EltBits = 0;
1504 if (OpVal.getOpcode() == ISD::UNDEF) {
1505 uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
1506 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
1508 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1509 EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
1510 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
1511 assert(CN->getValueType(0) == MVT::f32 &&
1512 "Only one legal FP vector type!");
1513 EltBits = FloatToBits(CN->getValue());
1515 // Nonconstant element.
1519 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
1522 //printf("%llx %llx %llx %llx\n",
1523 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
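// As a sketch of the packing (not taken from a real DAG): a v8i16
// build_vector <1, 2, 3, 4, u, 6, 7, 8> packs elements 0-3 into VectorBits[0]
// and elements 4-7 into VectorBits[1], with element 0 landing in the
// highest-numbered 16-bit slot of its uint64_t.  The undef element only sets
// the matching 16 bits of UndefBits[1]; its VectorBits remain zero.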
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {
  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
  // the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.

  uint64_t Bits64  = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];

  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.

  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);

  // If the top 16-bits are different than the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }

  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);

  // If the top 8-bits are different than the lower 8-bits, ignoring
  // undefs, we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }

  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  // Force vspltis[hw] -1 to vspltisb -1.
  if (Val == -1) SplatSize = 1;

  static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  MVT::ValueType CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
  std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
}
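// For example (illustrative only): BuildSplatI(2, 4, MVT::v8i16, DAG) builds a
// v4i32 BUILD_VECTOR of the constant 2, which the instruction selector is
// expected to match to a vspltisw, and bitcasts the result to v8i16.
// BuildSplatI(-1, N, VT, DAG) always canonicalizes to the byte form, since a
// vector of all-ones bits is the same value at every element size.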
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT::ValueType VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);

  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(i+Amt, MVT::i32));
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}
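// The shuffle mask built above selects bytes Amt..Amt+15 of the 32-byte
// concatenation LHS:RHS, which matches what the AltiVec vsldoi instruction
// produces for a byte shift of Amt (callers pass amounts in the 0-15 range
// the instruction supports).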
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);

    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }

    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 and vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);

      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }
    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };

    for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i
      // in this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);
      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      // vsplti + sra self.  (Use an arithmetic shift here; the unsigned shift
      // above already covers the srl case.)
      if (SextVal == (i >> (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }
    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
    }
  }

  return SDOperand();
}
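// A summary of how each PerfectShuffleTable entry is decoded below, inferred
// from the field extraction in GeneratePerfectShuffle:
//   bits 31-30: cost (number of instructions in the generated sequence)
//   bits 29-26: operation to perform (copy, merges, word splats, vsldoi)
//   bits 25-13: LHSID, the table index describing how to build the left input
//   bits 12-0 : RHSID, the table index describing how to build the right input
// The IDs are base-9 encodings of a four-element selector: digits 0-7 pick one
// of the eight i32 elements of the two input vectors and 8 means undef.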
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }

  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32));

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
}
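// In the lowering below, each 4-byte output element is summarized by a single
// index in PFIndexes: values 0-7 name one of the eight i32 elements of the two
// input vectors, and 8 means the element is entirely undef.  This matches the
// base-9 encoding used to index the perfect shuffle table.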
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
        continue;   // Undef, ignore it.

      unsigned ByteSource =
        cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
  }
  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
  MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
  unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;

  std::vector<SDOperand> ResultMask;
  for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
    unsigned SrcElt;
    if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
      SrcElt = 0;
    else
      SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

    for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i8));
  }

  SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}
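// The CompareOpc values filled in below appear to be the extended-opcode
// fields of the corresponding AltiVec vcmp* instructions (for example,
// vcmpequb is 6 and vcmpbfp is 966); the instruction selector is expected to
// plug them directly into the VCMP/VCMPo node that gets emitted.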
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison.  If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
                                  bool &isDot) {
  unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
  switch (IntrinsicID) {
  default: return false;
    // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

    // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}
/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
    return SDOperand();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                Op.getOperand(1), Op.getOperand(2),
                                DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  std::vector<SDOperand> Ops;
  std::vector<MVT::ValueType> VTs;
  Ops.push_back(Op.getOperand(2));  // LHS
  Ops.push_back(Op.getOperand(3));  // RHS
  Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
  VTs.push_back(Op.getOperand(2).getValueType());
  VTs.push_back(MVT::Flag);
  SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                DAG.getRegister(PPC::CR6, MVT::i32),
                                CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                      DAG.getConstant(8-(3-BitNo), MVT::i32));

  Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                      DAG.getConstant(1, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}
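// A note on the CR6 unpacking above: after the MFCR copy, CR field 6 is
// assumed to sit in bits 7..4 of the GPR (counting from the LSB), with LT in
// bit 7 and EQ in bit 5.  The shift amount 8-(3-BitNo) therefore moves the
// requested bit (BitNo 0 = EQ, BitNo 2 = LT) down to bit 0 before it is
// masked with 1 and optionally inverted.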
static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16);
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);

  // Store the input value into Value#0 of the stack slot.
  SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
                                Op.getOperand(0), FIdx, DAG.getSrcValue(NULL));
  // Load it out.
  return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
}
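// LowerMUL handles the vector multiplies AltiVec has no single instruction
// for.  Sketching the arithmetic of the v4i32 case below: write each 32-bit
// lane of the left operand as a*2^16 + b and of the right operand as
// c*2^16 + d.  Then the low 32 bits of the product are
//   (a*2^16 + b) * (c*2^16 + d) mod 2^32  =  b*d + ((a*d + b*c) << 16).
// vmulouh computes the b*d terms, vmsumuhm applied to the halfword-rotated
// RHS computes a*d + b*c per lane, and the final vslw/add reassembles the
// result (the vmulouh/vmsumuhm semantics are taken from the AltiVec
// instruction definitions).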
static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getValueType() == MVT::v4i32) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
    SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG);  // +16 as shift amt.

    SDOperand RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                        LHS, RHS, DAG, MVT::v4i32);

    SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                           LHS, RHS, DAG, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                          LHS, RHS, DAG, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);

    // Merge the results together.
    std::vector<SDOperand> Ops;
    for (unsigned i = 0; i != 8; ++i) {
      Ops.push_back(DAG.getConstant(2*i+1, MVT::i8));
      Ops.push_back(DAG.getConstant(2*i+1+16, MVT::i8));
    }

    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  } else {
    assert(0 && "Unknown mul to lower!");
    return SDOperand();
  }
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::FORMAL_ARGUMENTS:
      return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);

  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL:                return LowerSHL(Op, DAG, getPointerTy());
  case ISD::SRL:                return LowerSRL(Op, DAG, getPointerTy());
  case ISD::SRA:                return LowerSRA(Op, DAG, getPointerTy());

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  }
  return SDOperand();
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//
MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
          MI->getOpcode() == PPC::SELECT_CC_I8 ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8 ||
          MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
       e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }
    break;
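    // Note on the STORE combine above: fctiwz leaves the converted integer in
    // a floating-point register, and stfiwx stores the low 32 bits of an FPR
    // directly to memory, so the combined sequence avoids the FPR -> stack ->
    // GPR round trip that a separate FP_TO_SINT followed by an i32 store would
    // otherwise require.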
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<SDOperand> Ops;
      std::vector<MVT::ValueType> VTs;
      Ops.push_back(LHS.getOperand(2));  // LHS of compare
      Ops.push_back(LHS.getOperand(3));  // RHS of compare
      Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);

      // Unpack the result based on how the target uses it.
      unsigned CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         DAG.getConstant(CompOpc, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}
//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//
void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  default: break;
  case 'b': case 'r': case 'f': case 'v': case 'y':
    return C_RegisterClass;
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}
std::vector<unsigned> PPCTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {      // GCC RS6000 Constraint Letters
    default: break;               // Unknown constraint letter
    case 'b':
      return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'r':
      return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'f':
      return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
                                   PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
                                   PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
                                   PPC::F12, PPC::F13, PPC::F14, PPC::F15,
                                   PPC::F16, PPC::F17, PPC::F18, PPC::F19,
                                   PPC::F20, PPC::F21, PPC::F22, PPC::F23,
                                   PPC::F24, PPC::F25, PPC::F26, PPC::F27,
                                   PPC::F28, PPC::F29, PPC::F30, PPC::F31,
                                   0);
    case 'v':
      return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
                                   PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
                                   PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
                                   PPC::V12, PPC::V13, PPC::V14, PPC::V15,
                                   PPC::V16, PPC::V17, PPC::V18, PPC::V19,
                                   PPC::V20, PPC::V21, PPC::V22, PPC::V23,
                                   PPC::V24, PPC::V25, PPC::V26, PPC::V27,
                                   PPC::V28, PPC::V29, PPC::V30, PPC::V31,
                                   0);
    case 'y':
      return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
                                   PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
                                   0);
    }
  }

  return std::vector<unsigned>();
}
// isOperandValidForConstraint
bool PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return false;    // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      return (short)Value == (int)Value;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      return (short)Value == 0;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      return (Value >> 16) == 0;
    case 'M':  // "M" is a constant that is greater than 31.
      return Value > 31;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      return (int)Value > 0 && isPowerOf2_32(Value);
    case 'O':  // "O" is the constant zero.
      return Value == 0;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      return (short)-Value == (int)-Value;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter);
}
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // PPC allows a sign-extended 16-bit immediate field, i.e. [-32768, 32767].
  return V >= -(1 << 15) && V < (1 << 15);
}