//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
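  // Note: marking these FP operations Expand means the legalizer turns them
  // into runtime library calls (sin, cos, fmod) rather than trying to select
  // native instructions for them.
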
  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }
  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }
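  // The custom i64 shift lowering used on 32-bit subtargets is implemented by
  // LowerSHL, LowerSRL and LowerSRA later in this file.
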
  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE , (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);

  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  }
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

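// Note: the shuffle masks checked below are 16-operand BUILD_VECTORs of byte
// indices into the concatenation of the two source vectors (bytes 0-15 come
// from the first input, 16-31 from the second).  For example, the unary
// VPKUHUM pattern is <1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15>: the low-order
// byte of each halfword, taken twice from a single source.
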
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

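// For example, with UnitSize == 4, LHSStart == 0 and RHSStart == 16 the check
// above accepts the VMRGHW byte pattern
// <0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23>: units alternate between the two
// inputs, starting at the given offsets.
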
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

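// In other words, a vsldoi mask is <N, N+1, ..., N+15> (taken modulo 16 in
// the unary case), which selects the 16 bytes starting N bytes into the
// concatenated input; N is the shift amount returned above.
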
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

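// For example, a v4i32 splat of element 2 is the byte mask <8,9,10,11>
// repeated four times; isSplatShuffleMask accepts it with EltSize == 4, and
// getVSPLTImmediate returns 8/4 == 2 for the VSPLTW immediate.
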
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                                // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

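// Worked example for the halving loop above: a v8i16 build_vector with every
// element 0x0101 and ByteSize == 1 halves 0x0101 down to 0x01, which
// sign-extends to 1 and fits the 5-bit field, so the function returns the
// constant 1 (i.e. vspltisb 1).
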
//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->get();
  SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  return Lo;
}

static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub, so load the address through the computed pointer.
  return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}

static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by subtracting the rhs from the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }

  return SDOperand();
}

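// The ctlz/srl trick above: for an i32 value x, ctlz(x) is 32 exactly when
// x == 0, so (ctlz(x) >> 5) yields 1 for x == 0 and 0 otherwise.
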
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}

static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand(); // ret void is legal
  case 2: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (MVT::isVector(ArgVT))
      ArgReg = PPC::V2;
    else if (MVT::isInteger(ArgVT))
      ArgReg = PPC::R3;
    else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());

    // If we haven't noted the R3/F1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 3:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
    // If we haven't noted the R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }
  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}

/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction
/// when possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();

  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV  = Op.getOperand(2), FV  = Op.getOperand(3);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETUGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETULE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT,
                         DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
    }

  SDOperand Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETUGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETULE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  }
  return SDOperand();
}

static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
  SDOperand Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

  SDOperand Tmp;
  switch (Op.getValueType()) {
  default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
    break;
  case MVT::i64:
    Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
  SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
  if (Op.getValueType() == MVT::i32)
    Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
  return Bits;
}

static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
    SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
    if (Op.getValueType() == MVT::f32)
      FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled SINT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);

  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));

  // STD the extended value into the stack slot.
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getSrcValue(NULL));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));

  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
  return FP;
}

static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
  SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

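// The high word above is (Hi << Amt) | (Lo >> (32-Amt)) | (Lo << (Amt-32)).
// PPC's slw/srw produce 0 whenever the 6-bit shift amount is 32 or larger
// (the behavior PPCISD::SHL/SRL rely on), so this OR-based expansion yields
// the correct result for any Amt in 0..63 without branches.
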
static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
  SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
  SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
  SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
                                    Tmp4, Tmp6, ISD::SETLE);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

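// Unlike SHL/SRL, the OR trick alone cannot produce the low word here: once
// Amt exceeds 32, the low word must be the arithmetic shift of Hi (Tmp6)
// rather than the OR combination (Tmp4), so the select_cc on Amt-32 <= 0
// picks between the two forms.
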
//===----------------------------------------------------------------------===//
//  Vector related lowering.
//

// If this is a vector of constants or undefs, get the bits.  A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
// zero.  Return true if this is not an array of constants, false if it is.
//
static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
                                       uint64_t UndefBits[2]) {
  // Start with zero'd results.
  VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;

  unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    SDOperand OpVal = BV->getOperand(i);

    unsigned PartNo = i >= e/2;     // In the upper 128 bits?
    unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.

    uint64_t EltBits = 0;
    if (OpVal.getOpcode() == ISD::UNDEF) {
      uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
      UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
      continue;
    } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
    } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
      assert(CN->getValueType(0) == MVT::f32 &&
             "Only one legal FP vector type!");
      EltBits = FloatToBits(CN->getValue());
    } else {
      // Nonconstant element.
      return true;
    }

    VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
  }

  //printf("%llx %llx  %llx %llx\n",
  //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
  return false;
}

// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {

  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
  // the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.

  uint64_t Bits64  = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];

  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.

  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);

  // If the top 16-bits are different than the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }

  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);

  // If the top 8-bits are different than the lower 8-bits, ignoring
  // undefs, we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }

  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}

/// BuildSplatI - Build a canonical splat immediate of Val with an element
/// size of SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  // Force vspltis[hw] -1 to vspltisb -1.
  if (Val == -1) SplatSize = 1;

  static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  MVT::ValueType CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
  std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
}

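// Forcing -1 to the byte form above is safe because an all-ones splat has the
// same 128-bit pattern at every element size, so vspltisb -1 is a convenient
// canonical choice for it.
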
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT::ValueType VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);

  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(i+Amt, MVT::i32));
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal = int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);

    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }

    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 and vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);

      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }

    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };
    for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i
      // in this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);

      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + sra self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }

    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
    }
  }

  return SDOperand();
}

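// Each 32-bit entry in the perfect shuffle table packs a cost in bits 31-30,
// an operation number in bits 29-26, and two 13-bit operand IDs (bits 25-13
// and 12-0) that recursively name the sub-shuffles to build first; see the
// field extraction at the top of GeneratePerfectShuffle below.
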
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }

  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32));

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
}

1342 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
1343 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
1344 /// return the code it can be lowered into. Worst case, it can always be
1345 /// lowered into a vperm.
1346 static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
1347 SDOperand V1 = Op.getOperand(0);
1348 SDOperand V2 = Op.getOperand(1);
1349 SDOperand PermMask = Op.getOperand(2);
1351 // Cases that are handled by instructions that take permute immediates
1352 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
1353 // selected by the instruction selector.
1354 if (V2.getOpcode() == ISD::UNDEF) {
1355 if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
1356 PPC::isSplatShuffleMask(PermMask.Val, 2) ||
1357 PPC::isSplatShuffleMask(PermMask.Val, 4) ||
1358 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
1359 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
1360 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
1361 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
1362 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
1363 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
1364 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
1365 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
1366 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
1371 // Altivec has a variety of "shuffle immediates" that take two vector inputs
1372 // and produce a fixed permutation. If any of these match, do not lower to
1374 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
1375 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
1376 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
1377 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
1378 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
1379 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
1380 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
1381 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
1382 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
1385 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
1386 // perfect shuffle table to emit an optimal matching sequence.
1387 unsigned PFIndexes[4];
1388 bool isFourElementShuffle = true;
1389 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
1390 unsigned EltNo = 8; // Start out undef.
1391 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
1392 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
1393 continue; // Undef, ignore it.
1395 unsigned ByteSource =
1396 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
1397 if ((ByteSource & 3) != j) {
1398 isFourElementShuffle = false;
1403 EltNo = ByteSource/4;
1404 } else if (EltNo != ByteSource/4) {
1405 isFourElementShuffle = false;
1409 PFIndexes[i] = EltNo;
1412 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
1413 // perfect shuffle vector to determine if it is cost effective to do this as
1414 // discrete instructions, or whether we should use a vperm.
1415 if (isFourElementShuffle) {
1416 // Compute the index in the perfect shuffle table.
1417 unsigned PFTableIndex =
1418 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
1420 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
1421 unsigned Cost = (PFEntry >> 30);
1423 // Determining when to avoid vperm is tricky. Many things affect the cost
1424 // of vperm, particularly how many times the perm mask needs to be computed.
1425 // For example, if the perm mask can be hoisted out of a loop or is already
1426 // used (perhaps because there are multiple permutes with the same shuffle
1427 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
1428 // the loop requires an extra register.
1430 // As a compromise, we only emit discrete instructions if the shuffle can be
1431 // generated in 3 or fewer operations. When we have loop information
1432 // available, if this block is within a loop, we should avoid using vperm
1433 // for 3-operation perms and use a constant pool load instead.
1435 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
1438 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
1439 // vector that will get spilled to the constant pool.
1440 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1442 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
1443 // that it is in input element units, not in bytes. Convert now.
1444 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
1445 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
1447 std::vector<SDOperand> ResultMask;
1448 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
1449 unsigned SrcElt;
1450 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
1451 SrcElt = 0;
1452 else
1453 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
1455 for (unsigned j = 0; j != BytesPerElement; ++j)
1456 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
1457 MVT::i8));
1458 }
1460 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
1461 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
1464 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
1465 /// altivec comparison. If it is, return true and fill in Opc/isDot with
1466 /// information about the intrinsic.
1467 static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
1468 bool &isDot) {
1469 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
1472 switch (IntrinsicID) {
1473 default: return false;
1474 // Comparison predicates.
1475 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
1476 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
1477 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
1478 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break;
1479 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
1480 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
1481 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
1482 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
1483 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
1484 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
1485 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
1486 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
1487 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
1489 // Normal Comparisons.
1490 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break;
1491 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
1492 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break;
1493 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break;
1494 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
1495 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
1496 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
1497 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
1498 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
1499 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
1500 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
1501 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
1502 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
1503 }
1504 return true;
1505 }
1507 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
1508 /// lower, do it, otherwise return null.
1509 static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
1510 // If this is a lowered altivec predicate compare, CompareOpc is set to the
1511 // opcode number of the comparison.
1512 int CompareOpc;
1513 bool isDot;
1514 if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
1515 return SDOperand(); // Don't custom lower most intrinsics.
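// For reference (inferred from the uses below, not original commentary): the
// non-dot vcmp* intrinsics carry (intrinsic-id, LHS, RHS), while the vcmp*_p
// predicate forms carry (intrinsic-id, predicate-selector, LHS, RHS), with the
// selector being the 0-3 value switched on further down.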
1517 // If this is a non-dot comparison, make the VCMP node and we are done.
1518 if (!isDot) {
1519 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
1520 Op.getOperand(1), Op.getOperand(2),
1521 DAG.getConstant(CompareOpc, MVT::i32));
1522 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
1523 }
1525 // Create the PPCISD altivec 'dot' comparison node.
1526 std::vector<SDOperand> Ops;
1527 std::vector<MVT::ValueType> VTs;
1528 Ops.push_back(Op.getOperand(2)); // LHS
1529 Ops.push_back(Op.getOperand(3)); // RHS
1530 Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
1531 VTs.push_back(Op.getOperand(2).getValueType());
1532 VTs.push_back(MVT::Flag);
1533 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);
1535 // Now that we have the comparison, emit a copy from the CR to a GPR.
1536 // This is flagged to the above dot comparison.
1537 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
1538 DAG.getRegister(PPC::CR6, MVT::i32),
1539 CompNode.getValue(1));
1541 // Unpack the result based on how the target uses it.
1542 unsigned BitNo; // Bit # of CR6.
1543 bool InvertBit; // Invert result?
1544 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
1545 default: // Can't happen, don't crash on invalid number though.
1546 case 0: // Return the value of the EQ bit of CR6.
1547 BitNo = 0; InvertBit = false;
1548 break;
1549 case 1: // Return the inverted value of the EQ bit of CR6.
1550 BitNo = 0; InvertBit = true;
1551 break;
1552 case 2: // Return the value of the LT bit of CR6.
1553 BitNo = 2; InvertBit = false;
1554 break;
1555 case 3: // Return the inverted value of the LT bit of CR6.
1556 BitNo = 2; InvertBit = true;
1557 break;
1558 }
1560 // Shift the bit into the low position.
1561 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
1562 DAG.getConstant(8-(3-BitNo), MVT::i32));
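// Worked example (assuming the usual mfcr layout, where CR7 occupies bits 3:0
// and CR6 bits 7:4 of the copied value): for the EQ bit, BitNo == 0, so the
// shift amount is 8-(3-0) = 5, which is where CR6's EQ bit sits; for LT,
// BitNo == 2 gives a shift of 7.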
1564 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
1565 DAG.getConstant(1, MVT::i32));
1567 // If we are supposed to, toggle the bit.
1568 if (InvertBit)
1569 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
1570 DAG.getConstant(1, MVT::i32));
1572 return Flags;
1573 }
1574 static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
1575 // Create a stack slot that is 16-byte aligned.
1576 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
1577 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
1578 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
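// Note (explanatory, not original commentary): the 16-byte alignment matters
// because the Altivec vector load/store instructions (lvx/stvx) ignore the low
// four address bits, so an unaligned slot would silently access the wrong bytes.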
1580 // Store the input value into Value#0 of the stack slot.
1581 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
1582 Op.getOperand(0), FIdx,DAG.getSrcValue(NULL));
1584 return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
1585 }
1587 static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
1588 if (Op.getValueType() == MVT::v4i32) {
1589 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1591 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
1592 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
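// Note on the -16 splat (an observation, not original commentary): vector
// rotate/shift amounts are taken modulo 32, so -16 behaves as +16, and -16 has
// the advantage of fitting the 5-bit signed immediate of vspltisw while +16
// does not.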
1594 SDOperand RHSSwap = // = vrlw RHS, 16
1595 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
1597 // Shrinkify inputs to v8i16.
1598 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
1599 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
1600 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
1602 // Low parts multiplied together, generating 32-bit results (we ignore the
1603 // top parts).
1604 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
1605 LHS, RHS, DAG, MVT::v4i32);
1607 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
1608 LHS, RHSSwap, Zero, DAG, MVT::v4i32);
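// Sketch of the math (illustrative): writing each 32-bit lane as x = xh*2^16+xl
// and y = yh*2^16+yl, the low 32 bits of x*y are xl*yl + ((xl*yh + xh*yl)<<16).
// vmulouh(LHS,RHS) produces the xl*yl terms, and vmsumuhm(LHS,RHSSwap,0) sums
// xh*yl + xl*yh per lane because the rotate swapped yh and yl; shifting that
// left by 16 and adding gives the product below.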
1609 // Shift the high parts up 16 bits.
1610 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
1611 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
1612 } else if (Op.getValueType() == MVT::v8i16) {
1613 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1615 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
1617 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
1618 LHS, RHS, Zero, DAG);
1619 } else if (Op.getValueType() == MVT::v16i8) {
1620 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1622 // Multiply the even 8-bit parts, producing 16-bit sums.
1623 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
1624 LHS, RHS, DAG, MVT::v8i16);
1625 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
1627 // Multiply the odd 8-bit parts, producing 16-bit sums.
1628 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
1629 LHS, RHS, DAG, MVT::v8i16);
1630 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
1632 // Merge the results together.
1633 std::vector<SDOperand> Ops;
1634 for (unsigned i = 0; i != 8; ++i) {
1635 Ops.push_back(DAG.getConstant(2*i+1, MVT::i8));
1636 Ops.push_back(DAG.getConstant(2*i+1+16, MVT::i8));
1637 }
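// Explanatory note (not original commentary): mask entries 2*i+1 and 2*i+1+16
// pick the odd (low, big-endian) byte of each 16-bit product from the
// even-element and odd-element result vectors respectively, interleaving them
// back into byte order.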
1639 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
1640 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
1641 } else {
1642 assert(0 && "Unknown mul to lower!");
1643 abort();
1644 }
1645 }
1647 /// LowerOperation - Provide custom lowering hooks for some operations.
1649 SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1650 switch (Op.getOpcode()) {
1651 default: assert(0 && "Wasn't expecting to be able to lower this!");
1652 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
1653 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
1654 case ISD::SETCC: return LowerSETCC(Op, DAG);
1655 case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
1656 case ISD::RET: return LowerRET(Op, DAG);
1658 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
1659 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1660 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1662 // Lower 64-bit shifts.
1663 case ISD::SHL: return LowerSHL(Op, DAG);
1664 case ISD::SRL: return LowerSRL(Op, DAG);
1665 case ISD::SRA: return LowerSRA(Op, DAG);
1667 // Vector-related lowering.
1668 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
1669 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
1670 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1671 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
1672 case ISD::MUL: return LowerMUL(Op, DAG);
1673 }
1674 return SDOperand();
1675 }
1677 //===----------------------------------------------------------------------===//
1678 // Other Lowering Code
1679 //===----------------------------------------------------------------------===//
1681 std::vector<SDOperand>
1682 PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
1684 // add beautiful description of PPC stack frame format, or at least some docs
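// A rough sketch (assuming the 32-bit Darwin/AIX-style ABI this code targets):
// SP+0 back chain, SP+4 saved CR, SP+8 saved LR, SP+12/+16 reserved,
// SP+20 saved TOC, SP+24 first parameter word; hence ArgOffset starts at 24
// below, with R3-R10 and F1-F13 shadowing the first parameter words.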
1686 MachineFunction &MF = DAG.getMachineFunction();
1687 MachineFrameInfo *MFI = MF.getFrameInfo();
1688 MachineBasicBlock& BB = MF.front();
1689 SSARegMap *RegMap = MF.getSSARegMap();
1690 std::vector<SDOperand> ArgValues;
1692 unsigned ArgOffset = 24;
1693 unsigned GPR_remaining = 8;
1694 unsigned FPR_remaining = 13;
1695 unsigned GPR_idx = 0, FPR_idx = 0;
1696 static const unsigned GPR[] = {
1697 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1698 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1699 };
1700 static const unsigned FPR[] = {
1701 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1702 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
1703 };
1705 // Add DAG nodes to load the arguments... On entry to a function on PPC,
1706 // the arguments start at offset 24, although they are likely to be passed
1707 // in registers.
1708 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
1709 SDOperand newroot, argt;
1710 unsigned ObjSize;
1711 bool needsLoad = false;
1712 bool ArgLive = !I->use_empty();
1713 MVT::ValueType ObjectVT = getValueType(I->getType());
1715 switch (ObjectVT) {
1716 default: assert(0 && "Unhandled argument type!");
1717 case MVT::i1:
1718 case MVT::i8:
1719 case MVT::i16:
1720 case MVT::i32:
1721 ObjSize = 4;
1722 if (!ArgLive) break;
1723 if (GPR_remaining > 0) {
1724 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1725 MF.addLiveIn(GPR[GPR_idx], VReg);
1726 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1727 if (ObjectVT != MVT::i32) {
1728 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
1729 : ISD::AssertZext;
1730 argt = DAG.getNode(AssertOp, MVT::i32, argt,
1731 DAG.getValueType(ObjectVT));
1732 argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
1733 }
1734 } else {
1735 needsLoad = true;
1736 }
1737 break;
1738 case MVT::i64:
1739 ObjSize = 8;
1740 if (!ArgLive) break;
1741 if (GPR_remaining > 0) {
1742 SDOperand argHi, argLo;
1743 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1744 MF.addLiveIn(GPR[GPR_idx], VReg);
1745 argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1746 // If we have two or more remaining argument registers, then both halves
1747 // of the i64 can be sourced from there. Otherwise, the lower half will
1748 // have to come off the stack. This can happen when an i64 is preceded
1749 // by 28 bytes of arguments.
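// Concrete case (illustrative): after seven i32 arguments (28 bytes, R3-R9),
// an i64 argument has its high word in R10 but only one register left, so the
// low word is picked up from the fixed stack object at ArgOffset+4 below.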
1750 if (GPR_remaining > 1) {
1751 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1752 MF.addLiveIn(GPR[GPR_idx+1], VReg);
1753 argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
1754 } else {
1755 int FI = MFI->CreateFixedObject(4, ArgOffset+4);
1756 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
1757 argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
1758 DAG.getSrcValue(NULL));
1759 }
1760 // Build the outgoing arg thingy
1761 argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
1762 newroot = argLo;
1763 } else {
1764 needsLoad = true;
1765 }
1766 break;
1767 case MVT::f32:
1768 case MVT::f64:
1769 ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
1770 if (!ArgLive) {
1771 if (FPR_remaining > 0) {
1772 --FPR_remaining;
1773 ++FPR_idx;
1774 }
1775 break;
1776 }
1777 if (FPR_remaining > 0) {
1778 unsigned VReg;
1779 if (ObjectVT == MVT::f32)
1780 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
1782 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
1783 MF.addLiveIn(FPR[FPR_idx], VReg);
1784 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
1785 --FPR_remaining;
1786 ++FPR_idx;
1787 } else {
1788 needsLoad = true;
1789 }
1790 break;
1791 }
1793 // We need to load the argument to a virtual register if we determined above
1794 // that we ran out of physical registers of the appropriate type
1795 if (needsLoad) {
1796 unsigned SubregOffset = 0;
1797 if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
1798 if (ObjectVT == MVT::i16) SubregOffset = 2;
1799 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
1800 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
1801 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
1802 DAG.getConstant(SubregOffset, MVT::i32));
1803 argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
1804 DAG.getSrcValue(NULL));
1805 }
1807 // Every 4 bytes of argument space consumes one of the GPRs available for
1808 // argument passing.
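// Example of the shadowing rule (illustrative): a leading double argument is
// passed in F1 but still burns R3 and R4, so a following int lands in R5, and
// the stack offset advances by the full 8 bytes.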
1809 if (GPR_remaining > 0) {
1810 unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
1811 GPR_remaining -= delta;
1812 GPR_idx += delta;
1813 }
1814 ArgOffset += ObjSize;
1815 if (newroot.Val)
1816 DAG.setRoot(newroot.getValue(1));
1818 ArgValues.push_back(argt);
1819 }
1821 // If the function takes variable number of arguments, make a frame index for
1822 // the start of the first vararg value... for expansion of llvm.va_start.
1823 if (F.isVarArg()) {
1824 VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
1825 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
1826 // If this function is vararg, store any remaining integer argument regs
1827 // to their spots on the stack so that they may be loaded by dereferencing the
1828 // result of va_next.
1829 std::vector<SDOperand> MemOps;
1830 for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
1831 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1832 MF.addLiveIn(GPR[GPR_idx], VReg);
1833 SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1834 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
1835 Val, FIN, DAG.getSrcValue(NULL));
1836 MemOps.push_back(Store);
1837 // Increment the address by four for the next argument to store
1838 SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
1839 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
1840 }
1841 if (!MemOps.empty()) {
1842 MemOps.push_back(DAG.getRoot());
1843 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
1844 }
1845 }
1847 return ArgValues;
1848 }
1850 std::pair<SDOperand, SDOperand>
1851 PPCTargetLowering::LowerCallTo(SDOperand Chain,
1852 const Type *RetTy, bool isVarArg,
1853 unsigned CallingConv, bool isTailCall,
1854 SDOperand Callee, ArgListTy &Args,
1855 SelectionDAG &DAG) {
1856 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
1857 // SelectExpr to use to put the arguments in the appropriate registers.
1858 std::vector<SDOperand> args_to_use;
1860 // Count how many bytes are to be pushed on the stack, including the linkage
1861 // area, and parameter passing area.
1862 unsigned NumBytes = 24;
1864 if (Args.empty()) {
1865 Chain = DAG.getCALLSEQ_START(Chain,
1866 DAG.getConstant(NumBytes, getPointerTy()));
1867 } else {
1868 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1869 switch (getValueType(Args[i].second)) {
1870 default: assert(0 && "Unknown value type!");
1871 case MVT::i1:
1872 case MVT::i8:
1873 case MVT::i16:
1874 case MVT::i32:
1875 case MVT::f32:
1876 NumBytes += 4;
1877 break;
1878 case MVT::i64:
1879 case MVT::f64:
1880 NumBytes += 8;
1881 break;
1882 }
1883 }
1885 // Just to be safe, we'll always reserve the full 24 bytes of linkage area
1886 // plus 32 bytes of argument space in case any called code gets funky on us.
1887 // (Required by ABI to support var arg)
1888 if (NumBytes < 56) NumBytes = 56;
1890 // Adjust the stack pointer for the new arguments...
1891 // These operations are automatically eliminated by the prolog/epilog pass
1892 Chain = DAG.getCALLSEQ_START(Chain,
1893 DAG.getConstant(NumBytes, getPointerTy()));
1895 // Set up a copy of the stack pointer for use loading and storing any
1896 // arguments that may not fit in the registers available for argument
1897 // passing.
1898 SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
1900 // Figure out which arguments are going to go in registers, and which in
1901 // memory. Also, if this is a vararg function, floating point operations
1902 // must be stored to our stack, and loaded into integer regs as well, if
1903 // any integer regs are available for argument passing.
1904 unsigned ArgOffset = 24;
1905 unsigned GPR_remaining = 8;
1906 unsigned FPR_remaining = 13;
1908 std::vector<SDOperand> MemOps;
1909 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1910 // PtrOff will be used to store the current argument to the stack if a
1911 // register cannot be found for it.
1912 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1913 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
1914 MVT::ValueType ArgVT = getValueType(Args[i].second);
1916 switch (ArgVT) {
1917 default: assert(0 && "Unexpected ValueType for argument!");
1918 case MVT::i1:
1919 case MVT::i8:
1920 case MVT::i16:
1921 // Promote the integer to 32 bits. If the input type is signed use a
1922 // sign extend, otherwise use a zero extend.
1923 if (Args[i].second->isSigned())
1924 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
1925 else
1926 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
1928 case MVT::i32:
1929 if (GPR_remaining > 0) {
1930 args_to_use.push_back(Args[i].first);
1931 --GPR_remaining;
1932 } else {
1933 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1934 Args[i].first, PtrOff,
1935 DAG.getSrcValue(NULL)));
1936 }
1937 ArgOffset += 4;
1938 break;
1939 case MVT::i64:
1940 // If we have one free GPR left, we can place the upper half of the i64
1941 // in it, and store the other half to the stack. If we have two or more
1942 // free GPRs, then we can pass both halves of the i64 in registers.
1943 if (GPR_remaining > 0) {
1944 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
1945 Args[i].first, DAG.getConstant(1, MVT::i32));
1946 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
1947 Args[i].first, DAG.getConstant(0, MVT::i32));
1948 args_to_use.push_back(Hi);
1949 --GPR_remaining;
1950 if (GPR_remaining > 0) {
1951 args_to_use.push_back(Lo);
1952 --GPR_remaining;
1953 } else {
1954 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
1955 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
1956 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1957 Lo, PtrOff, DAG.getSrcValue(NULL)));
1958 }
1959 } else {
1960 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1961 Args[i].first, PtrOff,
1962 DAG.getSrcValue(NULL)));
1963 }
1964 ArgOffset += 8;
1965 break;
1966 case MVT::f32:
1967 case MVT::f64:
1968 if (FPR_remaining > 0) {
1969 args_to_use.push_back(Args[i].first);
1970 --FPR_remaining;
1971 if (isVarArg) {
1972 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
1973 Args[i].first, PtrOff,
1974 DAG.getSrcValue(NULL));
1975 MemOps.push_back(Store);
1976 // Float varargs are always shadowed in available integer registers
1977 if (GPR_remaining > 0) {
1978 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
1979 DAG.getSrcValue(NULL));
1980 MemOps.push_back(Load.getValue(1));
1981 args_to_use.push_back(Load);
1982 --GPR_remaining;
1983 }
1984 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
1985 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
1986 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
1987 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
1988 DAG.getSrcValue(NULL));
1989 MemOps.push_back(Load.getValue(1));
1990 args_to_use.push_back(Load);
1991 --GPR_remaining;
1992 }
1993 } else {
1994 // If we have any FPRs remaining, we may also have GPRs remaining.
1995 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
1996 // GPRs.
1997 if (GPR_remaining > 0) {
1998 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
1999 --GPR_remaining;
2000 }
2001 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
2002 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
2003 --GPR_remaining;
2004 }
2005 }
2006 } else {
2007 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2008 Args[i].first, PtrOff,
2009 DAG.getSrcValue(NULL)));
2010 }
2011 ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
2012 break;
2013 }
2014 }
2015 if (!MemOps.empty())
2016 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
2017 }
2019 std::vector<MVT::ValueType> RetVals;
2020 MVT::ValueType RetTyVT = getValueType(RetTy);
2021 MVT::ValueType ActualRetTyVT = RetTyVT;
2022 if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
2023 ActualRetTyVT = MVT::i32; // Promote result to i32.
2025 if (RetTyVT == MVT::i64) {
2026 RetVals.push_back(MVT::i32);
2027 RetVals.push_back(MVT::i32);
2028 } else if (RetTyVT != MVT::isVoid) {
2029 RetVals.push_back(ActualRetTyVT);
2030 }
2031 RetVals.push_back(MVT::Other);
2033 // If the callee is a GlobalAddress node (quite common, every direct call is)
2034 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
2035 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2036 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
2038 std::vector<SDOperand> Ops;
2039 Ops.push_back(Chain);
2040 Ops.push_back(Callee);
2041 Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
2042 SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
2043 Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
2044 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
2045 DAG.getConstant(NumBytes, getPointerTy()));
2046 SDOperand RetVal = TheCall;
2048 // If the result is a small value, add a note so that we keep track of the
2049 // information about whether it is sign or zero extended.
2050 if (RetTyVT != ActualRetTyVT) {
2051 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
2052 MVT::i32, RetVal, DAG.getValueType(RetTyVT));
2053 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
2054 } else if (RetTyVT == MVT::i64) {
2055 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
2056 }
2058 return std::make_pair(RetVal, Chain);
2059 }
2061 MachineBasicBlock *
2062 PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2063 MachineBasicBlock *BB) {
2064 assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
2065 MI->getOpcode() == PPC::SELECT_CC_F4 ||
2066 MI->getOpcode() == PPC::SELECT_CC_F8 ||
2067 MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
2068 "Unexpected instr type to insert");
2070 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
2071 // control-flow pattern. The incoming instruction knows the destination vreg
2072 // to set, the condition code register to branch on, the true/false values to
2073 // select between, and a branch opcode to use.
2074 const BasicBlock *LLVM_BB = BB->getBasicBlock();
2075 ilist<MachineBasicBlock>::iterator It = BB;
2076 ++It;
2078 // thisMBB:
2079 // ...
2080 // TrueVal = ...
2081 // cmpTY ccX, r1, r2
2082 // bCC sinkMBB
2083 // fallthrough --> copy0MBB
2084 MachineBasicBlock *thisMBB = BB;
2085 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
2086 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
2087 BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
2088 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
2089 MachineFunction *F = BB->getParent();
2090 F->getBasicBlockList().insert(It, copy0MBB);
2091 F->getBasicBlockList().insert(It, sinkMBB);
2092 // Update machine-CFG edges by first adding all successors of the current
2093 // block to the new block which will contain the Phi node for the select.
2094 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
2095 e = BB->succ_end(); i != e; ++i)
2096 sinkMBB->addSuccessor(*i);
2097 // Next, remove all successors of the current block, and add the true
2098 // and fallthrough blocks as its successors.
2099 while(!BB->succ_empty())
2100 BB->removeSuccessor(BB->succ_begin());
2101 BB->addSuccessor(copy0MBB);
2102 BB->addSuccessor(sinkMBB);
2104 // copy0MBB:
2105 // %FalseValue = ...
2106 // # fallthrough to sinkMBB
2107 BB = copy0MBB;
2109 // Update machine-CFG edges
2110 BB->addSuccessor(sinkMBB);
2112 // sinkMBB:
2113 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
2114 // ...
2115 BB = sinkMBB;
2116 BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
2117 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
2118 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
2120 delete MI; // The pseudo instruction is gone now.
2122 return BB;
2123 }
2124 //===----------------------------------------------------------------------===//
2125 // Target Optimization Hooks
2126 //===----------------------------------------------------------------------===//
2128 SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
2129 DAGCombinerInfo &DCI) const {
2130 TargetMachine &TM = getTargetMachine();
2131 SelectionDAG &DAG = DCI.DAG;
2132 switch (N->getOpcode()) {
2133 default: break;
2134 case ISD::SINT_TO_FP:
2135 if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
2136 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
2137 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
2138 // We allow the src/dst to be either f32/f64, but the intermediate
2139 // type must be i64.
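// In effect (an explanatory note): (sint_to_fp (fp_to_sint X)) with an i64
// intermediate becomes fcfid(fctidz(X)) plus an optional extend/round, so the
// i64 value never leaves the FPRs and the usual store/reload through memory is
// avoided.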
2140 if (N->getOperand(0).getValueType() == MVT::i64) {
2141 SDOperand Val = N->getOperand(0).getOperand(0);
2142 if (Val.getValueType() == MVT::f32) {
2143 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2144 DCI.AddToWorklist(Val.Val);
2145 }
2147 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
2148 DCI.AddToWorklist(Val.Val);
2149 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
2150 DCI.AddToWorklist(Val.Val);
2151 if (N->getValueType(0) == MVT::f32) {
2152 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
2153 DCI.AddToWorklist(Val.Val);
2154 }
2155 return Val;
2156 } else if (N->getOperand(0).getValueType() == MVT::i32) {
2157 // If the intermediate type is i32, we can avoid the load/store here
2158 // too.
2159 }
2160 }
2161 }
2162 break;
2163 case ISD::STORE:
2164 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
2165 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
2166 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
2167 N->getOperand(1).getValueType() == MVT::i32) {
2168 SDOperand Val = N->getOperand(1).getOperand(0);
2169 if (Val.getValueType() == MVT::f32) {
2170 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2171 DCI.AddToWorklist(Val.Val);
2172 }
2173 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
2174 DCI.AddToWorklist(Val.Val);
2176 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
2177 N->getOperand(2), N->getOperand(3));
2178 DCI.AddToWorklist(Val.Val);
2179 return Val;
2180 }
2181 break;
2182 case PPCISD::VCMP: {
2183 // If a VCMPo node already exists with exactly the same operands as this
2184 // node, use its result instead of this node (VCMPo computes both a CR6 and
2185 // a normal output).
2187 if (!N->getOperand(0).hasOneUse() &&
2188 !N->getOperand(1).hasOneUse() &&
2189 !N->getOperand(2).hasOneUse()) {
2191 // Scan all of the users of the LHS, looking for VCMPo's that match.
2192 SDNode *VCMPoNode = 0;
2194 SDNode *LHSN = N->getOperand(0).Val;
2195 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
2196 UI != E; ++UI)
2197 if ((*UI)->getOpcode() == PPCISD::VCMPo &&
2198 (*UI)->getOperand(1) == N->getOperand(1) &&
2199 (*UI)->getOperand(2) == N->getOperand(2) &&
2200 (*UI)->getOperand(0) == N->getOperand(0)) {
2201 VCMPoNode = *UI;
2202 break;
2203 }
2205 // If there are non-zero uses of the flag value, use the VCMPo node!
2206 if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1))
2207 return SDOperand(VCMPoNode, 0);
2208 }
2209 break;
2210 }
2211 case ISD::BR_CC: {
2212 // If this is a branch on an altivec predicate comparison, lower this so
2213 // that we don't have to do a MFCR: instead, branch directly on CR6. This
2214 // lowering is done pre-legalize, because the legalizer lowers the predicate
2215 // compare down to code that is difficult to reassemble.
2216 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
2217 SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
2218 int CompareOpc;
2219 bool isDot;
2221 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2222 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
2223 getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
2224 assert(isDot && "Can't compare against a vector result!");
2226 // If this is a comparison against something other than 0/1, then we know
2227 // that the condition is never/always true.
2228 unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
2229 if (Val != 0 && Val != 1) {
2230 if (CC == ISD::SETEQ) // Cond never true, remove branch.
2231 return N->getOperand(0);
2232 // Always !=, turn it into an unconditional branch.
2233 return DAG.getNode(ISD::BR, MVT::Other,
2234 N->getOperand(0), N->getOperand(4));
2235 }
2237 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
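// Quick truth table for the XOR above (explanatory, not original commentary):
// "== 1" and "!= 0" both mean branch when the predicate intrinsic returned
// true, while "== 0" and "!= 1" mean branch when it returned false.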
2239 // Create the PPCISD altivec 'dot' comparison node.
2240 std::vector<SDOperand> Ops;
2241 std::vector<MVT::ValueType> VTs;
2242 Ops.push_back(LHS.getOperand(2)); // LHS of compare
2243 Ops.push_back(LHS.getOperand(3)); // RHS of compare
2244 Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
2245 VTs.push_back(LHS.getOperand(2).getValueType());
2246 VTs.push_back(MVT::Flag);
2247 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);
2249 // Unpack the result based on how the target uses it.
2250 unsigned CompOpc;
2251 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
2252 default: // Can't happen, don't crash on invalid number though.
2253 case 0: // Branch on the value of the EQ bit of CR6.
2254 CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
2255 break;
2256 case 1: // Branch on the inverted value of the EQ bit of CR6.
2257 CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
2258 break;
2259 case 2: // Branch on the value of the LT bit of CR6.
2260 CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
2261 break;
2262 case 3: // Branch on the inverted value of the LT bit of CR6.
2263 CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
2264 break;
2265 }
2267 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
2268 DAG.getRegister(PPC::CR6, MVT::i32),
2269 DAG.getConstant(CompOpc, MVT::i32),
2270 N->getOperand(4), CompNode.getValue(1));
2271 }
2272 break;
2273 }
2274 }
2276 return SDOperand();
2277 }
2279 //===----------------------------------------------------------------------===//
2280 // Inline Assembly Support
2281 //===----------------------------------------------------------------------===//
2283 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
2284 uint64_t Mask,
2285 uint64_t &KnownZero,
2286 uint64_t &KnownOne,
2287 unsigned Depth) const {
2288 KnownZero = 0;
2289 KnownOne = 0;
2290 switch (Op.getOpcode()) {
2291 default: break;
2292 case ISD::INTRINSIC_WO_CHAIN: {
2293 switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
2294 default: break;
2295 case Intrinsic::ppc_altivec_vcmpbfp_p:
2296 case Intrinsic::ppc_altivec_vcmpeqfp_p:
2297 case Intrinsic::ppc_altivec_vcmpequb_p:
2298 case Intrinsic::ppc_altivec_vcmpequh_p:
2299 case Intrinsic::ppc_altivec_vcmpequw_p:
2300 case Intrinsic::ppc_altivec_vcmpgefp_p:
2301 case Intrinsic::ppc_altivec_vcmpgtfp_p:
2302 case Intrinsic::ppc_altivec_vcmpgtsb_p:
2303 case Intrinsic::ppc_altivec_vcmpgtsh_p:
2304 case Intrinsic::ppc_altivec_vcmpgtsw_p:
2305 case Intrinsic::ppc_altivec_vcmpgtub_p:
2306 case Intrinsic::ppc_altivec_vcmpgtuh_p:
2307 case Intrinsic::ppc_altivec_vcmpgtuw_p:
2308 KnownZero = ~1U; // All bits but the low one are known to be zero.
2309 break;
2310 }
2311 }
2312 }
2313 }
2316 /// getConstraintType - Given a constraint letter, return the type of
2317 /// constraint it is for this target.
2318 PPCTargetLowering::ConstraintType
2319 PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
2320 switch (ConstraintLetter) {
2321 default: break;
2322 case 'b':
2323 case 'r':
2324 case 'f':
2325 case 'v':
2326 case 'y':
2327 return C_RegisterClass;
2328 }
2329 return TargetLowering::getConstraintType(ConstraintLetter);
2330 }
2333 std::vector<unsigned> PPCTargetLowering::
2334 getRegClassForInlineAsmConstraint(const std::string &Constraint,
2335 MVT::ValueType VT) const {
2336 if (Constraint.size() == 1) {
2337 switch (Constraint[0]) { // GCC RS6000 Constraint Letters
2338 default: break; // Unknown constraint letter
2339 case 'b':
2340 return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
2341 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
2342 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
2343 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
2344 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
2345 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
2346 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
2347 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
2348 0);
2349 case 'r':
2350 return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
2351 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
2352 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
2353 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
2354 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
2355 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
2356 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
2357 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
2358 0);
2359 case 'f':
2360 return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
2361 PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
2362 PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
2363 PPC::F12, PPC::F13, PPC::F14, PPC::F15,
2364 PPC::F16, PPC::F17, PPC::F18, PPC::F19,
2365 PPC::F20, PPC::F21, PPC::F22, PPC::F23,
2366 PPC::F24, PPC::F25, PPC::F26, PPC::F27,
2367 PPC::F28, PPC::F29, PPC::F30, PPC::F31,
2368 0);
2369 case 'v':
2370 return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
2371 PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
2372 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
2373 PPC::V12, PPC::V13, PPC::V14, PPC::V15,
2374 PPC::V16, PPC::V17, PPC::V18, PPC::V19,
2375 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
2376 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
2377 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
2378 0);
2379 case 'y':
2380 return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
2381 PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
2382 0);
2383 }
2384 }
2386 return std::vector<unsigned>();
2387 }
2389 // isOperandValidForConstraint
2390 bool PPCTargetLowering::
2391 isOperandValidForConstraint(SDOperand Op, char Letter) {
2392 switch (Letter) {
2393 default: break;
2394 case 'I':
2395 case 'J':
2396 case 'K':
2397 case 'L':
2398 case 'M':
2399 case 'N':
2400 case 'O':
2401 case 'P': {
2402 if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate.
2403 unsigned Value = cast<ConstantSDNode>(Op)->getValue();
2404 switch (Letter) {
2405 default: assert(0 && "Unknown constraint letter!");
2406 case 'I': // "I" is a signed 16-bit constant.
2407 return (short)Value == (int)Value;
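// For instance (illustrative): 0xFFFF8000 (-32768) satisfies 'I' because
// sign-extending its low 16 bits reproduces the full value, while 0x00008000
// (32768) does not.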
2408 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
2409 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
2410 return (short)Value == 0;
2411 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
2412 return (Value >> 16) == 0;
2413 case 'M': // "M" is a constant that is greater than 31.
2414 return Value > 31;
2415 case 'N': // "N" is a positive constant that is an exact power of two.
2416 return (int)Value > 0 && isPowerOf2_32(Value);
2417 case 'O': // "O" is the constant zero.
2418 return Value == 0;
2419 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
2420 return (short)-Value == (int)-Value;
2421 }
2422 break;
2423 }
2424 }
2426 // Handle standard constraint letters.
2427 return TargetLowering::isOperandValidForConstraint(Op, Letter);
2428 }
2430 /// isLegalAddressImmediate - Return true if the integer value can be used
2431 /// as the offset of the target addressing mode.
2432 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
2433 // PPC allows a sign-extended 16-bit immediate field.
2434 return (V > -(1 << 16) && V < (1 << 16)-1);
2435 }