//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
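
// When this flag is passed, ISel will try to form pre-increment (update-form)
// loads and stores via getPreIndexedAddressParts() below; it defaults to off.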
static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC does not have truncstore for i1.
  setStoreXAction(MVT::i1, Promote);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // If we're enabling GP optimizations, use hardware square root.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB, (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::AND, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::OR, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::XOR, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::LOAD, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }
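
  // Miscellaneous target characteristics: setcc produces an i32 holding 0 or 1,
  // and shift amounts are i32 values as well.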
  setSetCCResultType(MVT::i32);
  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64())
    setStackPointerRegisterToSaveRestore(PPC::X1);
  else
    setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  computeRegisterProperties();
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL:         return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  }
}
//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}
/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
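/// vpkuhum keeps the low-order (odd-numbered, in big-endian order) byte of
/// each halfword of its concatenated inputs, so every mask element must
/// select byte i*2+1; the unary form requires both halves of the mask to
/// read from the first input vector only.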
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
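/// vpkuwum keeps the low-order halfword of each word, so the mask must select
/// byte pairs (i*2+2, i*2+3); the unary form again requires both halves of
/// the mask to read from the first input vector only.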
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}
/// isVMerge - Common function, used to match vmrg* shuffles.
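/// LHSStart and RHSStart are the byte indices (within the 32-byte concatenated
/// shuffle source) at which the left and right inputs of the merge begin,
/// e.g. 0/16 for vmrgh* and 8/24 for vmrgl*.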
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)      // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {    // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
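/// A vsldoi mask is a run of 16 consecutive byte indices starting at the
/// shift amount; for the unary form the indices are taken modulo 16.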
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
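/// EltSize gives the width of the splatted element in bytes (1, 2 or 4).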
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
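/// This is simply the byte index of the first mask element divided by the
/// element size.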
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
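/// On success, Base and Index are set to the two values to be added.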
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    uint64_t LHSKnownZero, LHSKnownOne;
    uint64_t RHSKnownZero, RHSKnownOne;
    ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
    ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
    // If all of the bits are known zero on the LHS or RHS, the add won't
    // change the result.
    if ((LHSKnownZero | RHSKnownZero) == ~0U) {
      Base = N.getOperand(0);
      Index = N.getOperand(1);
      return true;
    }
  }

  return false;
}
/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
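/// On success, Disp holds the sign-extended 16-bit displacement and Base the
/// base register (or a target frame index).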
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // FIXME: Handle small sext constant offsets in PPC64 mode also!
    if (CN->getValueType(0) == MVT::i32) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
      Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true; // [r+0]
}
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}
/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
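/// The returned displacement is already shifted right by two, matching the
/// scaled DS-form encoding used by STD/LD.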
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 14-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // FIXME: Handle small sext constant offsets in PPC64 mode also!
    if (CN->getValueType(0) == MVT::i32) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
      Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true; // [r+0]
}
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
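/// This is only queried when the enable-ppc-preinc flag is set; only r+imm
/// style addresses are formed here (reg+reg update forms are a TODO below).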
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT::ValueType VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getLoadedVT();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getStoredVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (MVT::isVector(VT))
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}
//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}
static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}
static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}
static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
  return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                      SV->getOffset());
}
static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
                                       int &VarArgsFrameIndex) {
  // TODO: add description of PPC stack frame format, or at least some docs.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SSARegMap *RegMap = MF.getSSARegMap();
  SmallVector<SDOperand, 8> ArgValues;
  SDOperand Root = Op.getOperand(0);

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };
  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]);
  const unsigned Num_FPR_Regs = sizeof(FPR)/sizeof(FPR[0]);
  const unsigned Num_VR_Regs  = sizeof( VR)/sizeof( VR[0]);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
  for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
    SDOperand ArgVal;
    bool needsLoad = false;
    MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
    unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
    unsigned ArgSize = ObjSize;

    unsigned CurArgOffset = ArgOffset;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i32:
      // All int arguments reserve stack space.
      ArgOffset += PtrByteSize;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      break;
    case MVT::i64:  // PPC64
      // All int arguments reserve stack space.
      ArgOffset += 8;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // All FP arguments reserve stack space.
      ArgOffset += isPPC64 ? 8 : ObjSize;

      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
        MF.addLiveIn(VR[VR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++VR_idx;
      } else {
        // This should be simple, but requires getting 16-byte aligned stack
        // values.
        assert(0 && "Loading VR argument not implemented yet!");
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      // If the argument is actually used, emit a load from the right stack
      // slot.
      if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
        int FI = MFI->CreateFixedObject(ObjSize,
                                        CurArgOffset + (ArgSize - ObjSize));
        SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
        ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
      } else {
        // Don't emit a dead load.
        ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
      }
    }

    ArgValues.push_back(ArgVal);
  }

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {
    VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
                                               ArgOffset);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    SmallVector<SDOperand, 8> MemOps;
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;
      if (isPPC64)
        VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
      else
        VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);

      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
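/// The value must have its low two bits clear and fit in the 26-bit signed
/// absolute-address field of BLA; the returned constant is pre-shifted right
/// by two, ready for use as the branch target operand.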
static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 6 >> 6) != Addr)
    return 0;  // Top 6 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
}
1275 static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
1276 SDOperand Chain = Op.getOperand(0);
1277 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1278 SDOperand Callee = Op.getOperand(4);
1279 unsigned NumOps = (Op.getNumOperands() - 5) / 2;
1281 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1282 bool isPPC64 = PtrVT == MVT::i64;
1283 unsigned PtrByteSize = isPPC64 ? 8 : 4;
1285 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
1286 // SelectExpr to use to put the arguments in the appropriate registers.
1287 std::vector<SDOperand> args_to_use;
1289 // Count how many bytes are to be pushed on the stack, including the linkage
1290 // area, and parameter passing area. We start with 24/48 bytes, which is
1291 // prereserved space for [SP][CR][LR][3 x unused].
1292 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64);
1294 // Add up all the space actually used.
1295 for (unsigned i = 0; i != NumOps; ++i) {
1296 unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
1297 ArgSize = std::max(ArgSize, PtrByteSize);
1298 NumBytes += ArgSize;
1301 // The prolog code of the callee may store up to 8 GPR argument registers to
1302 // the stack, allowing va_start to index over them in memory if its varargs.
1303 // Because we cannot tell if this is needed on the caller side, we have to
1304 // conservatively assume that it is needed. As such, make sure we have at
1305 // least enough stack space for the caller to store the 8 GPRs.
1306 NumBytes = std::max(NumBytes, PPCFrameInfo::getMinCallFrameSize(isPPC64));
1308 // Adjust the stack pointer for the new arguments...
1309 // These operations are automatically eliminated by the prolog/epilog pass
1310 Chain = DAG.getCALLSEQ_START(Chain,
1311 DAG.getConstant(NumBytes, PtrVT));
1313 // Set up a copy of the stack pointer for use loading and storing any
1314 // arguments that may not fit in the registers available for argument
1318 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
1320 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
1322 // Figure out which arguments are going to go in registers, and which in
1323 // memory. Also, if this is a vararg function, floating point operations
1324 // must be stored to our stack, and loaded into integer regs as well, if
1325 // any integer regs are available for argument passing.
1326 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
1327 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
1329 static const unsigned GPR_32[] = { // 32-bit registers.
1330 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1331 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1333 static const unsigned GPR_64[] = { // 64-bit registers.
1334 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
1335 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
1337 static const unsigned FPR[] = {
1338 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1339 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
1341 static const unsigned VR[] = {
1342 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
1343 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
1345 const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
1346 const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]);
1347 const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]);
1349 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
1351 std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1352 SmallVector<SDOperand, 8> MemOpChains;
1353 for (unsigned i = 0; i != NumOps; ++i) {
1354 SDOperand Arg = Op.getOperand(5+2*i);
1356 // PtrOff will be used to store the current argument to the stack if a
1357 // register cannot be found for it.
1358 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1359 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
1361 // On PPC64, promote integers to 64-bit values.
1362 if (isPPC64 && Arg.getValueType() == MVT::i32) {
1363 unsigned ExtOp = ISD::ZERO_EXTEND;
1364 if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue())
1365 ExtOp = ISD::SIGN_EXTEND;
1366 Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
1369 switch (Arg.getValueType()) {
1370 default: assert(0 && "Unexpected ValueType for argument!");
1373 if (GPR_idx != NumGPRs) {
1374 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
1376 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1378 ArgOffset += PtrByteSize;
1382 if (isVarArg && isPPC64) {
1383 // Float varargs need to be promoted to double.
1384 if (Arg.getValueType() == MVT::f32)
1385 Arg = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Arg);
1388 if (FPR_idx != NumFPRs) {
1389 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
1392 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
1393 MemOpChains.push_back(Store);
1395 // Float varargs are always shadowed in available integer registers
1396 if (GPR_idx != NumGPRs) {
1397 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
1398 MemOpChains.push_back(Load.getValue(1));
1399 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
1401 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
1402 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
1403 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
1404 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
1405 MemOpChains.push_back(Load.getValue(1));
1406 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
1409 // If we have any FPRs remaining, we may also have GPRs remaining.
1410 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
1412 if (GPR_idx != NumGPRs)
1414 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64)
1418 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1423 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
1429 assert(!isVarArg && "Don't support passing vectors to varargs yet!");
1430 assert(VR_idx != NumVRs &&
1431 "Don't support passing more than 12 vector args yet!");
1432 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
1436 if (!MemOpChains.empty())
1437 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1438 &MemOpChains[0], MemOpChains.size());
1440 // Build a sequence of copy-to-reg nodes chained together with token chain
1441 // and flag operands which copy the outgoing args into the appropriate regs.
1443 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1444 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1446 InFlag = Chain.getValue(1);
1449 std::vector<MVT::ValueType> NodeTys;
1450 NodeTys.push_back(MVT::Other); // Returns a chain
1451 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1453 SmallVector<SDOperand, 8> Ops;
1454 unsigned CallOpc = PPCISD::CALL;
1456 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1457 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1458 // node so that legalize doesn't hack it.
1459 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1460 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
1461 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1462 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
1463 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
1464 // If this is an absolute destination address, use the munged value.
1465 Callee = SDOperand(Dest, 0);
1467 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
1468 // to do the call, we can't use PPCISD::CALL.
1469 SDOperand MTCTROps[] = {Chain, Callee, InFlag};
1470 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0));
1471 InFlag = Chain.getValue(1);
1473 // Copy the callee address into R12 on darwin.
1474 Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag);
1475 InFlag = Chain.getValue(1);
1478 NodeTys.push_back(MVT::Other);
1479 NodeTys.push_back(MVT::Flag);
1480 Ops.push_back(Chain);
1481 CallOpc = PPCISD::BCTRL;
1485 // If this is a direct call, pass the chain and the callee.
1487 Ops.push_back(Chain);
1488 Ops.push_back(Callee);
1491 // Add argument registers to the end of the list so that they are known live
1493 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1494 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1495 RegsToPass[i].second.getValueType()));
1498 Ops.push_back(InFlag);
1499 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
1500 InFlag = Chain.getValue(1);
1502 SDOperand ResultVals[3];
1503 unsigned NumResults = 0;
1506 // If the call has results, copy the values out of the ret val registers.
1507 switch (Op.Val->getValueType(0)) {
1508 default: assert(0 && "Unexpected ret value!");
1509 case MVT::Other: break;
1511 if (Op.Val->getValueType(1) == MVT::i32) {
1512 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
1513 ResultVals[0] = Chain.getValue(0);
1514 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
1515 Chain.getValue(2)).getValue(1);
1516 ResultVals[1] = Chain.getValue(0);
1518 NodeTys.push_back(MVT::i32);
1520 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
1521 ResultVals[0] = Chain.getValue(0);
1524 NodeTys.push_back(MVT::i32);
1527 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
1528 ResultVals[0] = Chain.getValue(0);
1530 NodeTys.push_back(MVT::i64);
1534 Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
1535 InFlag).getValue(1);
1536 ResultVals[0] = Chain.getValue(0);
1538 NodeTys.push_back(Op.Val->getValueType(0));
1544 Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
1545 InFlag).getValue(1);
1546 ResultVals[0] = Chain.getValue(0);
1548 NodeTys.push_back(Op.Val->getValueType(0));
1552 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
1553 DAG.getConstant(NumBytes, PtrVT));
1554 NodeTys.push_back(MVT::Other);
1556 // If the function returns void, just return the chain.
1557 if (NumResults == 0)
1560 // Otherwise, merge everything together with a MERGE_VALUES node.
1561 ResultVals[NumResults++] = Chain;
1562 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1563 ResultVals, NumResults);
1564 return Res.getValue(Op.ResNo);
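/// LowerRET - Lower ISD::RET to a PPCISD::RET_FLAG node. The return value,
/// if present, is copied into the ABI-designated return register (R3 for
/// i32, F1 for floating point, or the R3/R4 pair when an i64 is returned as
/// two 32-bit halves) and that register is added to the function's live-out
/// set.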
1567 static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
1569 switch(Op.getNumOperands()) {
1571 assert(0 && "Do not know how to return this many arguments!");
1574 return SDOperand(); // ret void is legal
1576 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
1578 if (ArgVT == MVT::i32) {
1580 } else if (ArgVT == MVT::i64) {
1582 } else if (MVT::isVector(ArgVT)) {
1585 assert(MVT::isFloatingPoint(ArgVT));
1589 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
1592 // If we haven't noted that R3/F1 is live out, do so now.
1593 if (DAG.getMachineFunction().liveout_empty())
1594 DAG.getMachineFunction().addLiveOut(ArgReg);
1598 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3),
1600 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
1601 // If we haven't noted that R3+R4 are live out, do so now.
1602 if (DAG.getMachineFunction().liveout_empty()) {
1603 DAG.getMachineFunction().addLiveOut(PPC::R3);
1604 DAG.getMachineFunction().addLiveOut(PPC::R4);
1608 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
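/// LowerSTACKRESTORE - Reset the stack pointer to a previously saved value
/// while keeping the SP back-chain intact: the old link word is loaded from
/// the current SP, the SP is restored from the saved value, and the link
/// word is stored back at the new stack pointer.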
1611 static SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG,
1612 const PPCSubtarget &Subtarget) {
1613 // When we pop the dynamic allocation we need to restore the SP link.
1615 // Get the correct type for pointers.
1616 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1618 // Construct the stack pointer operand.
1619 bool IsPPC64 = Subtarget.isPPC64();
1620 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
1621 SDOperand StackPtr = DAG.getRegister(SP, PtrVT);
1623 // Get the operands for the STACKRESTORE.
1624 SDOperand Chain = Op.getOperand(0);
1625 SDOperand SaveSP = Op.getOperand(1);
1627 // Load the old link SP.
1628 SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
1630 // Restore the stack pointer.
1631 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
1633 // Store the old link SP.
1634 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
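/// LowerDYNAMIC_STACKALLOC - Lower a dynamic alloca into a PPCISD::DYNALLOC
/// node. The requested size is negated (the PPC stack grows down) and the
/// frame pointer save slot is created on first use so the DYNALLOC expansion
/// has a fixed slot to refer to.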
1637 static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
1638 const PPCSubtarget &Subtarget) {
1639 MachineFunction &MF = DAG.getMachineFunction();
1640 bool IsPPC64 = Subtarget.isPPC64();
1642 // Get current frame pointer save index. The users of this index will be
1643 // primarily DYNALLOC instructions.
1644 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1645 int FPSI = FI->getFramePointerSaveIndex();
1647 // If the frame pointer save index hasn't been defined yet.
1649 // Find out the fixed offset of the frame pointer save area.
1650 int Offset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64);
1651 // Allocate the frame index for frame pointer save area.
1652 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, Offset);
1654 FI->setFramePointerSaveIndex(FPSI);
1658 SDOperand Chain = Op.getOperand(0);
1659 SDOperand Size = Op.getOperand(1);
1661 // Get the correct type for pointers.
1662 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1664 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
1665 DAG.getConstant(0, PtrVT), Size);
1666 // Construct a node for the frame pointer save index.
1667 SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT);
1668 // Build a DYNALLOC node.
1669 SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
1670 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
1671 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
1675 /// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction when possible.
1677 static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
1678 // Not FP? Not a fsel.
1679 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
1680 !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
1683 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1685 // Cannot handle SETEQ/SETNE.
1686 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
1688 MVT::ValueType ResVT = Op.getValueType();
1689 MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
1690 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1691 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
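// PPCISD::FSEL yields its second operand when its first operand compares
// >= +0.0 and its third operand otherwise, so (roughly) select_cc setge
// LHS, RHS, TV, FV maps onto fsel (fsub LHS, RHS), TV, FV, while setlt is
// handled by swapping TV and FV.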
1693 // If the RHS of the comparison is a 0.0, we don't need to do the
1694 // subtraction at all.
1695 if (isFloatingPointZero(RHS))
1697 default: break; // SETUO etc aren't handled by fsel.
1701 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
1705 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
1706 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1707 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
1711 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
1715 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
1716 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1717 return DAG.getNode(PPCISD::FSEL, ResVT,
1718 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
1723 default: break; // SETUO etc aren't handled by fsel.
1727 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1728 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1729 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1730 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1734 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1735 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1736 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1737 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
1741 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
1742 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1743 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1744 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1748 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
1749 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
1750 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1751 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
1756 static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
1757 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
1758 SDOperand Src = Op.getOperand(0);
1759 if (Src.getValueType() == MVT::f32)
1760 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
1763 switch (Op.getValueType()) {
1764 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
1766 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
1769 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
1773 // Convert the FP value to an int value through memory.
1774 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
1775 if (Op.getValueType() == MVT::i32)
1776 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
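/// LowerSINT_TO_FP - i64 sources are bit-converted to f64 and converted with
/// fcfid. i32 sources (only generated when 64-bit instructions are
/// available) are sign extended to 64 bits, stored to a stack slot, reloaded
/// as f64 and then run through fcfid, with a final round down to f32 when
/// the result type requires it.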
1780 static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
1781 if (Op.getOperand(0).getValueType() == MVT::i64) {
1782 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
1783 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
1784 if (Op.getValueType() == MVT::f32)
1785 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
1789 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
1790 "Unhandled SINT_TO_FP type in custom expander!");
1791 // Since we only generate this in 64-bit mode, we can take advantage of
1792 // 64-bit registers. In particular, sign extend the input value into the
1793 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
1794 // slot, then lfd it and fcfid it.
1795 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
1796 int FrameIdx = FrameInfo->CreateStackObject(8, 8);
1797 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1798 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
1800 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
1803 // STD the extended value into the stack slot.
1804 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
1805 DAG.getEntryNode(), Ext64, FIdx,
1806 DAG.getSrcValue(NULL));
1807 // Load the value as a double.
1808 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);
1810 // FCFID it and return it.
1811 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
1812 if (Op.getValueType() == MVT::f32)
1813 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
1817 static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
1818 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1819 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
1821 // Expand into a bunch of logical ops. Note that these ops
1822 // depend on the PPC behavior for oversized shift amounts.
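// Rough sketch of the value built for (Hi,Lo) << Amt with 0 <= Amt < 64:
//   OutLo = Lo << Amt
//   OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))
// The 32-bit PPC shifts produce 0 whenever the low 6 bits of the shift
// amount fall in [32,63], so the terms that do not apply for a given Amt
// simply drop out of the ORs.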
1823 SDOperand Lo = Op.getOperand(0);
1824 SDOperand Hi = Op.getOperand(1);
1825 SDOperand Amt = Op.getOperand(2);
1827 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1828 DAG.getConstant(32, MVT::i32), Amt);
1829 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
1830 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
1831 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1832 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1833 DAG.getConstant(-32U, MVT::i32));
1834 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
1835 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
1836 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
1837 SDOperand OutOps[] = { OutLo, OutHi };
1838 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
1842 static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) {
1843 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1844 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
1846 // Otherwise, expand into a bunch of logical ops. Note that these ops
1847 // depend on the PPC behavior for oversized shift amounts.
1848 SDOperand Lo = Op.getOperand(0);
1849 SDOperand Hi = Op.getOperand(1);
1850 SDOperand Amt = Op.getOperand(2);
1852 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1853 DAG.getConstant(32, MVT::i32), Amt);
1854 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
1855 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
1856 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1857 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1858 DAG.getConstant(-32U, MVT::i32));
1859 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
1860 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
1861 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
1862 SDOperand OutOps[] = { OutLo, OutHi };
1863 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
1867 static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) {
1868 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1869 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
1871 // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
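// OutHi is simply Hi >>a Amt (sraw already sign-fills for amounts >= 32).
// For OutLo the OR-of-shifts form (Lo >> Amt) | (Hi << (32 - Amt)) is only
// correct while Amt <= 32; for larger amounts the low word must come from
// Hi >>a (Amt - 32), so a select_cc on (Amt - 32) <= 0 picks between the
// two forms.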
1872 SDOperand Lo = Op.getOperand(0);
1873 SDOperand Hi = Op.getOperand(1);
1874 SDOperand Amt = Op.getOperand(2);
1876 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
1877 DAG.getConstant(32, MVT::i32), Amt);
1878 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
1879 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
1880 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
1881 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
1882 DAG.getConstant(-32U, MVT::i32));
1883 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
1884 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
1885 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
1886 Tmp4, Tmp6, ISD::SETLE);
1887 SDOperand OutOps[] = { OutLo, OutHi };
1888 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
1892 //===----------------------------------------------------------------------===//
1893 // Vector related lowering.
1896 // If this is a vector of constants or undefs, get the bits. A bit in
1897 // UndefBits is set if the corresponding element of the vector is an
1898 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
1899 // zero. Return true if this is not an array of constants, false if it is.
1901 static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
1902 uint64_t UndefBits[2]) {
1903 // Start with zero'd results.
1904 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
1906 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
1907 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
1908 SDOperand OpVal = BV->getOperand(i);
1910 unsigned PartNo = i >= e/2; // In the upper half of the vector?
1911 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
1913 uint64_t EltBits = 0;
1914 if (OpVal.getOpcode() == ISD::UNDEF) {
1915 uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
1916 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
1918 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1919 EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
1920 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
1921 assert(CN->getValueType(0) == MVT::f32 &&
1922 "Only one legal FP vector type!");
1923 EltBits = FloatToBits(CN->getValue());
1925 // Nonconstant element.
1929 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
1932 //printf("%llx %llx %llx %llx\n",
1933 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
1937 // If this is a splat (repetition) of a value across the whole vector, return
1938 // the smallest size that splats it. For example, "0x01010101010101..." is a
1939 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
1940 // SplatSize = 1 byte.
1941 static bool isConstantSplat(const uint64_t Bits128[2],
1942 const uint64_t Undef128[2],
1943 unsigned &SplatBits, unsigned &SplatUndef,
1944 unsigned &SplatSize) {
1946 // Don't let undefs prevent splats from matching. See if the top 64-bits are
1947 // the same as the lower 64-bits, ignoring undefs.
1948 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
1949 return false; // Can't be a splat if two pieces don't match.
1951 uint64_t Bits64 = Bits128[0] | Bits128[1];
1952 uint64_t Undef64 = Undef128[0] & Undef128[1];
1954 // Check that the top 32-bits are the same as the lower 32-bits, ignoring
1956 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
1957 return false; // Can't be a splat if two pieces don't match.
1959 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
1960 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
1962 // If the top 16-bits are different than the lower 16-bits, ignoring
1963 // undefs, we have an i32 splat.
1964 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
1966 SplatUndef = Undef32;
1971 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16);
1972 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
1974 // If the top 8-bits are different than the lower 8-bits, ignoring
1975 // undefs, we have an i16 splat.
1976 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
1978 SplatUndef = Undef16;
1983 // Otherwise, we have an 8-bit splat.
1984 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
1985 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
1990 /// BuildSplatI - Build a canonical splati of Val with an element size of
1991 /// SplatSize. Cast the result to VT.
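/// For example, BuildSplatI(-1, 4, MVT::v4i32, DAG) builds the canonical
/// all-ones v4i32 vector, which selects to a single vspltisw -1.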
1992 static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
1993 SelectionDAG &DAG) {
1994 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
1996 static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
1997 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
2000 MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
2002 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
2006 MVT::ValueType CanonicalVT = VTys[SplatSize-1];
2008 // Build a canonical splat for this value.
2009 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
2010 SmallVector<SDOperand, 8> Ops;
2011 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
2012 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
2013 &Ops[0], Ops.size());
2014 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
2017 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
2018 /// specified intrinsic ID.
2019 static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
2021 MVT::ValueType DestVT = MVT::Other) {
2022 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
2023 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
2024 DAG.getConstant(IID, MVT::i32), LHS, RHS);
2027 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
2028 /// specified intrinsic ID.
2029 static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
2030 SDOperand Op2, SelectionDAG &DAG,
2031 MVT::ValueType DestVT = MVT::Other) {
2032 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
2033 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
2034 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
2038 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
2039 /// amount. The result has the specified value type.
2040 static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
2041 MVT::ValueType VT, SelectionDAG &DAG) {
2042 // Force LHS/RHS to be the right type.
2043 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
2044 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
2047 for (unsigned i = 0; i != 16; ++i)
2048 Ops[i] = DAG.getConstant(i+Amt, MVT::i32);
2049 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
2050 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
2051 return DAG.getNode(ISD::BIT_CONVERT, VT, T);
2054 // If this is a case we can't handle, return null and let the default
2055 // expansion code take care of it. If we CAN select this case, and if it
2056 // selects to a single instruction, return Op. Otherwise, if we can codegen
2057 // this case more efficiently than a constant pool load, lower it to the
2058 // sequence of ops that should be used.
2059 static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
2060 // If this is a vector of constants or undefs, get the bits. A bit in
2061 // UndefBits is set if the corresponding element of the vector is an
2062 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
2064 uint64_t VectorBits[2];
2065 uint64_t UndefBits[2];
2066 if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
2067 return SDOperand(); // Not a constant vector.
2069 // If this is a splat (repetition) of a value across the whole vector, return
2070 // the smallest size that splats it. For example, "0x01010101010101..." is a
2071 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
2072 // SplatSize = 1 byte.
2073 unsigned SplatBits, SplatUndef, SplatSize;
2074 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
2075 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
2077 // First, handle single instruction cases.
2080 if (SplatBits == 0) {
2081 // Canonicalize all zero vectors to be v4i32.
2082 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
2083 SDOperand Z = DAG.getConstant(0, MVT::i32);
2084 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
2085 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
2090 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
2091 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
2092 if (SextVal >= -16 && SextVal <= 15)
2093 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
2096 // Two instruction sequences.
2098 // If this value is in the range [-32,30] and is even, use:
2099 // tmp = VSPLTI[bhw], result = add tmp, tmp
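// e.g. a splat of 24 with SplatSize == 1: tmp = VSPLTIB 12, result = add tmp, tmp.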
2100 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
2101 Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
2102 return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
2105 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
2106 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
2108 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
2109 // Make -1 and vspltisw -1:
2110 SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
2112 // Make the VSLW intrinsic, computing 0x8000_0000.
2113 SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
2116 // xor by OnesV to invert it.
2117 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
2118 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2121 // Check to see if this is a wide variety of vsplti*, binop self cases.
2122 unsigned SplatBitSize = SplatSize*8;
2123 static const char SplatCsts[] = {
2124 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
2125 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
2128 for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
2129 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
2130 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
2131 int i = SplatCsts[idx];
2133 // Figure out what shift amount will be used by altivec if shifted by i in this splat size.
2135 unsigned TypeShiftAmt = i & (SplatBitSize-1);
2137 // vsplti + shl self.
2138 if (SextVal == (i << (int)TypeShiftAmt)) {
2139 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2140 static const unsigned IIDs[] = { // Intrinsic to use for each size.
2141 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
2142 Intrinsic::ppc_altivec_vslw
2144 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2145 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2148 // vsplti + srl self.
2149 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
2150 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2151 static const unsigned IIDs[] = { // Intrinsic to use for each size.
2152 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
2153 Intrinsic::ppc_altivec_vsrw
2155 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2156 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2159 // vsplti + sra self.
2160 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
2161 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2162 static const unsigned IIDs[] = { // Intrinsic to use for each size.
2163 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
2164 Intrinsic::ppc_altivec_vsraw
2166 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2167 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2170 // vsplti + rol self.
2171 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
2172 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
2173 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2174 static const unsigned IIDs[] = { // Intrinsic to use for each size.
2175 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
2176 Intrinsic::ppc_altivec_vrlw
2178 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2179 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2182 // t = vsplti c, result = vsldoi t, t, 1
2183 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
2184 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
2185 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
2187 // t = vsplti c, result = vsldoi t, t, 2
2188 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
2189 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
2190 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
2192 // t = vsplti c, result = vsldoi t, t, 3
2193 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
2194 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
2195 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
2199 // Three instruction sequences.
2201 // Odd, in range [17,31]: (vsplti C)-(vsplti -16).
2202 if (SextVal >= 0 && SextVal <= 31) {
2203 SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
2204 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
2205 LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
2206 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
2208 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
2209 if (SextVal >= -31 && SextVal <= 0) {
2210 SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
2211 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
2212 LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
2213 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
2220 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
2221 /// the specified operations to build the shuffle.
2222 static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
2223 SDOperand RHS, SelectionDAG &DAG) {
2224 unsigned OpNum = (PFEntry >> 26) & 0x0F;
2225 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
2226 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
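// Each perfect shuffle table entry packs a cost in bits [30,32), the
// operation to perform in bits [26,30), and two 13-bit shuffle ids
// describing how to build the LHS and RHS operands.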
2229 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
2241 if (OpNum == OP_COPY) {
2242 if (LHSID == (1*9+2)*9+3) return LHS;
2243 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
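// Shuffle ids encode four result elements in base 9: digits 0-7 name an
// input element and 8 marks an undef element, so (1*9+2)*9+3 is <0,1,2,3>
// (the LHS unchanged) and ((4*9+5)*9+6)*9+7 is <4,5,6,7> (the RHS
// unchanged).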
2247 SDOperand OpLHS, OpRHS;
2248 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
2249 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
2251 unsigned ShufIdxs[16];
2253 default: assert(0 && "Unknown i32 permute!");
2255 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
2256 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
2257 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
2258 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
2261 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
2262 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
2263 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
2264 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
2267 for (unsigned i = 0; i != 16; ++i)
2268 ShufIdxs[i] = (i&3)+0;
2271 for (unsigned i = 0; i != 16; ++i)
2272 ShufIdxs[i] = (i&3)+4;
2275 for (unsigned i = 0; i != 16; ++i)
2276 ShufIdxs[i] = (i&3)+8;
2279 for (unsigned i = 0; i != 16; ++i)
2280 ShufIdxs[i] = (i&3)+12;
2283 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
2285 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
2287 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
2290 for (unsigned i = 0; i != 16; ++i)
2291 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32);
2293 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
2294 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
2297 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
2298 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
2299 /// return the code it can be lowered into. Worst case, it can always be
2300 /// lowered into a vperm.
2301 static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
2302 SDOperand V1 = Op.getOperand(0);
2303 SDOperand V2 = Op.getOperand(1);
2304 SDOperand PermMask = Op.getOperand(2);
2306 // Cases that are handled by instructions that take permute immediates
2307 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
2308 // selected by the instruction selector.
2309 if (V2.getOpcode() == ISD::UNDEF) {
2310 if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
2311 PPC::isSplatShuffleMask(PermMask.Val, 2) ||
2312 PPC::isSplatShuffleMask(PermMask.Val, 4) ||
2313 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
2314 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
2315 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
2316 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
2317 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
2318 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
2319 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
2320 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
2321 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
2326 // Altivec has a variety of "shuffle immediates" that take two vector inputs
2327 // and produce a fixed permutation. If any of these match, do not lower to vperm.
2329 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
2330 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
2331 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
2332 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
2333 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
2334 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
2335 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
2336 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
2337 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
2340 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
2341 // perfect shuffle table to emit an optimal matching sequence.
2342 unsigned PFIndexes[4];
2343 bool isFourElementShuffle = true;
2344 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
2345 unsigned EltNo = 8; // Start out undef.
2346 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
2347 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
2348 continue; // Undef, ignore it.
2350 unsigned ByteSource =
2351 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
2352 if ((ByteSource & 3) != j) {
2353 isFourElementShuffle = false;
2358 EltNo = ByteSource/4;
2359 } else if (EltNo != ByteSource/4) {
2360 isFourElementShuffle = false;
2364 PFIndexes[i] = EltNo;
2367 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
2368 // perfect shuffle vector to determine if it is cost effective to do this as
2369 // discrete instructions, or whether we should use a vperm.
2370 if (isFourElementShuffle) {
2371 // Compute the index in the perfect shuffle table.
2372 unsigned PFTableIndex =
2373 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
2375 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
2376 unsigned Cost = (PFEntry >> 30);
2378 // Determining when to avoid vperm is tricky. Many things affect the cost
2379 // of vperm, particularly how many times the perm mask needs to be computed.
2380 // For example, if the perm mask can be hoisted out of a loop or is already
2381 // used (perhaps because there are multiple permutes with the same shuffle
2382 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
2383 // the loop requires an extra register.
2385 // As a compromise, we only emit discrete instructions if the shuffle can be
2386 // generated in 3 or fewer operations. When we have loop information
2387 // available, if this block is within a loop, we should avoid using vperm
2388 // for 3-operation perms and use a constant pool load instead.
2390 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
2393 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
2394 // vector that will get spilled to the constant pool.
2395 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
2397 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
2398 // that it is in input element units, not in bytes. Convert now.
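// e.g. for a v4i32 shuffle, an element index of 5 expands into the four
// byte indices 20, 21, 22 and 23.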
2399 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
2400 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
2402 SmallVector<SDOperand, 16> ResultMask;
2403 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
2405 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
2408 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
2410 for (unsigned j = 0; j != BytesPerElement; ++j)
2411 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
2415 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
2416 &ResultMask[0], ResultMask.size());
2417 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
2420 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
2421 /// altivec comparison. If it is, return true and fill in CompareOpc/isDot with
2422 /// information about the intrinsic.
2423 static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
2425 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
2428 switch (IntrinsicID) {
2429 default: return false;
2430 // Comparison predicates.
2431 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
2432 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
2433 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
2434 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break;
2435 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
2436 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
2437 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
2438 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
2439 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
2440 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
2441 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
2442 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
2443 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
2445 // Normal Comparisons.
2446 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break;
2447 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
2448 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break;
2449 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break;
2450 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
2451 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
2452 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
2453 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
2454 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
2455 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
2456 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
2457 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
2458 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
2463 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
2464 /// lower, do it, otherwise return null.
2465 static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
2466 // If this is a lowered altivec predicate compare, CompareOpc is set to the
2467 // opcode number of the comparison.
2470 if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
2471 return SDOperand(); // Don't custom lower most intrinsics.
2473 // If this is a non-dot comparison, make the VCMP node and we are done.
2475 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
2476 Op.getOperand(1), Op.getOperand(2),
2477 DAG.getConstant(CompareOpc, MVT::i32));
2478 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
2481 // Create the PPCISD altivec 'dot' comparison node.
2483 Op.getOperand(2), // LHS
2484 Op.getOperand(3), // RHS
2485 DAG.getConstant(CompareOpc, MVT::i32)
2487 std::vector<MVT::ValueType> VTs;
2488 VTs.push_back(Op.getOperand(2).getValueType());
2489 VTs.push_back(MVT::Flag);
2490 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
2492 // Now that we have the comparison, emit a copy from the CR to a GPR.
2493 // This is flagged to the above dot comparison.
2494 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
2495 DAG.getRegister(PPC::CR6, MVT::i32),
2496 CompNode.getValue(1));
2498 // Unpack the result based on how the target uses it.
2499 unsigned BitNo; // Bit # of CR6.
2500 bool InvertBit; // Invert result?
2501 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
2502 default: // Can't happen, don't crash on invalid number though.
2503 case 0: // Return the value of the EQ bit of CR6.
2504 BitNo = 0; InvertBit = false;
2506 case 1: // Return the inverted value of the EQ bit of CR6.
2507 BitNo = 0; InvertBit = true;
2509 case 2: // Return the value of the LT bit of CR6.
2510 BitNo = 2; InvertBit = false;
2512 case 3: // Return the inverted value of the LT bit of CR6.
2513 BitNo = 2; InvertBit = true;
2517 // Shift the bit into the low position.
2518 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
2519 DAG.getConstant(8-(3-BitNo), MVT::i32));
2521 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
2522 DAG.getConstant(1, MVT::i32));
2524 // If we are supposed to, toggle the bit.
2526 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
2527 DAG.getConstant(1, MVT::i32));
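/// LowerSCALAR_TO_VECTOR - Implemented through the stack: the scalar is
/// stored into element 0 of a 16-byte aligned stack slot and the whole slot
/// is reloaded with the vector type, leaving the remaining elements
/// unspecified.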
2531 static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
2532 // Create a stack slot that is 16-byte aligned.
2533 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
2534 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
2535 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2536 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
2538 // Store the input value into Value#0 of the stack slot.
2539 SDOperand Store = DAG.getStore(DAG.getEntryNode(),
2540 Op.getOperand(0), FIdx, NULL, 0);
2542 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
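/// LowerMUL - AltiVec has no full 32-bit vector multiply, so v4i32 products
/// are assembled from 16-bit multiplies (vmulouh for the low halves and
/// vmsumuhm on a half-swapped RHS for the cross terms), v8i16 uses vmladduhm
/// with a zero addend, and v16i8 multiplies the even and odd bytes
/// separately and interleaves the low bytes of the 16-bit products.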
2545 static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
2546 if (Op.getValueType() == MVT::v4i32) {
2547 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2549 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
2550 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
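// The vector shift/rotate instructions only look at the low 5 bits of each
// element of the shift-amount vector, so a splat of -16 behaves exactly like
// a shift by 16 while still fitting in the [-16,15] range that a single
// vspltis[bhw] can materialize.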
2552 SDOperand RHSSwap = // = vrlw RHS, 16
2553 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
2555 // Shrinkify inputs to v8i16.
2556 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
2557 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
2558 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
2560 // Low parts multiplied together, generating 32-bit results (we ignore the top parts).
2562 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
2563 LHS, RHS, DAG, MVT::v4i32);
2565 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
2566 LHS, RHSSwap, Zero, DAG, MVT::v4i32);
2567 // Shift the high parts up 16 bits.
2568 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
2569 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
2570 } else if (Op.getValueType() == MVT::v8i16) {
2571 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2573 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
2575 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
2576 LHS, RHS, Zero, DAG);
2577 } else if (Op.getValueType() == MVT::v16i8) {
2578 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2580 // Multiply the even 8-bit parts, producing 16-bit products.
2581 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
2582 LHS, RHS, DAG, MVT::v8i16);
2583 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
2585 // Multiply the odd 8-bit parts, producing 16-bit products.
2586 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
2587 LHS, RHS, DAG, MVT::v8i16);
2588 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
2590 // Merge the results together.
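// The shuffle mask picks byte 2*i+1 from the even-product vector and byte
// 2*i+1+16 from the odd-product vector, i.e. the low-order byte of each
// 16-bit product, interleaved back into their original byte positions.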
2592 for (unsigned i = 0; i != 8; ++i) {
2593 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8);
2594 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
2596 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
2597 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
2599 assert(0 && "Unknown mul to lower!");
2604 /// LowerOperation - Provide custom lowering hooks for some operations.
2606 SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2607 switch (Op.getOpcode()) {
2608 default: assert(0 && "Wasn't expecting to be able to lower this!");
2609 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
2610 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
2611 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2612 case ISD::SETCC: return LowerSETCC(Op, DAG);
2613 case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
2614 case ISD::FORMAL_ARGUMENTS:
2615 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
2616 case ISD::CALL: return LowerCALL(Op, DAG);
2617 case ISD::RET: return LowerRET(Op, DAG);
2618 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
2619 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
2622 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2623 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
2624 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
2626 // Lower 64-bit shifts.
2627 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
2628 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
2629 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
2631 // Vector-related lowering.
2632 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
2633 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
2634 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2635 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
2636 case ISD::MUL: return LowerMUL(Op, DAG);
2641 //===----------------------------------------------------------------------===//
2642 // Other Lowering Code
2643 //===----------------------------------------------------------------------===//
2646 PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2647 MachineBasicBlock *BB) {
2648 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2649 assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
2650 MI->getOpcode() == PPC::SELECT_CC_I8 ||
2651 MI->getOpcode() == PPC::SELECT_CC_F4 ||
2652 MI->getOpcode() == PPC::SELECT_CC_F8 ||
2653 MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
2654 "Unexpected instr type to insert");
2656 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
2657 // control-flow pattern. The incoming instruction knows the destination vreg
2658 // to set, the condition code register to branch on, the true/false values to
2659 // select between, and a branch opcode to use.
2660 const BasicBlock *LLVM_BB = BB->getBasicBlock();
2661 ilist<MachineBasicBlock>::iterator It = BB;
2667 // cmpTY ccX, r1, r2
2669 // fallthrough --> copy0MBB
2670 MachineBasicBlock *thisMBB = BB;
2671 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
2672 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
2673 unsigned SelectPred = MI->getOperand(4).getImm();
2674 BuildMI(BB, TII->get(PPC::BCC))
2675 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
2676 MachineFunction *F = BB->getParent();
2677 F->getBasicBlockList().insert(It, copy0MBB);
2678 F->getBasicBlockList().insert(It, sinkMBB);
2679 // Update machine-CFG edges by first adding all successors of the current
2680 // block to the new block which will contain the Phi node for the select.
2681 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
2682 e = BB->succ_end(); i != e; ++i)
2683 sinkMBB->addSuccessor(*i);
2684 // Next, remove all successors of the current block, and add the true
2685 // and fallthrough blocks as its successors.
2686 while(!BB->succ_empty())
2687 BB->removeSuccessor(BB->succ_begin());
2688 BB->addSuccessor(copy0MBB);
2689 BB->addSuccessor(sinkMBB);
2692 // %FalseValue = ...
2693 // # fallthrough to sinkMBB
2696 // Update machine-CFG edges
2697 BB->addSuccessor(sinkMBB);
2700 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
2703 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
2704 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
2705 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
2707 delete MI; // The pseudo instruction is gone now.
2711 //===----------------------------------------------------------------------===//
2712 // Target Optimization Hooks
2713 //===----------------------------------------------------------------------===//
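/// PerformDAGCombine - Target-specific DAG combines: fold shifts of a zero
/// (or, for sra, all-ones) constant, turn sint_to_fp(fp_to_sint X) into an
/// fctidz/fcfid pair without going through memory, form stfiwx and
/// byte-swapping load/store nodes, reuse an existing VCMPo in place of a
/// matching VCMP, and lower branches on AltiVec predicate compares to branch
/// directly on CR6.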
2715 SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
2716 DAGCombinerInfo &DCI) const {
2717 TargetMachine &TM = getTargetMachine();
2718 SelectionDAG &DAG = DCI.DAG;
2719 switch (N->getOpcode()) {
2722 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
2723 if (C->getValue() == 0) // 0 << V -> 0.
2724 return N->getOperand(0);
2728 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
2729 if (C->getValue() == 0) // 0 >>u V -> 0.
2730 return N->getOperand(0);
2734 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
2735 if (C->getValue() == 0 || // 0 >>s V -> 0.
2736 C->isAllOnesValue()) // -1 >>s V -> -1.
2737 return N->getOperand(0);
2741 case ISD::SINT_TO_FP:
2742 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
2743 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
2744 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
2745 // We allow the src/dst to be either f32/f64, but the intermediate
2746 // type must be i64.
2747 if (N->getOperand(0).getValueType() == MVT::i64) {
2748 SDOperand Val = N->getOperand(0).getOperand(0);
2749 if (Val.getValueType() == MVT::f32) {
2750 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2751 DCI.AddToWorklist(Val.Val);
2754 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
2755 DCI.AddToWorklist(Val.Val);
2756 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
2757 DCI.AddToWorklist(Val.Val);
2758 if (N->getValueType(0) == MVT::f32) {
2759 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
2760 DCI.AddToWorklist(Val.Val);
2763 } else if (N->getOperand(0).getValueType() == MVT::i32) {
2764 // If the intermediate type is i32, we can avoid the load/store here
2771 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
2772 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
2773 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
2774 N->getOperand(1).getValueType() == MVT::i32) {
2775 SDOperand Val = N->getOperand(1).getOperand(0);
2776 if (Val.getValueType() == MVT::f32) {
2777 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2778 DCI.AddToWorklist(Val.Val);
2780 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
2781 DCI.AddToWorklist(Val.Val);
2783 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
2784 N->getOperand(2), N->getOperand(3));
2785 DCI.AddToWorklist(Val.Val);
2789 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
2790 if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
2791 N->getOperand(1).Val->hasOneUse() &&
2792 (N->getOperand(1).getValueType() == MVT::i32 ||
2793 N->getOperand(1).getValueType() == MVT::i16)) {
2794 SDOperand BSwapOp = N->getOperand(1).getOperand(0);
2795 // Do an any-extend to 32-bits if this is a half-word input.
2796 if (BSwapOp.getValueType() == MVT::i16)
2797 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);
2799 return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
2800 N->getOperand(2), N->getOperand(3),
2801 DAG.getValueType(N->getOperand(1).getValueType()));
2805 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
2806 if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
2807 N->getOperand(0).hasOneUse() &&
2808 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
2809 SDOperand Load = N->getOperand(0);
2810 LoadSDNode *LD = cast<LoadSDNode>(Load);
2811 // Create the byte-swapping load.
2812 std::vector<MVT::ValueType> VTs;
2813 VTs.push_back(MVT::i32);
2814 VTs.push_back(MVT::Other);
2815 SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset());
2817 LD->getChain(), // Chain
2818 LD->getBasePtr(), // Ptr
2820 DAG.getValueType(N->getValueType(0)) // VT
2822 SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);
2824 // If this is an i16 load, insert the truncate.
2825 SDOperand ResVal = BSLoad;
2826 if (N->getValueType(0) == MVT::i16)
2827 ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);
2829 // First, combine the bswap away. This makes the value produced by the load dead.
2831 DCI.CombineTo(N, ResVal);
2833 // Next, combine the load away; we give it a bogus result value but a real
2834 // chain result. The result value is dead because the bswap is dead.
2835 DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));
2837 // Return N so it doesn't get rechecked!
2838 return SDOperand(N, 0);
2842 case PPCISD::VCMP: {
2843 // If a VCMPo node already exists with exactly the same operands as this
2844 // node, use its result instead of this node (VCMPo computes both a CR6 and
2845 // a normal output).
2847 if (!N->getOperand(0).hasOneUse() &&
2848 !N->getOperand(1).hasOneUse() &&
2849 !N->getOperand(2).hasOneUse()) {
2851 // Scan all of the users of the LHS, looking for VCMPo's that match.
2852 SDNode *VCMPoNode = 0;
2854 SDNode *LHSN = N->getOperand(0).Val;
2855 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
2857 if ((*UI)->getOpcode() == PPCISD::VCMPo &&
2858 (*UI)->getOperand(1) == N->getOperand(1) &&
2859 (*UI)->getOperand(2) == N->getOperand(2) &&
2860 (*UI)->getOperand(0) == N->getOperand(0)) {
2865 // If there is no VCMPo node, or if its flag result is unused, don't transform.
2867 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
2870 // Look at the (necessarily single) use of the flag value. If it has a
2871 // chain, this transformation is more complex. Note that multiple things
2872 // could use the value result, which we should ignore.
2873 SDNode *FlagUser = 0;
2874 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
2875 FlagUser == 0; ++UI) {
2876 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
2878 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
2879 if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
2886 // If the user is an MFCR instruction, we know this is safe. Otherwise we
2887 // give up for now.
2888 if (FlagUser->getOpcode() == PPCISD::MFCR)
2889 return SDOperand(VCMPoNode, 0);
2894 // If this is a branch on an altivec predicate comparison, lower this so
2895 // that we don't have to do an MFCR: instead, branch directly on CR6. This
2896 // lowering is done pre-legalize, because the legalizer lowers the predicate
2897 // compare down to code that is difficult to reassemble.
2898 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
2899 SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
2903 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2904 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
2905 getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
2906 assert(isDot && "Can't compare against a vector result!");
2908 // If this is a comparison against something other than 0/1, then we know
2909 // that the condition is never/always true.
2910 unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
2911 if (Val != 0 && Val != 1) {
2912 if (CC == ISD::SETEQ) // Cond never true, remove branch.
2913 return N->getOperand(0);
2914 // Always !=, turn it into an unconditional branch.
2915 return DAG.getNode(ISD::BR, MVT::Other,
2916 N->getOperand(0), N->getOperand(4));
2919 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
2921 // Create the PPCISD altivec 'dot' comparison node.
2922 std::vector<MVT::ValueType> VTs;
2924 LHS.getOperand(2), // LHS of compare
2925 LHS.getOperand(3), // RHS of compare
2926 DAG.getConstant(CompareOpc, MVT::i32)
2928 VTs.push_back(LHS.getOperand(2).getValueType());
2929 VTs.push_back(MVT::Flag);
2930 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
2932 // Unpack the result based on how the target uses it.
2933 PPC::Predicate CompOpc;
2934 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
2935 default: // Can't happen, don't crash on invalid number though.
2936 case 0: // Branch on the value of the EQ bit of CR6.
2937 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
2939 case 1: // Branch on the inverted value of the EQ bit of CR6.
2940 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
2942 case 2: // Branch on the value of the LT bit of CR6.
2943 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
2945 case 3: // Branch on the inverted value of the LT bit of CR6.
2946 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
2950 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
2951 DAG.getConstant(CompOpc, MVT::i32),
2952 DAG.getRegister(PPC::CR6, MVT::i32),
2953 N->getOperand(4), CompNode.getValue(1));
2962 //===----------------------------------------------------------------------===//
2963 // Inline Assembly Support
2964 //===----------------------------------------------------------------------===//
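/// computeMaskedBitsForTargetNode - Report which result bits are known to be
/// zero for PPC-specific nodes: an i16 byte-swapping load (lhbrx) clears the
/// top 16 bits, and the AltiVec predicate-compare intrinsics produce only 0
/// or 1, so every bit but the low one is known zero.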
2966 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
2968 uint64_t &KnownZero,
2970 unsigned Depth) const {
2973 switch (Op.getOpcode()) {
2975 case PPCISD::LBRX: {
2976 // lhbrx is known to have the top bits cleared out.
2977 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
2978 KnownZero = 0xFFFF0000;
2981 case ISD::INTRINSIC_WO_CHAIN: {
2982 switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
2984 case Intrinsic::ppc_altivec_vcmpbfp_p:
2985 case Intrinsic::ppc_altivec_vcmpeqfp_p:
2986 case Intrinsic::ppc_altivec_vcmpequb_p:
2987 case Intrinsic::ppc_altivec_vcmpequh_p:
2988 case Intrinsic::ppc_altivec_vcmpequw_p:
2989 case Intrinsic::ppc_altivec_vcmpgefp_p:
2990 case Intrinsic::ppc_altivec_vcmpgtfp_p:
2991 case Intrinsic::ppc_altivec_vcmpgtsb_p:
2992 case Intrinsic::ppc_altivec_vcmpgtsh_p:
2993 case Intrinsic::ppc_altivec_vcmpgtsw_p:
2994 case Intrinsic::ppc_altivec_vcmpgtub_p:
2995 case Intrinsic::ppc_altivec_vcmpgtuh_p:
2996 case Intrinsic::ppc_altivec_vcmpgtuw_p:
2997 KnownZero = ~1U; // All bits but the low one are known to be zero.
3005 /// getConstraintType - Given a constraint letter, return the type of
3006 /// constraint it is for this target.
3007 PPCTargetLowering::ConstraintType
3008 PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
3009 switch (ConstraintLetter) {
3016 return C_RegisterClass;
3018 return TargetLowering::getConstraintType(ConstraintLetter);
3021 std::pair<unsigned, const TargetRegisterClass*>
3022 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3023 MVT::ValueType VT) const {
3024 if (Constraint.size() == 1) {
3025 // GCC RS6000 Constraint Letters
3026 switch (Constraint[0]) {
3029 if (VT == MVT::i64 && PPCSubTarget.isPPC64())
3030 return std::make_pair(0U, PPC::G8RCRegisterClass);
3031 return std::make_pair(0U, PPC::GPRCRegisterClass);
3034 return std::make_pair(0U, PPC::F4RCRegisterClass);
3035 else if (VT == MVT::f64)
3036 return std::make_pair(0U, PPC::F8RCRegisterClass);
3039 return std::make_pair(0U, PPC::VRRCRegisterClass);
3041 return std::make_pair(0U, PPC::CRRCRegisterClass);
3045 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3049 /// isOperandValidForConstraint - For the PPC-specific constraint letters
/// (I, J, K, L, M, N, O, P), return the operand if the immediate satisfies
/// the constraint and a null SDOperand if it does not; other letters are
/// handled by the default implementation.
3050 SDOperand PPCTargetLowering::
3051 isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
3062 if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);// Must be an immediate.
3063 unsigned Value = cast<ConstantSDNode>(Op)->getValue();
3065 default: assert(0 && "Unknown constraint letter!");
3066 case 'I': // "I" is a signed 16-bit constant.
3067 if ((short)Value == (int)Value) return Op;
3069 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
3070 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
3071 if ((short)Value == 0) return Op;
3073 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
3074 if ((Value >> 16) == 0) return Op;
3076 case 'M': // "M" is a constant that is greater than 31.
3077 if (Value > 31) return Op;
3079 case 'N': // "N" is a positive constant that is an exact power of two.
3080 if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
3082 case 'O': // "O" is the constant zero.
3083 if (Value == 0) return Op;
3085 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
3086 if ((short)-Value == (int)-Value) return Op;
3093 // Handle standard constraint letters.
3094 return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
3097 /// isLegalAddressImmediate - Return true if the integer value can be used
3098 /// as the offset of the target addressing mode.
3099 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
3100 // PPC allows a sign-extended 16-bit immediate field.
3101 return (V > -(1 << 16) && V < (1 << 16)-1);
3104 bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3105 return TargetLowering::isLegalAddressImmediate(GV);