1 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file implements the SPUTargetLowering class.
11 //===----------------------------------------------------------------------===//
13 #include "SPUISelLowering.h"
14 #include "SPUTargetMachine.h"
15 #include "SPUFrameLowering.h"
16 #include "SPUMachineFunction.h"
17 #include "llvm/Constants.h"
18 #include "llvm/Function.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/Type.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/raw_ostream.h"
35 using namespace llvm;
38 // Used in getTargetNodeName() below
40 std::map<unsigned, const char *> node_names;
42 // Byte offset of the preferred slot (counted from the MSB)
43 int prefslotOffset(EVT VT) {
44 int retval=0;
45 if (VT==MVT::i1) retval=3;
46 if (VT==MVT::i8) retval=3;
47 if (VT==MVT::i16) retval=2;
49 return retval;
50 }
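// Editor's note: on CellSPU every scalar value lives in the "preferred slot"
// of a 128-bit register (bytes 0..3 of the quadword), right-justified within
// that word. Counted from the MSB that gives offset 0 for i32/f32, 2 for i16
// and 3 for i8/i1, which is exactly what the table above returns.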
52 //! Expand a library call into an actual call DAG node
55 This code is taken from SelectionDAGLegalize, since it is not exposed as
56 part of the LLVM SelectionDAG API.
59 static SDValue
60 ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
61 bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
62 // The input chain to this libcall is the entry node of the function.
63 // Legalizing the call will automatically add the previous call to the
64 // dependence.
65 SDValue InChain = DAG.getEntryNode();
67 TargetLowering::ArgListTy Args;
68 TargetLowering::ArgListEntry Entry;
69 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
70 EVT ArgVT = Op.getOperand(i).getValueType();
71 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
72 Entry.Node = Op.getOperand(i);
73 Entry.Ty = ArgTy;
74 Entry.isSExt = isSigned;
75 Entry.isZExt = !isSigned;
76 Args.push_back(Entry);
78 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
79 TLI.getPointerTy());
81 // Splice the libcall in wherever FindInputOutputChains tells us to.
82 Type *RetTy =
83 Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
84 std::pair<SDValue, SDValue> CallInfo =
85 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
86 0, TLI.getLibcallCallingConv(LC), false,
87 /*isReturnValueUsed=*/true,
88 Callee, Args, DAG, Op.getDebugLoc());
90 return CallInfo.first;
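// Editor's sketch of a hypothetical use of this helper: an f64 division that
// must become a library call could be expanded as
//   SDValue Dummy;
//   SDValue Quot = ExpandLibCall(RTLIB::DIV_F64, Op, DAG, /*isSigned=*/true,
//                                Dummy, TLI);
// which would end up calling "__fast_divdf3", the name registered for
// RTLIB::DIV_F64 in the constructor below. "Dummy" and "Quot" are
// illustrative names only.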
94 SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
95 : TargetLowering(TM, new TargetLoweringObjectFileELF()),
96 SPUTM(TM) {
98 // Use _setjmp/_longjmp instead of setjmp/longjmp.
99 setUseUnderscoreSetJmp(true);
100 setUseUnderscoreLongJmp(true);
102 // Set RTLIB libcall names as used by SPU:
103 setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
105 // Set up the SPU's register classes:
106 addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
107 addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
108 addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
109 addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
110 addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
111 addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
112 addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
114 // SPU has no sign or zero extended loads for i1, i8, i16:
115 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
116 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
117 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
119 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
120 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
122 setTruncStoreAction(MVT::i128, MVT::i64, Expand);
123 setTruncStoreAction(MVT::i128, MVT::i32, Expand);
124 setTruncStoreAction(MVT::i128, MVT::i16, Expand);
125 setTruncStoreAction(MVT::i128, MVT::i8, Expand);
127 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
129 // SPU constant load actions are custom lowered:
130 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
131 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
133 // SPU's loads and stores have to be custom lowered:
134 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
135 ++sctype) {
136 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
138 setOperationAction(ISD::LOAD, VT, Custom);
139 setOperationAction(ISD::STORE, VT, Custom);
140 setLoadExtAction(ISD::EXTLOAD, VT, Custom);
141 setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
142 setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
144 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
145 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
146 setTruncStoreAction(VT, StoreVT, Expand);
150 for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
151 ++sctype) {
152 MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
154 setOperationAction(ISD::LOAD, VT, Custom);
155 setOperationAction(ISD::STORE, VT, Custom);
157 for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
158 MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
159 setTruncStoreAction(VT, StoreVT, Expand);
163 // Expand the jumptable branches
164 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
165 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
167 // Custom lower SELECT_CC for most cases, but expand by default
168 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
169 setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
170 setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
171 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
172 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
174 // SPU has no intrinsics for these particular operations:
175 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
176 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
178 // SPU has no division/remainder instructions
179 setOperationAction(ISD::SREM, MVT::i8, Expand);
180 setOperationAction(ISD::UREM, MVT::i8, Expand);
181 setOperationAction(ISD::SDIV, MVT::i8, Expand);
182 setOperationAction(ISD::UDIV, MVT::i8, Expand);
183 setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
184 setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
185 setOperationAction(ISD::SREM, MVT::i16, Expand);
186 setOperationAction(ISD::UREM, MVT::i16, Expand);
187 setOperationAction(ISD::SDIV, MVT::i16, Expand);
188 setOperationAction(ISD::UDIV, MVT::i16, Expand);
189 setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
190 setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
191 setOperationAction(ISD::SREM, MVT::i32, Expand);
192 setOperationAction(ISD::UREM, MVT::i32, Expand);
193 setOperationAction(ISD::SDIV, MVT::i32, Expand);
194 setOperationAction(ISD::UDIV, MVT::i32, Expand);
195 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
196 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
197 setOperationAction(ISD::SREM, MVT::i64, Expand);
198 setOperationAction(ISD::UREM, MVT::i64, Expand);
199 setOperationAction(ISD::SDIV, MVT::i64, Expand);
200 setOperationAction(ISD::UDIV, MVT::i64, Expand);
201 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
202 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
203 setOperationAction(ISD::SREM, MVT::i128, Expand);
204 setOperationAction(ISD::UREM, MVT::i128, Expand);
205 setOperationAction(ISD::SDIV, MVT::i128, Expand);
206 setOperationAction(ISD::UDIV, MVT::i128, Expand);
207 setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
208 setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
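// With no hardware divide unit, the legalizer turns all of the divides and
// remainders above into runtime library calls (typically __divsi3, __umoddi3
// and friends from the compiler runtime); nothing target-specific is needed
// beyond marking them Expand.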
210 // We don't support sin/cos/sqrt/fmod
211 setOperationAction(ISD::FSIN , MVT::f64, Expand);
212 setOperationAction(ISD::FCOS , MVT::f64, Expand);
213 setOperationAction(ISD::FREM , MVT::f64, Expand);
214 setOperationAction(ISD::FSIN , MVT::f32, Expand);
215 setOperationAction(ISD::FCOS , MVT::f32, Expand);
216 setOperationAction(ISD::FREM , MVT::f32, Expand);
218 // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
219 // instead of the libcall where the hardware provides it)
220 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
221 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
223 setOperationAction(ISD::FMA, MVT::f64, Expand);
224 setOperationAction(ISD::FMA, MVT::f32, Expand);
226 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
227 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
229 // SPU can do rotate right and left, so legalize it... but customize for i8
230 // because instructions don't exist.
232 // FIXME: Change from "expand" to appropriate type once ROTR is supported in
234 setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
235 setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
236 setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);
238 setOperationAction(ISD::ROTL, MVT::i32, Legal);
239 setOperationAction(ISD::ROTL, MVT::i16, Legal);
240 setOperationAction(ISD::ROTL, MVT::i8, Custom);
242 // SPU has no native version of shift left/right for i8
243 setOperationAction(ISD::SHL, MVT::i8, Custom);
244 setOperationAction(ISD::SRL, MVT::i8, Custom);
245 setOperationAction(ISD::SRA, MVT::i8, Custom);
247 // Make these operations legal and handle them during instruction selection:
248 setOperationAction(ISD::SHL, MVT::i64, Legal);
249 setOperationAction(ISD::SRL, MVT::i64, Legal);
250 setOperationAction(ISD::SRA, MVT::i64, Legal);
252 // Custom lower i8 multiplications; i32 and i64 multiplies are legal:
253 setOperationAction(ISD::MUL, MVT::i8, Custom);
254 setOperationAction(ISD::MUL, MVT::i32, Legal);
255 setOperationAction(ISD::MUL, MVT::i64, Legal);
257 // Expand double-width multiplication
258 // FIXME: It would probably be reasonable to support some of these operations
259 setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
260 setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
261 setOperationAction(ISD::MULHU, MVT::i8, Expand);
262 setOperationAction(ISD::MULHS, MVT::i8, Expand);
263 setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
264 setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
265 setOperationAction(ISD::MULHU, MVT::i16, Expand);
266 setOperationAction(ISD::MULHS, MVT::i16, Expand);
267 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
268 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
269 setOperationAction(ISD::MULHU, MVT::i32, Expand);
270 setOperationAction(ISD::MULHS, MVT::i32, Expand);
271 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
272 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
273 setOperationAction(ISD::MULHU, MVT::i64, Expand);
274 setOperationAction(ISD::MULHS, MVT::i64, Expand);
276 // Need to custom handle (some) common i8, i64 math ops
277 setOperationAction(ISD::ADD, MVT::i8, Custom);
278 setOperationAction(ISD::ADD, MVT::i64, Legal);
279 setOperationAction(ISD::SUB, MVT::i8, Custom);
280 setOperationAction(ISD::SUB, MVT::i64, Legal);
282 // SPU does not have BSWAP. It does support CTLZ for i32;
283 // CTPOP has to be custom lowered.
284 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
285 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
287 setOperationAction(ISD::CTPOP, MVT::i8, Custom);
288 setOperationAction(ISD::CTPOP, MVT::i16, Custom);
289 setOperationAction(ISD::CTPOP, MVT::i32, Custom);
290 setOperationAction(ISD::CTPOP, MVT::i64, Custom);
291 setOperationAction(ISD::CTPOP, MVT::i128, Expand);
293 setOperationAction(ISD::CTTZ , MVT::i8, Expand);
294 setOperationAction(ISD::CTTZ , MVT::i16, Expand);
295 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
296 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
297 setOperationAction(ISD::CTTZ , MVT::i128, Expand);
298 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
299 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
300 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
301 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
302 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i128, Expand);
304 setOperationAction(ISD::CTLZ , MVT::i8, Promote);
305 setOperationAction(ISD::CTLZ , MVT::i16, Promote);
306 setOperationAction(ISD::CTLZ , MVT::i32, Legal);
307 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
308 setOperationAction(ISD::CTLZ , MVT::i128, Expand);
309 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
310 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
311 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
312 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
313 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i128, Expand);
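// Note: CTLZ on i8/i16 is promoted to the legal i32 form above, so a single
// 32-bit count-leading-zeros instruction also serves the narrower types;
// only the 64-bit and 128-bit variants fall back to expansion.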
315 // SPU has a version of select that implements (a&~c)|(b&c), just like
316 // select ought to work:
317 setOperationAction(ISD::SELECT, MVT::i8, Legal);
318 setOperationAction(ISD::SELECT, MVT::i16, Legal);
319 setOperationAction(ISD::SELECT, MVT::i32, Legal);
320 setOperationAction(ISD::SELECT, MVT::i64, Legal);
322 setOperationAction(ISD::SETCC, MVT::i8, Legal);
323 setOperationAction(ISD::SETCC, MVT::i16, Legal);
324 setOperationAction(ISD::SETCC, MVT::i32, Legal);
325 setOperationAction(ISD::SETCC, MVT::i64, Legal);
326 setOperationAction(ISD::SETCC, MVT::f64, Custom);
328 // Custom lower i128 -> i64 truncates
329 setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
331 // Custom lower i32/i64 -> i128 sign extend
332 setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);
334 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
335 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
336 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
337 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
338 // SPU has a legal FP -> signed INT instruction for f32, but for f64 we need
339 // to expand to a libcall, hence the custom lowering:
340 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
341 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
342 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
343 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
344 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
345 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
347 // FDIV on SPU requires custom lowering
348 setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall
350 // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
351 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
352 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
353 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
354 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
355 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
356 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
357 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
358 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
360 setOperationAction(ISD::BITCAST, MVT::i32, Legal);
361 setOperationAction(ISD::BITCAST, MVT::f32, Legal);
362 setOperationAction(ISD::BITCAST, MVT::i64, Legal);
363 setOperationAction(ISD::BITCAST, MVT::f64, Legal);
365 // We cannot sextinreg(i1). Expand to shifts.
366 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
368 // We want to legalize GlobalAddress and ConstantPool nodes into the
369 // appropriate instructions to materialize the address.
370 for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
372 MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
374 setOperationAction(ISD::GlobalAddress, VT, Custom);
375 setOperationAction(ISD::ConstantPool, VT, Custom);
376 setOperationAction(ISD::JumpTable, VT, Custom);
379 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
380 setOperationAction(ISD::VASTART , MVT::Other, Custom);
382 // Use the default implementation.
383 setOperationAction(ISD::VAARG , MVT::Other, Expand);
384 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
385 setOperationAction(ISD::VAEND , MVT::Other, Expand);
386 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
387 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
388 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
389 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);
391 // Cell SPU has instructions for converting between i64 and fp.
392 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
393 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
395 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
396 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
398 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
399 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
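// Expanding BUILD_PAIR means an i64 assembled from two i32 halves becomes
// roughly (or (shl (anyext hi), 32), (zext lo)) during legalization, which
// maps onto the shift/or support configured above.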
401 // First set operation action for all vector types to expand. Then we
402 // will selectively turn on ones that can be effectively codegen'd.
403 addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
404 addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
405 addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
406 addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
407 addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
408 addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
410 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
411 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
412 MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
414 // Set operation actions to legal types only.
415 if (!isTypeLegal(VT)) continue;
417 // add/sub are legal for all supported vector VT's.
418 setOperationAction(ISD::ADD, VT, Legal);
419 setOperationAction(ISD::SUB, VT, Legal);
420 // mul is also legal for the supported vector types.
421 setOperationAction(ISD::MUL, VT, Legal);
423 setOperationAction(ISD::AND, VT, Legal);
424 setOperationAction(ISD::OR, VT, Legal);
425 setOperationAction(ISD::XOR, VT, Legal);
426 setOperationAction(ISD::LOAD, VT, Custom);
427 setOperationAction(ISD::SELECT, VT, Legal);
428 setOperationAction(ISD::STORE, VT, Custom);
430 // These operations need to be expanded:
431 setOperationAction(ISD::SDIV, VT, Expand);
432 setOperationAction(ISD::SREM, VT, Expand);
433 setOperationAction(ISD::UDIV, VT, Expand);
434 setOperationAction(ISD::UREM, VT, Expand);
436 // Expand all trunc stores
437 for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
438 j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
439 MVT::SimpleValueType TargetVT = (MVT::SimpleValueType)j;
440 setTruncStoreAction(VT, TargetVT, Expand);
443 // Custom lower build_vector, constant pool spills, insert and
444 // extract vector elements:
445 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
446 setOperationAction(ISD::ConstantPool, VT, Custom);
447 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
448 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
449 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
450 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
453 setOperationAction(ISD::SHL, MVT::v2i64, Expand);
455 setOperationAction(ISD::AND, MVT::v16i8, Custom);
456 setOperationAction(ISD::OR, MVT::v16i8, Custom);
457 setOperationAction(ISD::XOR, MVT::v16i8, Custom);
458 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
460 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
462 setBooleanContents(ZeroOrNegativeOneBooleanContent);
463 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // FIXME: Is this correct?
465 setStackPointerRegisterToSaveRestore(SPU::R1);
467 // We have target-specific dag combine patterns for the following nodes:
468 setTargetDAGCombine(ISD::ADD);
469 setTargetDAGCombine(ISD::ZERO_EXTEND);
470 setTargetDAGCombine(ISD::SIGN_EXTEND);
471 setTargetDAGCombine(ISD::ANY_EXTEND);
473 setMinFunctionAlignment(3);
475 computeRegisterProperties();
477 // Set pre-RA register scheduler default to BURR, which produces slightly
478 // better code than the default (could also be TDRR, but TargetLowering.h
479 // needs a mod to support that model):
480 setSchedulingPreference(Sched::RegPressure);
483 const char *
484 SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
485 {
486 if (node_names.empty()) {
487 node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
488 node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
489 node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
490 node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
491 node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
492 node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
493 node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
494 node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
495 node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
496 node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
497 node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
498 node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
499 node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
500 node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
501 node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
502 node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
503 node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
504 node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
505 node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
506 "SPUISD::ROTBYTES_LEFT_BITS";
507 node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
508 node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
509 node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
510 node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
511 node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
514 std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
516 return ((i != node_names.end()) ? i->second : 0);
519 //===----------------------------------------------------------------------===//
520 // Return the Cell SPU's SETCC result type
521 //===----------------------------------------------------------------------===//
523 EVT SPUTargetLowering::getSetCCResultType(EVT VT) const {
524 // i8, i16 and i32 are valid SETCC result types
525 MVT::SimpleValueType retval;
527 switch(VT.getSimpleVT().SimpleTy){
528 case MVT::i1:
529 case MVT::i8:
530 retval = MVT::i8; break;
531 case MVT::i16:
532 retval = MVT::i16; break;
533 case MVT::i32:
534 default:
535 retval = MVT::i32;
536 }
537 return retval;
538 }
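// For example, a SETCC of two i16 operands yields an i16 result holding 0 or
// -1, consistent with ZeroOrNegativeOneBooleanContent set in the constructor.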
540 //===----------------------------------------------------------------------===//
541 // Calling convention code:
542 //===----------------------------------------------------------------------===//
544 #include "SPUGenCallingConv.inc"
546 //===----------------------------------------------------------------------===//
547 // LowerOperation implementation
548 //===----------------------------------------------------------------------===//
550 /// Custom lower loads for CellSPU
551 /*!
552 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
553 within a 16-byte block, we have to rotate to extract the requested element.
555 For extending loads, we also want to ensure that the following sequence is
556 emitted, e.g. for MVT::f32 extending load to MVT::f64:
558 \verbatim
559 %1 v16i8,ch = load
560 %2 v16i8,ch = rotate %1
561 %3 v4f32,ch = bitconvert %2
562 %4 f32 = vec2prefslot %3
563 %5 f64 = fp_extend %4
564 \endverbatim
565 */
566 static SDValue
567 LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
568 LoadSDNode *LN = cast<LoadSDNode>(Op);
569 SDValue the_chain = LN->getChain();
570 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
571 EVT InVT = LN->getMemoryVT();
572 EVT OutVT = Op.getValueType();
573 ISD::LoadExtType ExtType = LN->getExtensionType();
574 unsigned alignment = LN->getAlignment();
575 int pso = prefslotOffset(InVT);
576 DebugLoc dl = Op.getDebugLoc();
577 EVT vecVT = InVT.isVector()? InVT: EVT::getVectorVT(*DAG.getContext(), InVT,
578 (128 / InVT.getSizeInBits()));
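// E.g. for a scalar i32 load this computes v4i32: the vector type whose
// 128 / 32 = 4 lanes exactly fill the 16-byte quadword being accessed.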
581 assert( LN->getAddressingMode() == ISD::UNINDEXED
582 && "we should get only UNINDEXED addresses");
583 // Clean, aligned loads can be selected as-is:
584 if (InVT.getSizeInBits() == 128 && (alignment%16) == 0)
585 return SDValue();
587 // Get pointerinfos to the memory chunk(s) that contain the data to load
588 uint64_t mpi_offset = LN->getPointerInfo().Offset;
589 mpi_offset -= mpi_offset%16;
590 MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
591 MachinePointerInfo highMemPtr(LN->getPointerInfo().V, mpi_offset+16);
594 SDValue basePtr = LN->getBasePtr();
595 SDValue rotate;
597 if ((alignment%16) == 0) {
598 ConstantSDNode *CN;
600 // Special cases for a known aligned load to simplify the base pointer
601 // and the rotation amount:
602 if (basePtr.getOpcode() == ISD::ADD
603 && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
604 // Known offset into basePtr
605 int64_t offset = CN->getSExtValue();
606 int64_t rotamt = int64_t((offset & 0xf) - pso);
611 rotate = DAG.getConstant(rotamt, MVT::i16);
613 // Simplify the base pointer for this case:
614 basePtr = basePtr.getOperand(0);
615 if ((offset & ~0xf) > 0) {
616 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
618 DAG.getConstant((offset & ~0xf), PtrVT));
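// Worked example (editor's note): loading an i32 that sits at byte offset 8
// of its quadword gives rotamt = (8 & 0xf) - 0 = 8, so the quadword is
// rotated left by 8 bytes and the value lands in the preferred slot
// (bytes 0..3).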
620 } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
621 || (basePtr.getOpcode() == SPUISD::IndirectAddr
622 && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
623 && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
624 // Plain aligned a-form address: rotate into preferred slot
625 // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
626 int64_t rotamt = -pso;
629 rotate = DAG.getConstant(rotamt, MVT::i16);
631 // Offset the rotate amount by the basePtr and the preferred slot
633 int64_t rotamt = -pso;
636 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
638 DAG.getConstant(rotamt, PtrVT));
641 // Unaligned load: must be more pessimistic about addressing modes:
642 if (basePtr.getOpcode() == ISD::ADD) {
643 MachineFunction &MF = DAG.getMachineFunction();
644 MachineRegisterInfo &RegInfo = MF.getRegInfo();
645 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
648 SDValue Op0 = basePtr.getOperand(0);
649 SDValue Op1 = basePtr.getOperand(1);
651 if (isa<ConstantSDNode>(Op1)) {
652 // Convert the (add <ptr>, <const>) to an indirect address contained
653 // in a register. Note that this is done because we need to avoid
654 // creating a 0(reg) d-form address due to the SPU's block loads.
655 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
656 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
657 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
659 // Convert the (add <arg1>, <arg2>) to an indirect address, which
660 // will likely be lowered as a reg(reg) x-form address.
661 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
664 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
666 DAG.getConstant(0, PtrVT));
669 // Offset the rotate amount by the basePtr and the preferred slot
671 rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
673 DAG.getConstant(-pso, PtrVT));
676 // Do the load as an i128 to allow possible shifting
677 SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
679 LN->isVolatile(), LN->isNonTemporal(), false, 16);
681 // When the size is not greater than alignment we get all data with just
682 // one load
683 if (alignment >= InVT.getSizeInBits()/8) {
685 the_chain = low.getValue(1);
687 // Rotate into the preferred slot:
688 result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::i128,
689 low.getValue(0), rotate);
691 // Convert the loaded v16i8 vector to the appropriate vector type
692 // specified by the operand:
693 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
694 InVT, (128 / InVT.getSizeInBits()));
695 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
696 DAG.getNode(ISD::BITCAST, dl, vecVT, result));
698 // When alignment is less than the size, we might need (known only at
699 // run-time) two loads
700 // TODO: if the memory address is composed only from constants, we have
701 // extra knowledge, and might avoid the second load
703 // storage position offset from lower 16 byte aligned memory chunk
704 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
705 basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
706 // get a register full of ones. (this implementation is a workaround: LLVM
707 // cannot handle 128 bit signed int constants)
708 SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
709 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
711 SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
712 DAG.getNode(ISD::ADD, dl, PtrVT,
714 DAG.getConstant(16, PtrVT)),
716 LN->isVolatile(), LN->isNonTemporal(), false,
719 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
722 // Shift the (possible) high part right to compensate for the misalignment.
723 // If there is no high part (i.e. the value is i64 and the offset is 4), this
724 // will zero out the high value.
725 high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
726 DAG.getNode(ISD::SUB, dl, MVT::i32,
727 DAG.getConstant( 16, MVT::i32),
731 // Shift the low similarly
732 // TODO: add SPUISD::SHL_BYTES
733 low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
735 // Merge the two parts
736 result = DAG.getNode(ISD::BITCAST, dl, vecVT,
737 DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
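// Example (editor's note): an i64 load with 4-byte alignment starting at
// byte 12 of its quadword takes bytes 12..15 from the low quadword and
// bytes 0..3 from the high one; after the SHL_BYTES/SRL_BYTES shifts above,
// the OR here stitches the two halves back together.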
739 if (!InVT.isVector()) {
740 result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result );
744 // Handle extending loads by extending the scalar result:
745 if (ExtType == ISD::SEXTLOAD) {
746 result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
747 } else if (ExtType == ISD::ZEXTLOAD) {
748 result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
749 } else if (ExtType == ISD::EXTLOAD) {
750 unsigned NewOpc = ISD::ANY_EXTEND;
752 if (OutVT.isFloatingPoint())
753 NewOpc = ISD::FP_EXTEND;
755 result = DAG.getNode(NewOpc, dl, OutVT, result);
758 SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
759 SDValue retops[2] = {
760 result,
761 the_chain
762 };
764 result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
765 retops, sizeof(retops) / sizeof(retops[0]));
766 return result;
767 }
769 /// Custom lower stores for CellSPU
770 /*!
771 All CellSPU stores are aligned to 16-byte boundaries, so for elements
772 within a 16-byte block, we have to generate a shuffle to insert the
773 requested element into its place, then store the resulting block.
774 */
775 static SDValue
776 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
777 StoreSDNode *SN = cast<StoreSDNode>(Op);
778 SDValue Value = SN->getValue();
779 EVT VT = Value.getValueType();
780 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
781 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
782 DebugLoc dl = Op.getDebugLoc();
783 unsigned alignment = SN->getAlignment();
785 EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
786 (128 / StVT.getSizeInBits()));
787 // Get pointerinfos to the memory chunk(s) that contain the data to load
788 uint64_t mpi_offset = SN->getPointerInfo().Offset;
789 mpi_offset -= mpi_offset%16;
790 MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
791 MachinePointerInfo highMemPtr(SN->getPointerInfo().V, mpi_offset+16);
795 assert( SN->getAddressingMode() == ISD::UNINDEXED
796 && "we should get only UNINDEXED addresses");
797 // Clean, aligned stores can be selected as-is:
798 if (StVT.getSizeInBits() == 128 && (alignment%16) == 0)
799 return SDValue();
801 SDValue alignLoadVec;
802 SDValue basePtr = SN->getBasePtr();
803 SDValue the_chain = SN->getChain();
804 SDValue insertEltOffs;
806 if ((alignment%16) == 0) {
807 ConstantSDNode *CN;
808 // Special cases for a known aligned store to simplify the base pointer
809 // and insertion byte:
810 if (basePtr.getOpcode() == ISD::ADD
811 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
812 // Known offset into basePtr
813 int64_t offset = CN->getSExtValue();
815 // Simplify the base pointer for this case:
816 basePtr = basePtr.getOperand(0);
817 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
819 DAG.getConstant((offset & 0xf), PtrVT));
821 if ((offset & ~0xf) > 0) {
822 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
824 DAG.getConstant((offset & ~0xf), PtrVT));
827 // Otherwise, assume it's at byte 0 of basePtr
828 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
830 DAG.getConstant(0, PtrVT));
831 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
833 DAG.getConstant(0, PtrVT));
836 // Unaligned load: must be more pessimistic about addressing modes:
837 if (basePtr.getOpcode() == ISD::ADD) {
838 MachineFunction &MF = DAG.getMachineFunction();
839 MachineRegisterInfo &RegInfo = MF.getRegInfo();
840 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
843 SDValue Op0 = basePtr.getOperand(0);
844 SDValue Op1 = basePtr.getOperand(1);
846 if (isa<ConstantSDNode>(Op1)) {
847 // Convert the (add <ptr>, <const>) to an indirect address contained
848 // in a register. Note that this is done because we need to avoid
849 // creating a 0(reg) d-form address due to the SPU's block loads.
850 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
851 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
852 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
854 // Convert the (add <arg1>, <arg2>) to an indirect address, which
855 // will likely be lowered as a reg(reg) x-form address.
856 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
859 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
861 DAG.getConstant(0, PtrVT));
864 // Insertion point is solely determined by basePtr's contents
865 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
867 DAG.getConstant(0, PtrVT));
870 // Load the lower part of the memory to which to store.
871 SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
872 lowMemPtr, SN->isVolatile(), SN->isNonTemporal(),
875 // if we don't need to store over the 16 byte boundary, one store suffices
876 if (alignment >= StVT.getSizeInBits()/8) {
878 the_chain = low.getValue(1);
880 LoadSDNode *LN = cast<LoadSDNode>(low);
881 SDValue theValue = SN->getValue();
883 if (StVT != VT
884 && (theValue.getOpcode() == ISD::AssertZext
885 || theValue.getOpcode() == ISD::AssertSext)) {
886 // Drill down and get the value for zero- and sign-extended
887 // quantities
888 theValue = theValue.getOperand(0);
891 // If the base pointer is already a D-form address, then just create
892 // a new D-form address with a slot offset and the original base pointer.
893 // Otherwise generate a D-form address with the slot offset relative
894 // to the stack pointer, which is always aligned.
896 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
897 errs() << "CellSPU LowerSTORE: basePtr = ";
898 basePtr.getNode()->dump(&DAG);
903 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
905 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
908 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
910 DAG.getNode(ISD::BITCAST, dl,
911 MVT::v4i32, insertEltOp));
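// Editor's note: together with the store below, the aligned path is a
// read-modify-write of the containing quadword: load the 16-byte block,
// shuffle the new element into its slot with SHUFB, then write the whole
// block back.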
913 result = DAG.getStore(the_chain, dl, result, basePtr,
915 LN->isVolatile(), LN->isNonTemporal(),
919 // Do the store when it might cross the 16 byte memory access boundary.
921 // TODO: issue a warning if SN->isVolatile() == true? This is likely not
922 // what the user wanted.
924 // address offset from the nearest lower 16-byte aligned address
925 SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
927 DAG.getConstant(0xf, MVT::i32));
929 SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
930 DAG.getConstant( 16, MVT::i32),
932 // 16 - sizeof(Value)
933 SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
934 DAG.getConstant( 16, MVT::i32),
935 DAG.getConstant( VT.getSizeInBits()/8,
937 // get a register full of ones
938 SDValue ones = DAG.getConstant(-1, MVT::v4i32);
939 ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
941 // Create the 128-bit masks that have ones where the data to store is
942 // located.
943 SDValue lowmask, himask;
944 // If the value to store doesn't fill an entire 128 bits, zero out the
945 // trailing bits of the mask so that only the value we want to store
946 // is covered by it.
947 // This happens e.g. in the case of a store of i32 with align 2.
949 Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
950 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
951 lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
953 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
954 Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
959 Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
961 // This will be zero if no data goes to the high quadword.
962 himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
964 lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
967 // Load in the old data and zero out the parts that will be overwritten with
968 // the new data to store.
969 SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
970 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
971 DAG.getConstant( 16, PtrVT)),
973 SN->isVolatile(), SN->isNonTemporal(),
975 the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
978 low = DAG.getNode(ISD::AND, dl, MVT::i128,
979 DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
980 DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
981 hi = DAG.getNode(ISD::AND, dl, MVT::i128,
982 DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
983 DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
985 // Shift the Value to store into place. rlow contains the parts that go to
986 // the lower memory chunk, rhi has the parts that go to the upper one.
987 SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
988 rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
989 SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
992 // Merge the old data and the new data and store the results
993 // Need to convert vectors here to integers, as 'OR'ing floats asserts
994 rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
995 DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
996 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
997 rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
998 DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
999 DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
1001 low = DAG.getStore(the_chain, dl, rlow, basePtr,
1003 SN->isVolatile(), SN->isNonTemporal(), 16);
1004 hi = DAG.getStore(the_chain, dl, rhi,
1005 DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
1006 DAG.getConstant( 16, PtrVT)),
1008 SN->isVolatile(), SN->isNonTemporal(), 16);
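// Editor's note: the unaligned path therefore issues two 16-byte stores, one
// for each quadword the value straddles; the TokenFactor below merges both
// store chains back into a single output chain.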
1009 result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
1016 //! Generate the address of a constant pool entry.
1018 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1019 EVT PtrVT = Op.getValueType();
1020 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1021 const Constant *C = CP->getConstVal();
1022 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
1023 SDValue Zero = DAG.getConstant(0, PtrVT);
1024 const TargetMachine &TM = DAG.getTarget();
1025 // FIXME there is no actual debug info here
1026 DebugLoc dl = Op.getDebugLoc();
1028 if (TM.getRelocationModel() == Reloc::Static) {
1029 if (!ST->usingLargeMem()) {
1030 // Just return the SDValue with the constant pool address in it.
1031 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
1033 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
1034 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
1035 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1039 llvm_unreachable("LowerConstantPool: Relocation model other than static"
1043 //! Alternate entry point for generating the address of a constant pool entry
1045 SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
1046 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
1050 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1051 EVT PtrVT = Op.getValueType();
1052 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1053 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1054 SDValue Zero = DAG.getConstant(0, PtrVT);
1055 const TargetMachine &TM = DAG.getTarget();
1056 // FIXME there is no actual debug info here
1057 DebugLoc dl = Op.getDebugLoc();
1059 if (TM.getRelocationModel() == Reloc::Static) {
1060 if (!ST->usingLargeMem()) {
1061 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
1063 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
1064 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
1065 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1069 llvm_unreachable("LowerJumpTable: Relocation model other than static"
1074 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
1075 EVT PtrVT = Op.getValueType();
1076 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1077 const GlobalValue *GV = GSDN->getGlobal();
1078 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
1079 PtrVT, GSDN->getOffset());
1080 const TargetMachine &TM = DAG.getTarget();
1081 SDValue Zero = DAG.getConstant(0, PtrVT);
1082 // FIXME there is no actual debug info here
1083 DebugLoc dl = Op.getDebugLoc();
1085 if (TM.getRelocationModel() == Reloc::Static) {
1086 if (!ST->usingLargeMem()) {
1087 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
1089 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
1090 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
1091 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
1094 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
1100 //! Custom lower double precision floating point constants
1102 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
1103 EVT VT = Op.getValueType();
1104 // FIXME there is no actual debug info here
1105 DebugLoc dl = Op.getDebugLoc();
1107 if (VT == MVT::f64) {
1108 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
1110 assert(FP != 0 &&
1111 "LowerConstantFP: Node is not ConstantFPSDNode");
1113 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
1114 SDValue T = DAG.getConstant(dbits, MVT::i64);
1115 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
1116 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1117 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
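// Editor's note: an f64 constant is thus materialized by splatting its
// 64-bit pattern into a v2i64 BUILD_VECTOR, bitcasting to v2f64 and reading
// the preferred slot back out as a scalar f64.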
1124 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1125 CallingConv::ID CallConv, bool isVarArg,
1126 const SmallVectorImpl<ISD::InputArg>
1128 DebugLoc dl, SelectionDAG &DAG,
1129 SmallVectorImpl<SDValue> &InVals)
1132 MachineFunction &MF = DAG.getMachineFunction();
1133 MachineFrameInfo *MFI = MF.getFrameInfo();
1134 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1135 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
1137 unsigned ArgOffset = SPUFrameLowering::minStackSize();
1138 unsigned ArgRegIdx = 0;
1139 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1141 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1143 SmallVector<CCValAssign, 16> ArgLocs;
1144 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1145 getTargetMachine(), ArgLocs, *DAG.getContext());
1146 // FIXME: allow for other calling conventions
1147 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1149 // Add DAG nodes to load the arguments or copy them out of registers.
1150 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1151 EVT ObjectVT = Ins[ArgNo].VT;
1152 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1154 CCValAssign &VA = ArgLocs[ArgNo];
1156 if (VA.isRegLoc()) {
1157 const TargetRegisterClass *ArgRegClass;
1159 switch (ObjectVT.getSimpleVT().SimpleTy) {
1160 default:
1161 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1162 Twine(ObjectVT.getEVTString()));
1163 case MVT::i8:
1164 ArgRegClass = &SPU::R8CRegClass;
1165 break;
1166 case MVT::i16:
1167 ArgRegClass = &SPU::R16CRegClass;
1168 break;
1169 case MVT::i32:
1170 ArgRegClass = &SPU::R32CRegClass;
1171 break;
1172 case MVT::i64:
1173 ArgRegClass = &SPU::R64CRegClass;
1174 break;
1175 case MVT::i128:
1176 ArgRegClass = &SPU::GPRCRegClass;
1177 break;
1178 case MVT::f32:
1179 ArgRegClass = &SPU::R32FPRegClass;
1180 break;
1181 case MVT::f64:
1182 ArgRegClass = &SPU::R64FPRegClass;
1183 break;
1184 case MVT::v2f64:
1185 case MVT::v4f32:
1186 case MVT::v2i64:
1187 case MVT::v4i32:
1188 case MVT::v8i16:
1189 case MVT::v16i8:
1190 ArgRegClass = &SPU::VECREGRegClass;
1191 break;
1192 }
1194 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1195 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1196 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1197 ++ArgRegIdx;
1198 } else {
1199 // We need to load the argument to a virtual register if we determined
1200 // above that we ran out of physical registers of the appropriate type
1201 // or we're forced to do vararg
1202 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1203 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1204 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1205 false, false, false, 0);
1206 ArgOffset += StackSlotSize;
1209 InVals.push_back(ArgVal);
1211 Chain = ArgVal.getOperand(0);
1216 // FIXME: we should be able to query the argument registers from
1217 // tablegen generated code.
1218 static const unsigned ArgRegs[] = {
1219 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1220 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1221 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1222 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1223 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1224 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1225 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1226 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1227 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1228 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1229 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1231 // size of ArgRegs array
1232 unsigned NumArgRegs = 77;
1234 // We will spill (79-3)+1 registers to the stack
1235 SmallVector<SDValue, 79-3+1> MemOps;
1237 // Create the frame slot
1238 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1239 FuncInfo->setVarArgsFrameIndex(
1240 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1241 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1242 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::VECREGRegClass);
1243 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1244 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1246 Chain = Store.getOperand(0);
1247 MemOps.push_back(Store);
1249 // Increment address by stack slot size for the next stored argument
1250 ArgOffset += StackSlotSize;
1252 if (!MemOps.empty())
1253 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1254 &MemOps[0], MemOps.size());
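// Editor's note: for varargs, every remaining argument register (R3..R79) is
// spilled to its own 16-byte stack slot so that va_arg can simply walk the
// anonymous arguments in memory.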
1260 /// isLSAAddress - Return the immediate to use if the specified
1261 /// value is representable as a LSA address.
1262 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1263 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1264 if (!C) return 0;
1266 int Addr = C->getZExtValue();
1267 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1268 (Addr << 14 >> 14) != Addr)
1269 return 0; // Top 14 bits have to be sext of immediate.
1271 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
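// Example (editor's note): an absolute callee address of 0x1FFFC is word
// aligned and sign-extends from 18 bits, so it is accepted and encoded as
// the word offset 0x1FFFC >> 2 = 0x7FFF.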
1275 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1276 CallingConv::ID CallConv, bool isVarArg,
1278 const SmallVectorImpl<ISD::OutputArg> &Outs,
1279 const SmallVectorImpl<SDValue> &OutVals,
1280 const SmallVectorImpl<ISD::InputArg> &Ins,
1281 DebugLoc dl, SelectionDAG &DAG,
1282 SmallVectorImpl<SDValue> &InVals) const {
1283 // CellSPU target does not yet support tail call optimization.
1286 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1287 unsigned NumOps = Outs.size();
1288 unsigned StackSlotSize = SPUFrameLowering::stackSlotSize();
1290 SmallVector<CCValAssign, 16> ArgLocs;
1291 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1292 getTargetMachine(), ArgLocs, *DAG.getContext());
1293 // FIXME: allow for other calling conventions
1294 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1296 const unsigned NumArgRegs = ArgLocs.size();
1299 // Handy pointer type
1300 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1302 // Set up a copy of the stack pointer for use loading and storing any
1303 // arguments that may not fit in the registers available for argument
1304 // passing.
1305 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1307 // Figure out which arguments are going to go in registers, and which in
1308 // memory.
1309 unsigned ArgOffset = SPUFrameLowering::minStackSize(); // Just below [LR]
1310 unsigned ArgRegIdx = 0;
1312 // Keep track of registers passing arguments
1313 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1314 // And the arguments passed on the stack
1315 SmallVector<SDValue, 8> MemOpChains;
1317 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1318 SDValue Arg = OutVals[ArgRegIdx];
1319 CCValAssign &VA = ArgLocs[ArgRegIdx];
1321 // PtrOff will be used to store the current argument to the stack if a
1322 // register cannot be found for it.
1323 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1324 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
1326 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1327 default: llvm_unreachable("Unexpected ValueType for argument!");
1341 if (ArgRegIdx != NumArgRegs) {
1342 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1343 } else {
1344 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1345 MachinePointerInfo(),
1347 ArgOffset += StackSlotSize;
1353 // Accumulate how many bytes are to be pushed on the stack, including the
1354 // linkage area, and parameter passing area. According to the SPU ABI,
1355 // we minimally need space for [LR] and [SP].
1356 unsigned NumStackBytes = ArgOffset - SPUFrameLowering::minStackSize();
1358 // Insert a call sequence start
1359 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1362 if (!MemOpChains.empty()) {
1363 // Adjust the stack pointer for the stack arguments.
1364 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1365 &MemOpChains[0], MemOpChains.size());
1368 // Build a sequence of copy-to-reg nodes chained together with token chain
1369 // and flag operands which copy the outgoing args into the appropriate regs.
1371 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1372 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1373 RegsToPass[i].second, InFlag);
1374 InFlag = Chain.getValue(1);
1377 SmallVector<SDValue, 8> Ops;
1378 unsigned CallOpc = SPUISD::CALL;
1380 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1381 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1382 // node so that legalize doesn't hack it.
1383 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1384 const GlobalValue *GV = G->getGlobal();
1385 EVT CalleeVT = Callee.getValueType();
1386 SDValue Zero = DAG.getConstant(0, PtrVT);
1387 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1389 if (!ST->usingLargeMem()) {
1390 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1391 // style calls, otherwise, external symbols are BRASL calls. This assumes
1392 // that declared/defined symbols are in the same compilation unit and can
1393 // be reached through PC-relative jumps.
1396 // This may be an unsafe assumption for JIT and really large compilation
1397 // units.
1398 if (GV->isDeclaration()) {
1399 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
1401 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1404 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1406 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1408 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1409 EVT CalleeVT = Callee.getValueType();
1410 SDValue Zero = DAG.getConstant(0, PtrVT);
1411 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1412 Callee.getValueType());
1414 if (!ST->usingLargeMem()) {
1415 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1417 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1419 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1420 // If this is an absolute destination address that appears to be a legal
1421 // local store address, use the munged value.
1422 Callee = SDValue(Dest, 0);
1425 Ops.push_back(Chain);
1426 Ops.push_back(Callee);
1428 // Add argument registers to the end of the list so that they are known live
1429 // into the call.
1430 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1431 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1432 RegsToPass[i].second.getValueType()));
1434 if (InFlag.getNode())
1435 Ops.push_back(InFlag);
1436 // Returns a chain and a flag for retval copy to use.
1437 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Glue),
1438 &Ops[0], Ops.size());
1439 InFlag = Chain.getValue(1);
1441 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1442 DAG.getIntPtrConstant(0, true), InFlag);
1444 InFlag = Chain.getValue(1);
1446 // If the function returns void, just return the chain.
1447 if (Ins.empty())
1448 return Chain;
1450 // Now handle the return value(s)
1451 SmallVector<CCValAssign, 16> RVLocs;
1452 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1453 getTargetMachine(), RVLocs, *DAG.getContext());
1454 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1457 // If the call has results, copy the values out of the ret val registers.
1458 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1459 CCValAssign VA = RVLocs[i];
1461 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1463 Chain = Val.getValue(1);
1464 InFlag = Val.getValue(2);
1465 InVals.push_back(Val);
1472 SPUTargetLowering::LowerReturn(SDValue Chain,
1473 CallingConv::ID CallConv, bool isVarArg,
1474 const SmallVectorImpl<ISD::OutputArg> &Outs,
1475 const SmallVectorImpl<SDValue> &OutVals,
1476 DebugLoc dl, SelectionDAG &DAG) const {
1478 SmallVector<CCValAssign, 16> RVLocs;
1479 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1480 getTargetMachine(), RVLocs, *DAG.getContext());
1481 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1483 // If this is the first return lowered for this function, add the regs to the
1484 // liveout set for the function.
1485 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1486 for (unsigned i = 0; i != RVLocs.size(); ++i)
1487 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1488 }
1490 SDValue Flag;
1492 // Copy the result values into the output registers.
1493 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1494 CCValAssign &VA = RVLocs[i];
1495 assert(VA.isRegLoc() && "Can only return in registers!");
1496 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1497 OutVals[i], Flag);
1498 Flag = Chain.getValue(1);
1499 }
1501 if (Flag.getNode())
1502 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1503 else
1504 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1508 //===----------------------------------------------------------------------===//
1509 // Vector related lowering:
1510 //===----------------------------------------------------------------------===//
1512 static ConstantSDNode *
1513 getVecImm(SDNode *N) {
1514 SDValue OpVal(0, 0);
1516 // Check to see if this buildvec has a single non-undef value in its elements.
1517 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1518 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1519 if (OpVal.getNode() == 0)
1520 OpVal = N->getOperand(i);
1521 else if (OpVal != N->getOperand(i))
1522 return 0;
1523 }
1525 if (OpVal.getNode() != 0) {
1526 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1527 return CN;
1528 }
1529 }
1531 return 0;
1532 }
1534 /// get_vec_u18imm - Test if this vector is a vector filled with the same value
1535 /// and the value fits into an unsigned 18-bit constant, and if so, return the
1536 /// constant
1537 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1538 EVT ValueType) {
1539 if (ConstantSDNode *CN = getVecImm(N)) {
1540 uint64_t Value = CN->getZExtValue();
1541 if (ValueType == MVT::i64) {
1542 uint64_t UValue = CN->getZExtValue();
1543 uint32_t upper = uint32_t(UValue >> 32);
1544 uint32_t lower = uint32_t(UValue);
1547 Value = Value >> 32;
1549 if (Value <= 0x3ffff)
1550 return DAG.getTargetConstant(Value, ValueType);
1556 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
1557 /// and the value fits into a signed 16-bit constant, and if so, return the
1558 /// constant
1559 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1560 EVT ValueType) {
1561 if (ConstantSDNode *CN = getVecImm(N)) {
1562 int64_t Value = CN->getSExtValue();
1563 if (ValueType == MVT::i64) {
1564 uint64_t UValue = CN->getZExtValue();
1565 uint32_t upper = uint32_t(UValue >> 32);
1566 uint32_t lower = uint32_t(UValue);
1569 Value = Value >> 32;
1571 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1572 return DAG.getTargetConstant(Value, ValueType);
1579 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1580 /// and the value fits into a signed 10-bit constant, and if so, return the
1581 /// constant
1582 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1583 EVT ValueType) {
1584 if (ConstantSDNode *CN = getVecImm(N)) {
1585 int64_t Value = CN->getSExtValue();
1586 if (ValueType == MVT::i64) {
1587 uint64_t UValue = CN->getZExtValue();
1588 uint32_t upper = uint32_t(UValue >> 32);
1589 uint32_t lower = uint32_t(UValue);
1592 Value = Value >> 32;
1594 if (isInt<10>(Value))
1595 return DAG.getTargetConstant(Value, ValueType);
1601 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1602 /// and the value fits into a signed 8-bit constant, and if so, return the constant.
1605 /// @note: The incoming vector is v16i8 because that's the only way we can load
1606 /// constant vectors. Thus, we test to see if the upper and lower bytes are the same.
1608 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1610 if (ConstantSDNode *CN = getVecImm(N)) {
1611 int Value = (int) CN->getZExtValue();
1612 if (ValueType == MVT::i16
1613 && Value <= 0xffff /* truncated from uint64_t */
1614 && ((short) Value >> 8) == ((short) Value & 0xff))
1615 return DAG.getTargetConstant(Value & 0xff, ValueType);
1616 else if (ValueType == MVT::i8
1617 && (Value & 0xff) == Value)
1618 return DAG.getTargetConstant(Value, ValueType);
1624 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1625 /// and the value's low 16 bits are zero (an ILHU-style immediate), and if so, return the value shifted right by 16 as the constant.
1627 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1629 if (ConstantSDNode *CN = getVecImm(N)) {
1630 uint64_t Value = CN->getZExtValue();
1631 if ((ValueType == MVT::i32
1632 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1633 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1634 return DAG.getTargetConstant(Value >> 16, ValueType);
1640 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
1641 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1642 if (ConstantSDNode *CN = getVecImm(N)) {
1643 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1649 /// get_v2i64_imm - Catch-all for general 64-bit constant vectors
1650 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1651 if (ConstantSDNode *CN = getVecImm(N)) {
1652 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
1658 //! Lower a BUILD_VECTOR instruction creatively:
1660 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1661 EVT VT = Op.getValueType();
1662 EVT EltVT = VT.getVectorElementType();
1663 DebugLoc dl = Op.getDebugLoc();
1664 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1665 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1666 unsigned minSplatBits = EltVT.getSizeInBits();
1668 if (minSplatBits < 16)
1671 APInt APSplatBits, APSplatUndef;
1672 unsigned SplatBitSize;
1675 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1676 HasAnyUndefs, minSplatBits)
1677 || minSplatBits < SplatBitSize)
1678 return SDValue(); // Wasn't a constant vector or splat exceeded min
1680 uint64_t SplatBits = APSplatBits.getZExtValue();
1682 switch (VT.getSimpleVT().SimpleTy) {
1684 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1685 Twine(VT.getEVTString()));
1688 uint32_t Value32 = uint32_t(SplatBits);
1689 assert(SplatBitSize == 32
1690 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1691 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1692 SDValue T = DAG.getConstant(Value32, MVT::i32);
1693 return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
1694 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1697 uint64_t f64val = uint64_t(SplatBits);
1698 assert(SplatBitSize == 64
1699 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1700 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1701 SDValue T = DAG.getConstant(f64val, MVT::i64);
1702 return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
1703 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1706 // 8-bit constants have to be expanded to 16-bits
1707 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1708 SmallVector<SDValue, 8> Ops;
1710 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1711 return DAG.getNode(ISD::BITCAST, dl, VT,
1712 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1715 unsigned short Value16 = SplatBits;
1716 SDValue T = DAG.getConstant(Value16, EltVT);
1717 SmallVector<SDValue, 8> Ops;
1720 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1723 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1724 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
1727 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
1735 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1737 uint32_t upper = uint32_t(SplatVal >> 32);
1738 uint32_t lower = uint32_t(SplatVal);
1740 if (upper == lower) {
1741 // Magic constant that can be matched by IL, ILA, et al.
1742 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1743 return DAG.getNode(ISD::BITCAST, dl, OpVT,
1744 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1745 Val, Val, Val, Val));
1747 bool upper_special, lower_special;
1749 // NOTE: This code creates common-case shuffle masks that can be easily
1750 // detected as common expressions. It is not attempting to create highly
1751 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
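// For reference, the shufb selector-byte encoding relied on below: a control
// byte of the form 10xxxxxx produces 0x00, 110xxxxx produces 0xff, and
// 111xxxxx produces 0x80; any other value selects one of the 32 bytes of the
// concatenated operands (0x00-0x0f from the first, 0x10-0x1f from the second).
// The special codes (0x80, 0xe0, etc.) used further down exploit this.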
1753 // Detect if the upper or lower half is a special shuffle mask pattern:
1754 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1755 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1757 // Both upper and lower are special, lower to a constant pool load:
1758 if (lower_special && upper_special) {
1759 SDValue UpperVal = DAG.getConstant(upper, MVT::i32);
1760 SDValue LowerVal = DAG.getConstant(lower, MVT::i32);
1761 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1762 UpperVal, LowerVal, UpperVal, LowerVal);
1763 return DAG.getNode(ISD::BITCAST, dl, OpVT, BV);
1768 SmallVector<SDValue, 16> ShufBytes;
1771 // Create lower vector if not a special pattern
1772 if (!lower_special) {
1773 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1774 LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1775 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1776 LO32C, LO32C, LO32C, LO32C));
1779 // Create upper vector if not a special pattern
1780 if (!upper_special) {
1781 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1782 HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
1783 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1784 HI32C, HI32C, HI32C, HI32C));
1787 // If either upper or lower are special, then the two input operands are
1788 // the same (basically, one of them is a "don't care")
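// In the selector words built below, even words (i = 0, 2) describe the upper
// 32 bits of each doubleword and odd words (i = 1, 3) the lower 32 bits.
// Non-special halves index into HI32 (selectors 0x00-0x0f) or LO32 (0x10-0x1f,
// via the "(i & 1) * 16" term); special halves use the shufb special codes.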
1794 for (int i = 0; i < 4; ++i) {
1796 for (int j = 0; j < 4; ++j) {
1798 bool process_upper, process_lower;
1800 process_upper = (upper_special && (i & 1) == 0);
1801 process_lower = (lower_special && (i & 1) == 1);
1803 if (process_upper || process_lower) {
1804 if ((process_upper && upper == 0)
1805 || (process_lower && lower == 0))
1807 else if ((process_upper && upper == 0xffffffff)
1808 || (process_lower && lower == 0xffffffff))
1810 else if ((process_upper && upper == 0x80000000)
1811 || (process_lower && lower == 0x80000000))
1812 val |= (j == 0 ? 0xe0 : 0x80);
1814 val |= i * 4 + j + ((i & 1) * 16);
1817 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1820 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1821 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1822 &ShufBytes[0], ShufBytes.size()));
1826 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1827 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1828 /// permutation vector, V3, is monotonically increasing with one "exception"
1829 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1830 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1831 /// In either case, the net result is going to eventually invoke SHUFB to
1832 /// permute/shuffle the bytes from V1 and V2.
1834 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
1835 /// generate the control word for byte/halfword/word insertion. This takes care
1836 /// of a single element move from V2 into V1.
1838 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instruction.
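/// Illustration (not exhaustive): for a v4i32 mask such as <0, 1, 6, 3>, the
/// mask is monotonic except for slot 2, which takes element 2 of V2 into the
/// same slot, so a single SHUFFLE_MASK/CWD-style insertion suffices. A mask
/// such as <1, 2, 3, 0> is recognized as an element rotation and lowered to
/// ROTBYTES_LEFT, while something like <2, 0, 1, 3> falls through to the
/// general byte-permutation (shufb) path.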
1839 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1840 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1841 SDValue V1 = Op.getOperand(0);
1842 SDValue V2 = Op.getOperand(1);
1843 DebugLoc dl = Op.getDebugLoc();
1845 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1847 // If we have a single element being moved from V1 to V2, this can be handled
1848 // using the C*[DX] compute mask instructions, but the vector elements have
1849 // to be monotonically increasing with one exception element, and the source
1850 // slot of the element to move must be the same as the destination.
1851 EVT VecVT = V1.getValueType();
1852 EVT EltVT = VecVT.getVectorElementType();
1853 unsigned EltsFromV2 = 0;
1854 unsigned V2EltOffset = 0;
1855 unsigned V2EltIdx0 = 0;
1856 unsigned CurrElt = 0;
1857 unsigned MaxElts = VecVT.getVectorNumElements();
1858 unsigned PrevElt = 0;
1859 bool monotonic = true;
1862 EVT maskVT; // which of the c?d instructions to use
1864 if (EltVT == MVT::i8) {
1866 maskVT = MVT::v16i8;
1867 } else if (EltVT == MVT::i16) {
1869 maskVT = MVT::v8i16;
1870 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1872 maskVT = MVT::v4i32;
1873 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1875 maskVT = MVT::v2i64;
1877 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1879 for (unsigned i = 0; i != MaxElts; ++i) {
1880 if (SVN->getMaskElt(i) < 0)
1883 unsigned SrcElt = SVN->getMaskElt(i);
1886 if (SrcElt >= V2EltIdx0) {
1887 // TODO: optimize for the monotonic case when several consecutive
1888 // elements are taken from V2. Do we ever get such a case?
1889 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1890 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1894 } else if (CurrElt != SrcElt) {
1902 if (PrevElt > 0 && SrcElt < MaxElts) {
1903 if ((PrevElt == SrcElt - 1)
1904 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1909 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1910 // First time or after a "wrap around"
1914 // This isn't a rotation; it takes elements from vector 2.
1920 if (EltsFromV2 == 1 && monotonic) {
1921 // Compute mask and shuffle
1922 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1924 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address.
1925 // R1 ($sp) is used here only because it is guaranteed to have its low bits zero.
1926 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1927 DAG.getRegister(SPU::R1, PtrVT),
1928 DAG.getConstant(V2EltOffset, MVT::i32));
1929 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1932 // Use shuffle mask in SHUFB synthetic instruction:
1933 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1935 } else if (rotate) {
1938 rotamt *= EltVT.getSizeInBits()/8;
1939 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1940 V1, DAG.getConstant(rotamt, MVT::i16));
1942 // Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes.
1944 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1946 SmallVector<SDValue, 16> ResultMask;
1947 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1948 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1950 for (unsigned j = 0; j < BytesPerElement; ++j)
1951 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1953 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1954 &ResultMask[0], ResultMask.size());
1955 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
1959 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1960 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1961 DebugLoc dl = Op.getDebugLoc();
1963 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1964 // For a constant, build the appropriate constant vector, which will
1965 // eventually simplify to a vector register load.
1967 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1968 SmallVector<SDValue, 16> ConstVecValues;
1972 // Create a constant vector:
1973 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1974 default: llvm_unreachable("Unexpected constant value type in "
1975 "LowerSCALAR_TO_VECTOR");
1976 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1977 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1978 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1979 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1980 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1981 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1984 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1985 for (size_t j = 0; j < n_copies; ++j)
1986 ConstVecValues.push_back(CValue);
1988 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1989 &ConstVecValues[0], ConstVecValues.size());
1991 // Otherwise, copy the value from one register to another:
1992 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1993 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
2000 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
2005 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2006 EVT VT = Op.getValueType();
2007 SDValue N = Op.getOperand(0);
2008 SDValue Elt = Op.getOperand(1);
2009 DebugLoc dl = Op.getDebugLoc();
2012 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2013 // Constant argument:
2014 int EltNo = (int) C->getZExtValue();
2017 if (VT == MVT::i8 && EltNo >= 16)
2018 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
2019 else if (VT == MVT::i16 && EltNo >= 8)
2020 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
2021 else if (VT == MVT::i32 && EltNo >= 4)
2022 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 3");
2023 else if (VT == MVT::i64 && EltNo >= 2)
2024 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 1");
2026 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
2027 // i32 and i64: Element 0 is the preferred slot
2028 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
2031 // Need to generate shuffle mask and extract:
2032 int prefslot_begin = -1, prefslot_end = -1;
2033 int elt_byte = EltNo * VT.getSizeInBits() / 8;
2035 switch (VT.getSimpleVT().SimpleTy) {
2036 default: llvm_unreachable("Invalid value type!");
2038 prefslot_begin = prefslot_end = 3;
2042 prefslot_begin = 2; prefslot_end = 3;
2047 prefslot_begin = 0; prefslot_end = 3;
2052 prefslot_begin = 0; prefslot_end = 7;
2057 assert(prefslot_begin != -1 && prefslot_end != -1 &&
2058 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
2060 unsigned int ShufBytes[16] = {
2061 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2063 for (int i = 0; i < 16; ++i) {
2064 // zero fill upper part of preferred slot, don't care about the remaining bytes.
2066 unsigned int mask_val;
2067 if (i <= prefslot_end) {
2069 ((i < prefslot_begin)
2071 : elt_byte + (i - prefslot_begin));
2073 ShufBytes[i] = mask_val;
2075 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
2078 SDValue ShufMask[4];
2079 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
2080 unsigned bidx = i * 4;
2081 unsigned int bits = ((ShufBytes[bidx] << 24) |
2082 (ShufBytes[bidx+1] << 16) |
2083 (ShufBytes[bidx+2] << 8) |
2085 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
2088 SDValue ShufMaskVec =
2089 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2090 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
2092 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2093 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
2094 N, N, ShufMaskVec));
2096 // Variable index: Rotate the requested element into slot 0, then replicate
2097 // slot 0 across the vector
2098 EVT VecVT = N.getValueType();
2099 if (!VecVT.isSimple() || !VecVT.isVector()) {
2100 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2104 // Make life easier by making sure the index is zero-extended to i32
2105 if (Elt.getValueType() != MVT::i32)
2106 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2108 // Scale the index to a bit/byte shift quantity
2110 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2111 unsigned scaleShift = scaleFactor.logBase2();
2114 if (scaleShift > 0) {
2115 // Scale the shift factor:
2116 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2117 DAG.getConstant(scaleShift, MVT::i32));
2120 vecShift = DAG.getNode(SPUISD::SHL_BYTES, dl, VecVT, N, Elt);
2122 // Replicate the bytes starting at byte 0 across the entire vector (for
2123 // consistency with the notion of a unified register set)
2126 switch (VT.getSimpleVT().SimpleTy) {
2128 report_fatal_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector"
2132 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2133 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2134 factor, factor, factor, factor);
2138 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2139 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2140 factor, factor, factor, factor);
2145 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2146 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2147 factor, factor, factor, factor);
2152 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2153 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2154 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2155 loFactor, hiFactor, loFactor, hiFactor);
2160 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2161 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2162 vecShift, vecShift, replicate));
2168 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2169 SDValue VecOp = Op.getOperand(0);
2170 SDValue ValOp = Op.getOperand(1);
2171 SDValue IdxOp = Op.getOperand(2);
2172 DebugLoc dl = Op.getDebugLoc();
2173 EVT VT = Op.getValueType();
2174 EVT eltVT = ValOp.getValueType();
2176 // use 0 when the lane to insert to is 'undef'
2178 if (IdxOp.getOpcode() != ISD::UNDEF) {
2179 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2180 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2181 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2184 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2185 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2186 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2187 DAG.getRegister(SPU::R1, PtrVT),
2188 DAG.getConstant(Offset, PtrVT));
2189 // widen the mask when dealing with half vectors
2190 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2191 128/ VT.getVectorElementType().getSizeInBits());
2192 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
2195 DAG.getNode(SPUISD::SHUFB, dl, VT,
2196 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2198 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
2203 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2204 const TargetLowering &TLI)
2206 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2207 DebugLoc dl = Op.getDebugLoc();
2208 EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
2210 assert(Op.getValueType() == MVT::i8);
2213 llvm_unreachable("Unhandled i8 math operator");
2215 // 8-bit addition: Promote the arguments up to 16 bits and truncate the result back down to i8:
2217 SDValue N1 = Op.getOperand(1);
2218 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2219 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2220 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2221 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2226 // 8-bit subtraction: Promote the arguments up to 16 bits and truncate the result back down to i8:
2228 SDValue N1 = Op.getOperand(1);
2229 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2230 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2231 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2232 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2236 SDValue N1 = Op.getOperand(1);
2237 EVT N1VT = N1.getValueType();
2239 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2240 if (!N1VT.bitsEq(ShiftVT)) {
2241 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2244 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2247 // Replicate lower 8-bits into upper 8:
2249 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2250 DAG.getNode(ISD::SHL, dl, MVT::i16,
2251 N0, DAG.getConstant(8, MVT::i32)));
2253 // Truncate back down to i8
2254 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2255 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
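// Worked example of the replicate-and-rotate trick above (the i8 rotate case):
// rotl i8 0xab by 4 is computed as rotl i16 0xabab by 4 = 0xbaba, and the
// final truncate yields 0xba, the correct 8-bit rotate result.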
2259 SDValue N1 = Op.getOperand(1);
2260 EVT N1VT = N1.getValueType();
2262 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2263 if (!N1VT.bitsEq(ShiftVT)) {
2264 unsigned N1Opc = ISD::ZERO_EXTEND;
2266 if (N1.getValueType().bitsGT(ShiftVT))
2267 N1Opc = ISD::TRUNCATE;
2269 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2272 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2273 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2276 SDValue N1 = Op.getOperand(1);
2277 EVT N1VT = N1.getValueType();
2279 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2280 if (!N1VT.bitsEq(ShiftVT)) {
2281 unsigned N1Opc = ISD::SIGN_EXTEND;
2283 if (N1VT.bitsGT(ShiftVT))
2284 N1Opc = ISD::TRUNCATE;
2285 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2288 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2289 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2292 SDValue N1 = Op.getOperand(1);
2294 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2295 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2296 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2297 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2302 //! Lower byte immediate operations for v16i8 vectors:
2304 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2307 EVT VT = Op.getValueType();
2308 DebugLoc dl = Op.getDebugLoc();
2310 ConstVec = Op.getOperand(0);
2311 Arg = Op.getOperand(1);
2312 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2313 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2314 ConstVec = ConstVec.getOperand(0);
2316 ConstVec = Op.getOperand(1);
2317 Arg = Op.getOperand(0);
2318 if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
2319 ConstVec = ConstVec.getOperand(0);
2324 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2325 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2326 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2328 APInt APSplatBits, APSplatUndef;
2329 unsigned SplatBitSize;
2331 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2333 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2334 HasAnyUndefs, minSplatBits)
2335 && minSplatBits <= SplatBitSize) {
2336 uint64_t SplatBits = APSplatBits.getZExtValue();
2337 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2339 SmallVector<SDValue, 16> tcVec;
2340 tcVec.assign(16, tc);
2341 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2342 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2346 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2347 // lowered. Return the operation, rather than a null SDValue.
2351 //! Custom lowering for CTPOP (count population)
2353 Custom lowering code that counts the number of ones in the input
2354 operand. SPU has such an instruction, but it counts the number of
2355 ones per byte, which then have to be accumulated.
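// Rough scalar sketch of the i32 accumulation below (illustration only, not
// the emitted DAG); per_byte_popcounts stands for what CNTB produces:
//   uint32_t cnt  = per_byte_popcounts(x);
//   uint32_t sum1 = (cnt >> 16) + cnt;
//   uint32_t sum2 = (sum1 >> 8) + sum1;
//   uint32_t res  = sum2 & 0xff;            // == popcount(x)
// e.g. x = 0xF00F00FF gives cnt = 0x04040008, sum1 = 0x0404040C,
// sum2 = 0x04080410 and res = 0x10 = 16.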
2357 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2358 EVT VT = Op.getValueType();
2359 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2360 VT, (128 / VT.getSizeInBits()));
2361 DebugLoc dl = Op.getDebugLoc();
2363 switch (VT.getSimpleVT().SimpleTy) {
2364 default: llvm_unreachable("Invalid value type!");
2366 SDValue N = Op.getOperand(0);
2367 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2369 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2370 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2372 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2376 MachineFunction &MF = DAG.getMachineFunction();
2377 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2379 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2381 SDValue N = Op.getOperand(0);
2382 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2383 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2384 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2386 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2387 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2389 // CNTB_result becomes the chain to which all of the virtual registers
2390 // CNTB_reg, SUM1_reg become associated:
2391 SDValue CNTB_result =
2392 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2394 SDValue CNTB_rescopy =
2395 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2397 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2399 return DAG.getNode(ISD::AND, dl, MVT::i16,
2400 DAG.getNode(ISD::ADD, dl, MVT::i16,
2401 DAG.getNode(ISD::SRL, dl, MVT::i16,
2408 MachineFunction &MF = DAG.getMachineFunction();
2409 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2411 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2412 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2414 SDValue N = Op.getOperand(0);
2415 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2416 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2417 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2418 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2420 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2421 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2423 // CNTB_result becomes the chain to which all of the virtual registers
2424 // CNTB_reg, SUM1_reg become associated:
2425 SDValue CNTB_result =
2426 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2428 SDValue CNTB_rescopy =
2429 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2432 DAG.getNode(ISD::SRL, dl, MVT::i32,
2433 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2437 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2438 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2440 SDValue Sum1_rescopy =
2441 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2444 DAG.getNode(ISD::SRL, dl, MVT::i32,
2445 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2448 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2449 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2451 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2461 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2463 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2464 All conversions to i64 are expanded to a libcall.
2466 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2467 const SPUTargetLowering &TLI) {
2468 EVT OpVT = Op.getValueType();
2469 SDValue Op0 = Op.getOperand(0);
2470 EVT Op0VT = Op0.getValueType();
2472 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2473 || OpVT == MVT::i64) {
2474 // Convert f32 / f64 to i32 / i64 via libcall.
2476 (Op.getOpcode() == ISD::FP_TO_SINT)
2477 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2478 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2479 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-int conversion!");
2481 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2487 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2489 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2490 All conversions from i64 are expanded to a libcall.
2492 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2493 const SPUTargetLowering &TLI) {
2494 EVT OpVT = Op.getValueType();
2495 SDValue Op0 = Op.getOperand(0);
2496 EVT Op0VT = Op0.getValueType();
2498 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2499 || Op0VT == MVT::i64) {
2500 // Convert i32, i64 to f64 via libcall:
2502 (Op.getOpcode() == ISD::SINT_TO_FP)
2503 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2504 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2505 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");
2507 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2513 //! Lower ISD::SETCC
2515 This handles MVT::f64 (double floating point) condition lowering
2517 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2518 const TargetLowering &TLI) {
2519 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2520 DebugLoc dl = Op.getDebugLoc();
2521 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2523 SDValue lhs = Op.getOperand(0);
2524 SDValue rhs = Op.getOperand(1);
2525 EVT lhsVT = lhs.getValueType();
2526 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");
2528 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2529 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2530 EVT IntVT(MVT::i64);
2532 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2533 // selected to a NOP:
2534 SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
2536 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2537 DAG.getNode(ISD::SRL, dl, IntVT,
2538 i64lhs, DAG.getConstant(32, MVT::i32)));
2539 SDValue lhsHi32abs =
2540 DAG.getNode(ISD::AND, dl, MVT::i32,
2541 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2543 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2545 // SETO and SETUO only use the lhs operand:
2546 if (CC->get() == ISD::SETO) {
2547 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of the unordered (SETUO) comparison:
2549 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2550 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2551 DAG.getSetCC(dl, ccResultVT,
2552 lhs, DAG.getConstantFP(0.0, lhsVT),
2554 DAG.getConstant(ccResultAllOnes, ccResultVT));
2555 } else if (CC->get() == ISD::SETUO) {
2556 // Evaluates to true if Op0 is [SQ]NaN
2557 return DAG.getNode(ISD::AND, dl, ccResultVT,
2558 DAG.getSetCC(dl, ccResultVT,
2560 DAG.getConstant(0x7ff00000, MVT::i32),
2562 DAG.getSetCC(dl, ccResultVT,
2564 DAG.getConstant(0, MVT::i32),
2568 SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
2570 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2571 DAG.getNode(ISD::SRL, dl, IntVT,
2572 i64rhs, DAG.getConstant(32, MVT::i32)));
2574 // If a value is negative, subtract from the sign magnitude constant:
2575 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2577 // Convert the sign-magnitude representation into 2's complement:
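// The subtraction maps a negative double (sign bit set) to minus its magnitude
// in two's complement, while non-negative doubles keep their bit pattern, so a
// plain signed integer compare of the selected values orders the original
// doubles correctly; NaN inputs are dealt with by the ordered/unordered
// handling elsewhere in this function.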
2578 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2579 lhsHi32, DAG.getConstant(31, MVT::i32));
2580 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2582 DAG.getNode(ISD::SELECT, dl, IntVT,
2583 lhsSelectMask, lhsSignMag2TC, i64lhs);
2585 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2586 rhsHi32, DAG.getConstant(31, MVT::i32));
2587 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2589 DAG.getNode(ISD::SELECT, dl, IntVT,
2590 rhsSelectMask, rhsSignMag2TC, i64rhs);
2594 switch (CC->get()) {
2597 compareOp = ISD::SETEQ; break;
2600 compareOp = ISD::SETGT; break;
2603 compareOp = ISD::SETGE; break;
2606 compareOp = ISD::SETLT; break;
2609 compareOp = ISD::SETLE; break;
2612 compareOp = ISD::SETNE; break;
2614 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2618 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2619 (ISD::CondCode) compareOp);
2621 if ((CC->get() & 0x8) == 0) {
2622 // Ordered comparison:
2623 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2624 lhs, DAG.getConstantFP(0.0, MVT::f64),
2626 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2627 rhs, DAG.getConstantFP(0.0, MVT::f64),
2629 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2631 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2637 //! Lower ISD::SELECT_CC
2639 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the SELB instruction.
2642 \note Need to revisit this in the future: if the code path through the true
2643 and false value computations is longer than the latency of a branch (6
2644 cycles), then it would be more advantageous to branch and insert a new basic
2645 block and branch on the condition. However, this code does not make that
2646 assumption, given the simplistic uses so far.
2649 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2650 const TargetLowering &TLI) {
2651 EVT VT = Op.getValueType();
2652 SDValue lhs = Op.getOperand(0);
2653 SDValue rhs = Op.getOperand(1);
2654 SDValue trueval = Op.getOperand(2);
2655 SDValue falseval = Op.getOperand(3);
2656 SDValue condition = Op.getOperand(4);
2657 DebugLoc dl = Op.getDebugLoc();
2659 // NOTE: SELB's arguments: $rA, $rB, $mask
2661 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2662 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2663 // condition was true and 0s where the condition was false. Hence, the
2664 // arguments to SELB get reversed.
2666 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2667 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2668 // with another "cannot select select_cc" assert:
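// Concretely: when the setcc mask is all ones (condition true), SELB takes its
// bits from the second value operand, so passing (falseval, trueval, compare)
// yields trueval exactly when the condition holds.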
2670 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2671 TLI.getSetCCResultType(Op.getValueType()),
2672 lhs, rhs, condition);
2673 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2676 //! Custom lower ISD::TRUNCATE
2677 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2679 // Type to truncate to
2680 EVT VT = Op.getValueType();
2681 MVT simpleVT = VT.getSimpleVT();
2682 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2683 VT, (128 / VT.getSizeInBits()));
2684 DebugLoc dl = Op.getDebugLoc();
2686 // Type to truncate from
2687 SDValue Op0 = Op.getOperand(0);
2688 EVT Op0VT = Op0.getValueType();
2690 if (Op0VT == MVT::i128 && simpleVT == MVT::i64) {
2691 // Create shuffle mask, least significant doubleword of quadword
2692 unsigned maskHigh = 0x08090a0b;
2693 unsigned maskLow = 0x0c0d0e0f;
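// The SPU register file is big-endian, so bytes 8-15 of the i128 hold its
// least significant doubleword; the selectors 0x08..0x0f below copy that
// doubleword into the preferred slot (bytes 0-7) for VEC2PREFSLOT to read.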
2694 // Use a shuffle to perform the truncation
2695 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2696 DAG.getConstant(maskHigh, MVT::i32),
2697 DAG.getConstant(maskLow, MVT::i32),
2698 DAG.getConstant(maskHigh, MVT::i32),
2699 DAG.getConstant(maskLow, MVT::i32));
2701 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2702 Op0, Op0, shufMask);
2704 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2707 return SDValue(); // Leave the truncate unmolested
2711 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2712 * algorithm is to duplicate the sign bit using rotmai to generate at
2713 * least one byte full of sign bits. Then propagate the "sign-byte" into
2714 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2716 * @param Op The sext operand
2717 * @param DAG The current DAG
2718 * @return The SDValue with the entire instruction sequence
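// For an i32 source, the selector words come out as {0x10101010, 0x10101010,
// 0x10101010, 0x00010203}: the first twelve result bytes all take byte 0x10
// (a byte of the shifted sign-bit vector, i.e. 0x00 or 0xff) and the last four
// take bytes 0-3 of the original value, yielding the sign-extended i128.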
2720 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2722 DebugLoc dl = Op.getDebugLoc();
2724 // Type to extend to
2725 MVT OpVT = Op.getValueType().getSimpleVT();
2727 // Type to extend from
2728 SDValue Op0 = Op.getOperand(0);
2729 MVT Op0VT = Op0.getValueType().getSimpleVT();
2731 // extend i8 & i16 via i32
2732 if (Op0VT == MVT::i8 || Op0VT == MVT::i16) {
2733 Op0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Op0);
2737 // The type to extend to needs to be an i128 and
2738 // the type to extend from needs to be i64 or i32.
2739 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2740 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2743 // Create shuffle mask
2744 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2745 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2746 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2747 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2748 DAG.getConstant(mask1, MVT::i32),
2749 DAG.getConstant(mask1, MVT::i32),
2750 DAG.getConstant(mask2, MVT::i32),
2751 DAG.getConstant(mask3, MVT::i32));
2753 // Word wise arithmetic right shift to generate at least one byte
2754 // that contains sign bits.
2755 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2756 SDValue sraVal = DAG.getNode(ISD::SRA,
2759 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2760 DAG.getConstant(31, MVT::i32));
2762 // reinterpret as an i128 (SHUFB requires it). This gets lowered away.
2763 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2765 DAG.getTargetConstant(
2766 SPU::GPRCRegClass.getID(),
2768 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2769 // and the input value into the lower 64 bits.
2770 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2771 extended, sraVal, shufMask);
2772 return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
2775 //! Custom (target-specific) lowering entry point
2777 This is where LLVM's DAG selection process calls to do target-specific lowering of nodes.
2781 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2783 unsigned Opc = (unsigned) Op.getOpcode();
2784 EVT VT = Op.getValueType();
2789 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2790 errs() << "Op.getOpcode() = " << Opc << "\n";
2791 errs() << "*Op.getNode():\n";
2792 Op.getNode()->dump();
2794 llvm_unreachable(0);
2800 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2802 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2803 case ISD::ConstantPool:
2804 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2805 case ISD::GlobalAddress:
2806 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2807 case ISD::JumpTable:
2808 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2809 case ISD::ConstantFP:
2810 return LowerConstantFP(Op, DAG);
2812 // i8, i64 math ops:
2821 return LowerI8Math(Op, DAG, Opc, *this);
2825 case ISD::FP_TO_SINT:
2826 case ISD::FP_TO_UINT:
2827 return LowerFP_TO_INT(Op, DAG, *this);
2829 case ISD::SINT_TO_FP:
2830 case ISD::UINT_TO_FP:
2831 return LowerINT_TO_FP(Op, DAG, *this);
2833 // Vector-related lowering.
2834 case ISD::BUILD_VECTOR:
2835 return LowerBUILD_VECTOR(Op, DAG);
2836 case ISD::SCALAR_TO_VECTOR:
2837 return LowerSCALAR_TO_VECTOR(Op, DAG);
2838 case ISD::VECTOR_SHUFFLE:
2839 return LowerVECTOR_SHUFFLE(Op, DAG);
2840 case ISD::EXTRACT_VECTOR_ELT:
2841 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2842 case ISD::INSERT_VECTOR_ELT:
2843 return LowerINSERT_VECTOR_ELT(Op, DAG);
2845 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2849 return LowerByteImmed(Op, DAG);
2851 // Vector and i8 multiply:
2854 return LowerI8Math(Op, DAG, Opc, *this);
2857 return LowerCTPOP(Op, DAG);
2859 case ISD::SELECT_CC:
2860 return LowerSELECT_CC(Op, DAG, *this);
2863 return LowerSETCC(Op, DAG, *this);
2866 return LowerTRUNCATE(Op, DAG);
2868 case ISD::SIGN_EXTEND:
2869 return LowerSIGN_EXTEND(Op, DAG);
2875 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2876 SmallVectorImpl<SDValue>&Results,
2877 SelectionDAG &DAG) const
2880 unsigned Opc = (unsigned) N->getOpcode();
2881 EVT OpVT = N->getValueType(0);
2885 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2886 errs() << "Op.getOpcode() = " << Opc << "\n";
2887 errs() << "*Op.getNode():\n";
2895 /* Otherwise, return unchanged */
2898 //===----------------------------------------------------------------------===//
2899 // Target Optimization Hooks
2900 //===----------------------------------------------------------------------===//
2903 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2906 TargetMachine &TM = getTargetMachine();
2908 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2909 SelectionDAG &DAG = DCI.DAG;
2910 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2911 EVT NodeVT = N->getValueType(0); // The node's value type
2912 EVT Op0VT = Op0.getValueType(); // The first operand's result
2913 SDValue Result; // Initially, empty result
2914 DebugLoc dl = N->getDebugLoc();
2916 switch (N->getOpcode()) {
2919 SDValue Op1 = N->getOperand(1);
2921 if (Op0.getOpcode() == SPUISD::IndirectAddr
2922 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2923 // Normalize the operands to reduce repeated code
2924 SDValue IndirectArg = Op0, AddArg = Op1;
2926 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2931 if (isa<ConstantSDNode>(AddArg)) {
2932 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2933 SDValue IndOp1 = IndirectArg.getOperand(1);
2935 if (CN0->isNullValue()) {
2936 // (add (SPUindirect <arg>, <arg>), 0) ->
2937 // (SPUindirect <arg>, <arg>)
2939 #if !defined(NDEBUG)
2940 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2942 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2943 << "With: (SPUindirect <arg>, <arg>)\n";
2948 } else if (isa<ConstantSDNode>(IndOp1)) {
2949 // (add (SPUindirect <arg>, <const>), <const>) ->
2950 // (SPUindirect <arg>, <const + const>)
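// For example (constants chosen arbitrarily):
// (add (SPUindirect R, 16), 32) -> (SPUindirect R, 48)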
2951 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2952 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2953 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2955 #if !defined(NDEBUG)
2956 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2958 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2959 << "), " << CN0->getSExtValue() << ")\n"
2960 << "With: (SPUindirect <arg>, "
2961 << combinedConst << ")\n";
2965 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2966 IndirectArg, combinedValue);
2972 case ISD::SIGN_EXTEND:
2973 case ISD::ZERO_EXTEND:
2974 case ISD::ANY_EXTEND: {
2975 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2976 // (any_extend (SPUextract_elt0 <arg>)) ->
2977 // (SPUextract_elt0 <arg>)
2978 // Types must match, however...
2979 #if !defined(NDEBUG)
2980 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2981 errs() << "\nReplace: ";
2983 errs() << "\nWith: ";
2984 Op0.getNode()->dump(&DAG);
2993 case SPUISD::IndirectAddr: {
2994 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2995 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2996 if (CN != 0 && CN->isNullValue()) {
2997 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2998 // (SPUaform <addr>, 0)
3000 DEBUG(errs() << "Replace: ");
3001 DEBUG(N->dump(&DAG));
3002 DEBUG(errs() << "\nWith: ");
3003 DEBUG(Op0.getNode()->dump(&DAG));
3004 DEBUG(errs() << "\n");
3008 } else if (Op0.getOpcode() == ISD::ADD) {
3009 SDValue Op1 = N->getOperand(1);
3010 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
3011 // (SPUindirect (add <arg>, <arg>), 0) ->
3012 // (SPUindirect <arg>, <arg>)
3013 if (CN1->isNullValue()) {
3015 #if !defined(NDEBUG)
3016 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
3018 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
3019 << "With: (SPUindirect <arg>, <arg>)\n";
3023 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
3024 Op0.getOperand(0), Op0.getOperand(1));
3030 case SPUISD::SHL_BITS:
3031 case SPUISD::SHL_BYTES:
3032 case SPUISD::ROTBYTES_LEFT: {
3033 SDValue Op1 = N->getOperand(1);
3035 // Kill degenerate vector shifts:
3036 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3037 if (CN->isNullValue()) {
3043 case SPUISD::PREFSLOT2VEC: {
3044 switch (Op0.getOpcode()) {
3047 case ISD::ANY_EXTEND:
3048 case ISD::ZERO_EXTEND:
3049 case ISD::SIGN_EXTEND: {
3050 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) -> <arg>,
3052 // but only if the SPUprefslot2vec and <arg> types match.
3053 SDValue Op00 = Op0.getOperand(0);
3054 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
3055 SDValue Op000 = Op00.getOperand(0);
3056 if (Op000.getValueType() == NodeVT) {
3062 case SPUISD::VEC2PREFSLOT: {
3063 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) -> <arg>
3065 Result = Op0.getOperand(0);
3073 // Otherwise, return unchanged.
3075 if (Result.getNode()) {
3076 DEBUG(errs() << "\nReplace.SPU: ");
3077 DEBUG(N->dump(&DAG));
3078 DEBUG(errs() << "\nWith: ");
3079 DEBUG(Result.getNode()->dump(&DAG));
3080 DEBUG(errs() << "\n");
3087 //===----------------------------------------------------------------------===//
3088 // Inline Assembly Support
3089 //===----------------------------------------------------------------------===//
3091 /// getConstraintType - Given a constraint letter, return the type of
3092 /// constraint it is for this target.
3093 SPUTargetLowering::ConstraintType
3094 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
3095 if (ConstraintLetter.size() == 1) {
3096 switch (ConstraintLetter[0]) {
3103 return C_RegisterClass;
3106 return TargetLowering::getConstraintType(ConstraintLetter);
3109 /// Examine constraint type and operand type and determine a weight value.
3110 /// This object must already have been set up with the operand type
3111 /// and the current alternative constraint selected.
3112 TargetLowering::ConstraintWeight
3113 SPUTargetLowering::getSingleConstraintMatchWeight(
3114 AsmOperandInfo &info, const char *constraint) const {
3115 ConstraintWeight weight = CW_Invalid;
3116 Value *CallOperandVal = info.CallOperandVal;
3117 // If we don't have a value, we can't do a match,
3118 // but allow it at the lowest weight.
3119 if (CallOperandVal == NULL)
3121 // Look at the constraint type.
3122 switch (*constraint) {
3124 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3126 //FIXME: Seems like the supported constraint letters were just copied
3127 // from PPC, as the following doesn't correspond to the GCC docs.
3128 // I'm leaving it as-is until someone adds the corresponding lowering support.
3135 weight = CW_Register;
3141 std::pair<unsigned, const TargetRegisterClass*>
3142 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3145 if (Constraint.size() == 1) {
3146 // GCC RS6000 Constraint Letters
3147 switch (Constraint[0]) {
3151 return std::make_pair(0U, SPU::R64CRegisterClass);
3152 return std::make_pair(0U, SPU::R32CRegisterClass);
3155 return std::make_pair(0U, SPU::R32FPRegisterClass);
3156 else if (VT == MVT::f64)
3157 return std::make_pair(0U, SPU::R64FPRegisterClass);
3160 return std::make_pair(0U, SPU::GPRCRegisterClass);
3164 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3167 //! Compute used/known bits for a SPU operand
3169 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3173 const SelectionDAG &DAG,
3174 unsigned Depth ) const {
3176 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3178 switch (Op.getOpcode()) {
3180 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3186 case SPUISD::PREFSLOT2VEC:
3187 case SPUISD::LDRESULT:
3188 case SPUISD::VEC2PREFSLOT:
3189 case SPUISD::SHLQUAD_L_BITS:
3190 case SPUISD::SHLQUAD_L_BYTES:
3191 case SPUISD::VEC_ROTL:
3192 case SPUISD::VEC_ROTR:
3193 case SPUISD::ROTBYTES_LEFT:
3194 case SPUISD::SELECT_MASK:
3201 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3202 unsigned Depth) const {
3203 switch (Op.getOpcode()) {
3208 EVT VT = Op.getValueType();
3210 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3213 return VT.getSizeInBits();
3218 // LowerAsmOperandForConstraint
3220 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3221 std::string &Constraint,
3222 std::vector<SDValue> &Ops,
3223 SelectionDAG &DAG) const {
3224 // Default, for the time being, to the base class handler
3225 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3228 /// isLegalAddressImmediate - Return true if the integer value can be used
3229 /// as the offset of the target addressing mode.
3230 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3232 // SPU's addresses are 256K:
3233 return (V > -(1 << 18) && V < (1 << 18) - 1);
3236 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3241 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3242 // The SPU target isn't yet aware of offsets.
3246 // can we compare to Imm without writing it into a register?
3247 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3248 // ceqi, cgti, etc. all take an s10 operand
3249 return isInt<10>(Imm);
3253 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3257 // A-form: 18-bit absolute address.
3257 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3261 // D-form: reg + 14-bit offset
3261 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3265 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)