2 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the SPUTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "SPURegisterNames.h"
15 #include "SPUISelLowering.h"
16 #include "SPUTargetMachine.h"
17 #include "SPUFrameInfo.h"
18 #include "SPUMachineFunction.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Function.h"
21 #include "llvm/Intrinsics.h"
22 #include "llvm/CallingConv.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/SelectionDAG.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/ADT/VectorExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
// Used in getTargetNodeName() below.
// Maps SPUISD opcodes to printable names for DAG dumps; filled lazily on
// first call to getTargetNodeName().  NOTE(review): a file-local mutable
// global — not thread-safe if nodes are named from multiple threads.
std::map<unsigned, const char *> node_names;
//! EVT mapping to useful data for Cell SPU
// Per-value-type lookup record.  From its uses below, each entry carries at
// least:
//   valtype       - the EVT this entry describes (compared in
//                   getValueTypeMapEntry)
//   prefslot_byte - byte offset of the type's preferred slot within a
//                   16-byte quadword (used by LowerLOAD/LowerSTORE rotates)
// NOTE(review): the struct's member list and the table's initializers are
// not visible in this listing.
struct valtype_map_s {
const valtype_map_s valtype_map[] = {
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
//! Linear-search valtype_map for the entry describing VT.
// Returns a pointer into valtype_map.  A failed lookup is fatal: the
// report_fatal_error path below aborts compilation with the EVT's name.
// NOTE(review): the loop's closing braces and the guard around the fatal
// path (presumably "if (retval == 0)") are not visible in this listing —
// confirm against the full file before editing.
const valtype_map_s *getValueTypeMapEntry(EVT VT) {
  const valtype_map_s *retval = 0;

  for (size_t i = 0; i < n_valtype_map; ++i) {
    if (valtype_map[i].valtype == VT) {
      retval = valtype_map + i;

  report_fatal_error("getValueTypeMapEntry returns NULL for " +
                     Twine(VT.getEVTString()));
//! Expand a library call into an actual call DAG node
//
// This code is taken from SelectionDAGLegalize, since it is not exposed as
// part of the LLVM SelectionDAG API.
//
// Builds an ArgListTy from Op's operands (sign- or zero-extending each
// according to isSigned), resolves the RTLIB libcall name to an external
// symbol, and lowers it via TLI.LowerCallTo.  Returns the call's result
// value (CallInfo.first); the chain result (CallInfo.second) is dropped.
// NOTE(review): the "static SDValue" return-type line and the closing brace
// are not visible in this listing; the Hi out-parameter is never written in
// the visible body.
ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
              bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);

  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),

  // Splice the libcall in wherever FindInputOutputChains tells us to.
  Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG, Op.getDebugLoc());

  return CallInfo.first;
//! Cell SPU target lowering constructor.
// Declares to the SelectionDAG legalizer which operations the SPU supports
// natively (Legal), which are custom lowered by this file (Custom), which are
// promoted to a wider type (Promote), and which are expanded into generic
// sequences or libcalls (Expand).  NOTE(review): the member-initializer tail,
// the opening/closing braces, and several loop-header continuations are not
// visible in this listing.
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),

  // Fold away setcc operations if possible.

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  // No truncating stores from i128, nor f64 -> f32:
  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  // (scalar integer types i8..i64; the bound excludes i128)
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    // Expand truncating stores from VT to every narrower integer type:
    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Same treatment for the scalar FP types (bound excludes f64):
  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i128, Expand);
  setOperationAction(ISD::UREM, MVT::i128, Expand);
  setOperationAction(ISD::SDIV, MVT::i128, Expand);
  setOperationAction(ISD::UDIV, MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8, Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8, Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8, i32 and i64 multiplications
  setOperationAction(ISD::MUL, MVT::i8, Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);

  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP. It does have i32 support CTLZ.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);

  setOperationAction(ISD::CTTZ , MVT::i8, Expand);
  setOperationAction(ISD::CTTZ , MVT::i16, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i128, Expand);

  setOperationAction(ISD::CTLZ , MVT::i8, Promote);
  setOperationAction(ISD::CTLZ , MVT::i16, Promote);
  setOperationAction(ISD::CTLZ , MVT::i32, Legal);
  setOperationAction(ISD::CTLZ , MVT::i64, Expand);
  setOperationAction(ISD::CTLZ , MVT::i128, Expand);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8, Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8, Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  // NOTE(review): FP_TO_UINT/i32 and FP_TO_SINT/i64 are set again further
  // down (Promote and Custom respectively); the later calls take effect.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);

  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall

  // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  // NOTE(review): this overrides the earlier FP_TO_SINT/i64 Expand setting.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  // NOTE(review): this overrides the earlier FP_TO_UINT/i32 Custom setting.
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul is marked Legal here.  NOTE(review): the original comment claimed
    // mul "has to be custom lowered" — stale with respect to the code.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND, VT, Legal);
    setOperationAction(ISD::OR, VT, Legal);
    setOperationAction(ISD::XOR, VT, Legal);
    setOperationAction(ISD::LOAD, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE, VT, Legal);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);

  // v16i8 logic ops are custom lowered (overriding the per-VT Legal above):
  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR, MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
//! Return a printable name for a SPUISD target-specific DAG opcode.
// Fills the file-global node_names map on first use, then looks Opcode up;
// returns 0 (null) for unknown opcodes.  NOTE(review): the lazily-filled
// global map is not thread-safe, and the function's return-type line and
// braces are not visible in this listing.
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";

  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
/// getFunctionAlignment - Return the Log2 alignment of this function.
// NOTE(review): the function body (the returned constant) is not visible in
// this listing.
unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//

// i8, i16 and i32 keep their own type as the SETCC result; other types fall
// through to the (elided) default.  NOTE(review): the ternary's false arm
// and the closing brace are not visible in this listing.
MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
          VT.getSimpleVT().SimpleTy :
540 //===----------------------------------------------------------------------===//
541 // Calling convention code:
542 //===----------------------------------------------------------------------===//
544 #include "SPUGenCallingConv.inc"
546 //===----------------------------------------------------------------------===//
547 // LowerOperation implementation
548 //===----------------------------------------------------------------------===//
/// Custom lower loads for CellSPU
//
// All CellSPU loads and stores are aligned to 16-byte boundaries, so for
// elements within a 16-byte block, we have to rotate to extract the
// requested element.
//
// For extending loads, we also want to ensure that the following sequence is
// emitted, e.g. for MVT::f32 extending load to MVT::f64:
//
//   %2 v16i8,ch = rotate %1
//   %3 v4f32,ch = bitconvert %2
//   %4 f32     = vec2prefslot %3
//   %5 f64     = fp_extend %4
//
// Strategy: re-emit the load as an aligned v16i8 quadword load, rotate the
// requested scalar into the preferred slot, extract it, then apply any
// sign/zero/fp extension.  NOTE(review): several declarations (CN, rotate,
// result), else-branch headers and closing braces are not visible in this
// listing — the control flow below is incomplete as shown.
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();          // type in memory
  EVT OutVT = Op.getValueType();         // type produced (may be wider)
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
  DebugLoc dl = Op.getDebugLoc();

  switch (LN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    SDValue basePtr = LN->getBasePtr();

    if (alignment == 16) {
      // Special cases for a known aligned load to simplify the base pointer
      // and the rotation amount:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();
        // Rotate amount = byte position within the quadword minus the
        // preferred-slot byte for this type:
        int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);

        rotate = DAG.getConstant(rotamt, MVT::i16);

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                DAG.getConstant((offset & ~0xf), PtrVT));
      } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
                 || (basePtr.getOpcode() == SPUISD::IndirectAddr
                     && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                     && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
        // Plain aligned a-form address: rotate into preferred slot
        // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
        int64_t rotamt = -vtm->prefslot_byte;

        rotate = DAG.getConstant(rotamt, MVT::i16);

        // Offset the rotate amount by the basePtr and the preferred slot
        int64_t rotamt = -vtm->prefslot_byte;

        rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                             DAG.getConstant(rotamt, PtrVT));

      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);

          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);

        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant(0, PtrVT));

      // Offset the rotate amount by the basePtr and the preferred slot
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getConstant(-vtm->prefslot_byte, PtrVT));

    // Re-emit as a v16i8 vector load
    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                         LN->getSrcValue(), LN->getSrcValueOffset(),
                         LN->isVolatile(), LN->isNonTemporal(), 16);

    // Update the chain
    the_chain = result.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
                         result.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));

    // Handle extending loads by extending the scalar result:
    if (ExtType == ISD::SEXTLOAD) {
      result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::ZEXTLOAD) {
      result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::EXTLOAD) {
      unsigned NewOpc = ISD::ANY_EXTEND;

      if (OutVT.isFloatingPoint())
        NewOpc = ISD::FP_EXTEND;

      result = DAG.getNode(NewOpc, dl, OutVT, result);

    // Wrap value + chain in a target LDRESULT node:
    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
    SDValue retops[2] = {
    result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                         retops, sizeof(retops) / sizeof(retops[0]));

  // Pre/post-indexed addressing is not supported; abort compilation.
  case ISD::LAST_INDEXED_MODE:
    report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
                       Twine((unsigned)LN->getAddressingMode()));
722 /// Custom lower stores for CellSPU
724 All CellSPU stores are aligned to 16-byte boundaries, so for elements
725 within a 16-byte block, we have to generate a shuffle to insert the
726 requested element into its place, then store the resulting block.
729 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
730 StoreSDNode *SN = cast<StoreSDNode>(Op);
731 SDValue Value = SN->getValue();
732 EVT VT = Value.getValueType();
733 EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
734 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
735 DebugLoc dl = Op.getDebugLoc();
736 unsigned alignment = SN->getAlignment();
738 switch (SN->getAddressingMode()) {
739 case ISD::UNINDEXED: {
740 // The vector type we really want to load from the 16-byte chunk.
741 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
742 VT, (128 / VT.getSizeInBits()));
744 SDValue alignLoadVec;
745 SDValue basePtr = SN->getBasePtr();
746 SDValue the_chain = SN->getChain();
747 SDValue insertEltOffs;
749 if (alignment == 16) {
751 // Special cases for a known aligned load to simplify the base pointer
752 // and insertion byte:
753 if (basePtr.getOpcode() == ISD::ADD
754 && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
755 // Known offset into basePtr
756 int64_t offset = CN->getSExtValue();
758 // Simplify the base pointer for this case:
759 basePtr = basePtr.getOperand(0);
760 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
762 DAG.getConstant((offset & 0xf), PtrVT));
764 if ((offset & ~0xf) > 0) {
765 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
767 DAG.getConstant((offset & ~0xf), PtrVT));
770 // Otherwise, assume it's at byte 0 of basePtr
771 insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
773 DAG.getConstant(0, PtrVT));
774 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
776 DAG.getConstant(0, PtrVT));
779 // Unaligned load: must be more pessimistic about addressing modes:
780 if (basePtr.getOpcode() == ISD::ADD) {
781 MachineFunction &MF = DAG.getMachineFunction();
782 MachineRegisterInfo &RegInfo = MF.getRegInfo();
783 unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
786 SDValue Op0 = basePtr.getOperand(0);
787 SDValue Op1 = basePtr.getOperand(1);
789 if (isa<ConstantSDNode>(Op1)) {
790 // Convert the (add <ptr>, <const>) to an indirect address contained
791 // in a register. Note that this is done because we need to avoid
792 // creating a 0(reg) d-form address due to the SPU's block loads.
793 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
794 the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
795 basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
797 // Convert the (add <arg1>, <arg2>) to an indirect address, which
798 // will likely be lowered as a reg(reg) x-form address.
799 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
802 basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
804 DAG.getConstant(0, PtrVT));
807 // Insertion point is solely determined by basePtr's contents
808 insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
810 DAG.getConstant(0, PtrVT));
813 // Load the memory to which to store.
814 alignLoadVec = DAG.getLoad(vecVT, dl, the_chain, basePtr,
815 SN->getSrcValue(), SN->getSrcValueOffset(),
816 SN->isVolatile(), SN->isNonTemporal(), 16);
819 the_chain = alignLoadVec.getValue(1);
821 LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
822 SDValue theValue = SN->getValue();
826 && (theValue.getOpcode() == ISD::AssertZext
827 || theValue.getOpcode() == ISD::AssertSext)) {
828 // Drill down and get the value for zero- and sign-extended
830 theValue = theValue.getOperand(0);
833 // If the base pointer is already a D-form address, then just create
834 // a new D-form address with a slot offset and the orignal base pointer.
835 // Otherwise generate a D-form address with the slot offset relative
836 // to the stack pointer, which is always aligned.
838 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
839 errs() << "CellSPU LowerSTORE: basePtr = ";
840 basePtr.getNode()->dump(&DAG);
845 SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
847 SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
850 result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
851 vectorizeOp, alignLoadVec,
852 DAG.getNode(ISD::BIT_CONVERT, dl,
853 MVT::v4i32, insertEltOp));
855 result = DAG.getStore(the_chain, dl, result, basePtr,
856 LN->getSrcValue(), LN->getSrcValueOffset(),
857 LN->isVolatile(), LN->isNonTemporal(),
860 #if 0 && !defined(NDEBUG)
861 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
862 const SDValue ¤tRoot = DAG.getRoot();
865 errs() << "------- CellSPU:LowerStore result:\n";
867 errs() << "-------\n";
868 DAG.setRoot(currentRoot);
879 case ISD::LAST_INDEXED_MODE:
881 report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
883 Twine((unsigned)SN->getAddressingMode()));
//! Generate the address of a constant pool entry.
// Small-memory code gets a single A-form absolute address; large-memory
// code splits the address into Hi/Lo halves joined by an indirect address.
// Only the static relocation model is supported on Cell SPU.
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
    // "Large memory" mode: recombine the Hi/Lo halves of the address
    // through an indirect (x-form) address node.
    SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
    SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
    return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  llvm_unreachable("LowerConstantPool: Relocation model other than static"
//! Alternate entry point for generating the address of a constant pool entry
// Thin public wrapper around the file-local LowerConstantPool above; it only
// extracts the subtarget from the supplied target machine.
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
// Lower an ISD::JumpTable address. Mirrors LowerConstantPool: A-form
// absolute address for small-memory code, Hi/Lo indirect pair for
// large-memory code; static relocation model only.
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
    // Large-memory mode: combine the Hi/Lo halves via an indirect address.
    SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
    SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
    return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
// Lower an ISD::GlobalAddress node, preserving any constant offset folded
// into the GlobalAddressSDNode. Same A-form vs. Hi/Lo split as the constant
// pool and jump table lowerings above; static relocation model only.
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                          PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
    // Large-memory mode: combine the Hi/Lo halves via an indirect address.
    SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
    SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
    return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);

  report_fatal_error("LowerGlobalAddress: Relocation model other than static"
//! Custom lower double precision floating point constants
// f64 constants are materialized by reinterpreting the double's bit
// pattern as an i64, splatting it into a v2i64 BUILD_VECTOR, bitcasting
// to v2f64, and extracting the preferred slot.
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
// LowerFormalArguments - Lower the incoming arguments of a CellSPU
// function. Register-assigned arguments are copied out of their physical
// registers (R3 upward) into fresh virtual registers; the remainder are
// loaded from fixed stack slots. For varargs functions, all remaining
// argument registers are spilled to consecutive stack slots.
SPUTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();

  // Stack arguments begin just past the minimal linkage area ([LR]/[SP]).
  unsigned ArgOffset = SPUFrameInfo::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
  // FIXME: allow for other calling conventions
  CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);

  // Add DAG nodes to load the arguments or copy them out of registers.
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    CCValAssign &VA = ArgLocs[ArgNo];

    if (VA.isRegLoc()) {
      // Select the register class matching the argument's value type.
      const TargetRegisterClass *ArgRegClass;

      switch (ObjectVT.getSimpleVT().SimpleTy) {
        report_fatal_error("LowerFormalArguments Unhandled argument type: " +
                           Twine(ObjectVT.getEVTString()));
        ArgRegClass = &SPU::R8CRegClass;
        ArgRegClass = &SPU::R16CRegClass;
        ArgRegClass = &SPU::R32CRegClass;
        ArgRegClass = &SPU::R64CRegClass;
        ArgRegClass = &SPU::GPRCRegClass;
        ArgRegClass = &SPU::R32FPRegClass;
        ArgRegClass = &SPU::R64FPRegClass;
        ArgRegClass = &SPU::VECREGRegClass;

      // Copy the physical argument register into a new virtual register.
      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(VA.getLocReg(), VReg);
      ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);

      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
      ArgOffset += StackSlotSize;

    InVals.push_back(ArgVal);
    // Thread the chain through the copy/load just created.
    Chain = ArgVal.getOperand(0);

    // FIXME: we should be able to query the argument registers from
    // tablegen generated code.
    static const unsigned ArgRegs[] = {
      SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
      SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
      SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
      SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
      SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
      SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
      SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
      SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
      SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
      SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
      SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
    // size of ArgRegs array
    // NOTE(review): hard-coded 77 must match the R3..R79 table above;
    // keep the two in sync if the table ever changes.
    unsigned NumArgRegs = 77;

    // We will spill (79-3)+1 registers to the stack
    SmallVector<SDValue, 79-3+1> MemOps;

    // Create the frame slot
    for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
      // Each iteration records its slot; after the loop VarArgsFrameIndex
      // refers to the last spilled register's slot.
      FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
      SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
      unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
      SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
      SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0,
      Chain = Store.getOperand(0);
      MemOps.push_back(Store);

      // Increment address by stack slot size for the next stored argument
      ArgOffset += StackSlotSize;
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
/// Returns null unless the constant is word-aligned and its top 14 bits
/// are a sign-extension of the 18-bit immediate field.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0; // Top 14 bits have to be sext of immediate.

  // Drop the two implicit zero bits to form the encoded word immediate.
  // NOTE(review): the << 14 >> 14 trick relies on implementation-defined
  // signed-shift behavior — confirm for the supported host compilers.
  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
// LowerCall - Lower an outgoing call on CellSPU. Arguments are placed in
// registers (stack-slot-sized each) until the register file is exhausted,
// then stored to the outgoing argument area. The callee address is turned
// into an A-form, PC-relative, or indirect address depending on large-
// memory mode and whether the callee is a declaration. Return values are
// copied out of R3 (and R4 for split 64-bit results).
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // CellSPU target does not yet support tail call optimization.

  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  unsigned NumOps = Outs.size();
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
  // FIXME: allow for other calling conventions
  CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);

  const unsigned NumArgRegs = ArgLocs.size();

  // Handy pointer type
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;

  for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
    SDValue Arg = OutVals[ArgRegIdx];
    CCValAssign &VA = ArgLocs[ArgRegIdx];

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
      if (ArgRegIdx != NumArgRegs) {
        // Still a register available: pass the argument in it.
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        // Registers exhausted: spill the argument to the stack.
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
        ArgOffset += StackSlotSize;

  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area. According to the SPU ABI,
  // we minimally need space for [LR] and [SP].
  unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();

  // Insert a call sequence start
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);

  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls, otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      // This may be an unsafe assumption for JIT and really large compilation
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
      // "Large memory" mode: Turn all calls into indirect calls with a X-form
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  InFlag = Chain.getValue(1);

  // If the function returns void, just return the chain.

  // If the call has results, copy the values out of the ret val registers.
  switch (Ins[0].VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected ret value!");
  case MVT::Other: break;
    if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
      // Two-register result: R4 is copied first, threading its glue into
      // the subsequent R3 copy via getValue(2).
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                 MVT::i32, InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
    // Single-register result: copy it out of R3 with the call's type.
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
// LowerReturn - Lower a function return: analyze the return values with
// RetCC_SPU, mark the result registers live-out, copy each result value
// into its assigned register (glued together), and emit a RET_FLAG node.
SPUTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
    Flag = Chain.getValue(1);

    // Glued return when values were copied; plain return otherwise.
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);

  return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1403 //===----------------------------------------------------------------------===//
1404 // Vector related lowering:
1405 //===----------------------------------------------------------------------===//
// getVecImm - If N is a build-vector whose defined (non-undef) elements are
// all the same constant, return that ConstantSDNode; otherwise null.
static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
/// get_vec_i18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      // For i64 the splat is really two identical 32-bit words; reduce the
      // test value to a single word.
      // NOTE(review): upper/lower presumably checked for equality here —
      // confirm against the full source.
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      Value = Value >> 32;
    if (Value <= 0x3ffff)  // unsigned 18-bit immediate range
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      // i64 splats are two identical 32-bit words; test one word only.
      // NOTE(review): upper/lower presumably checked for equality here —
      // confirm against the full source.
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      Value = Value >> 32;
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {  // signed 16-bit range
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      // i64 splats are two identical 32-bit words; test one word only.
      // NOTE(review): upper/lower presumably checked for equality here —
      // confirm against the full source.
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      Value = Value >> 32;
    if (isInt<10>(Value))  // signed 10-bit immediate range
      return DAG.getTargetConstant(Value, ValueType);
/// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    int Value = (int) CN->getZExtValue();
    if (ValueType == MVT::i16
        && Value <= 0xffff /* truncated from uint64_t */
        && ((short) Value >> 8) == ((short) Value & 0xff))
      // Both bytes of the halfword agree: emit the single repeated byte.
      return DAG.getTargetConstant(Value & 0xff, ValueType);
    else if (ValueType == MVT::i8
             && (Value & 0xff) == Value)
      return DAG.getTargetConstant(Value, ValueType);
/// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
// The splat must have all of its significant bits in the upper halfword
// (low 16 bits zero), suitable for the ILHU (immediate-load-halfword-upper)
// instruction; the returned constant is the value shifted down by 16.
SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if ((ValueType == MVT::i32
         && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
      return DAG.getTargetConstant(Value >> 16, ValueType);
/// get_v4i32_imm - Catch-all for general 32-bit constant vectors
// Returns the uniform 32-bit splat value as an i32 target constant, or an
// empty SDValue if the build-vector is not a constant splat.
SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1544 /// get_v4i32_imm - Catch-all for general 64-bit constant vectors
1545 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1546 if (ConstantSDNode *CN = getVecImm(N)) {
1547 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
//! Lower a BUILD_VECTOR instruction creatively:
// Only constant splats are custom-lowered here: f32/f64 splats are built as
// integer splats and bitcast back; i8 splats are widened to v8i16; i64
// splats are delegated to SPU::LowerV2I64Splat.
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();
  BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
  unsigned minSplatBits = EltVT.getSizeInBits();

  // Never accept a splat narrower than 16 bits.
  if (minSplatBits < 16)

  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;

  if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, minSplatBits)
      || minSplatBits < SplatBitSize)
    return SDValue();   // Wasn't a constant vector or splat exceeded min

  uint64_t SplatBits = APSplatBits.getZExtValue();

  switch (VT.getSimpleVT().SimpleTy) {
    report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
                       Twine(VT.getEVTString()));
    uint32_t Value32 = uint32_t(SplatBits);
    assert(SplatBitSize == 32
           && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(Value32, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
    uint64_t f64val = uint64_t(SplatBits);
    assert(SplatBitSize == 64
           && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(f64val, MVT::i64);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
    unsigned short Value16 = SplatBits;
    SDValue T = DAG.getConstant(Value16, EltVT);
    SmallVector<SDValue, 8> Ops;

    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
    // v2i64/v2f64 splats need the shuffle-based expansion below.
    return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
// LowerV2I64Splat - Build a v2i64/v2f64 splat of SplatVal. If both 32-bit
// halves are equal, a simple v4i32 splat suffices. If both halves are
// "special" byte patterns (0, ~0, or sign bit), fall back to a constant
// BUILD_VECTOR (constant pool load). Otherwise synthesize the value with a
// SHUFB whose mask selects real bytes or generates 0x00/0xff/0x80 fills.
SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
  uint32_t upper = uint32_t(SplatVal >> 32);
  uint32_t lower = uint32_t(SplatVal);

  if (upper == lower) {
    // Magic constant that can be matched by IL, ILA, et. al.
    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   Val, Val, Val, Val));
    bool upper_special, lower_special;

    // NOTE: This code creates common-case shuffle masks that can be easily
    // detected as common expressions. It is not attempting to create highly
    // specialized masks to replace any and all 0's, 0xff's and 0x80's.

    // Detect if the upper or lower half is a special shuffle mask pattern:
    upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
    lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);

    // Both upper and lower are special, lower to a constant pool load:
    if (lower_special && upper_special) {
      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                         SplatValCN, SplatValCN);

    SmallVector<SDValue, 16> ShufBytes;

    // Create lower vector if not a special pattern
    if (!lower_special) {
      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
      LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     LO32C, LO32C, LO32C, LO32C));

    // Create upper vector if not a special pattern
    if (!upper_special) {
      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
      HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     HI32C, HI32C, HI32C, HI32C));

    // If either upper or lower are special, then the two input operands are
    // the same (basically, one of them is a "don't care")

    // Build the 16-byte shuffle mask word by word (4 words x 4 bytes).
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        bool process_upper, process_lower;

        // Even words come from the upper half, odd words from the lower.
        process_upper = (upper_special && (i & 1) == 0);
        process_lower = (lower_special && (i & 1) == 1);

        if (process_upper || process_lower) {
          // Special fills: SHUFB mask encodings for 0x00 / 0xff / 0x80.
          if ((process_upper && upper == 0)
              || (process_lower && lower == 0))
          else if ((process_upper && upper == 0xffffffff)
                   || (process_lower && lower == 0xffffffff))
          else if ((process_upper && upper == 0x80000000)
                   || (process_lower && lower == 0x80000000))
            val |= (j == 0 ? 0xe0 : 0x80);
          // Ordinary byte: select from the source vector.
          val |= i * 4 + j + ((i & 1) * 16);

      ShufBytes.push_back(DAG.getConstant(val, MVT::i32));

    return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   &ShufBytes[0], ShufBytes.size()));
/// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
/// which the Cell can operate. The code inspects V3 to ascertain whether the
/// permutation vector, V3, is monotonically increasing with one "exception"
/// element, e.g., (0, 1, _, 3). If this is the case, then generate a
/// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
/// In either case, the net result is going to eventually invoke SHUFB to
/// permute/shuffle the bytes from V1 and V2.
/// SHUFFLE_MASK is eventually selected as one of the C*D instructions, generate
/// control word for byte/halfword/word insertion. This takes care of a single
/// element move from V2 into V1.
/// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  // An undef second operand shuffles within a single vector.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // If we have a single element being moved from V1 to V2, this can be handled
  // using the C*[DX] compute mask instructions, but the vector elements have
  // to be monotonically increasing with one exception element, and the source
  // slot of the element to move must be the same as the destination.
  EVT VecVT = V1.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned EltsFromV2 = 0;
  unsigned V2EltOffset = 0;
  unsigned V2EltIdx0 = 0;
  unsigned CurrElt = 0;
  unsigned MaxElts = VecVT.getVectorNumElements();
  unsigned PrevElt = 0;

  bool monotonic = true;

  EVT maskVT;             // which of the c?d instructions to use

  // Select the mask type (and thereby the cbd/chd/cwd/cdd instruction)
  // from the element type.
  if (EltVT == MVT::i8) {
    maskVT = MVT::v16i8;
  } else if (EltVT == MVT::i16) {
    maskVT = MVT::v8i16;
  } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
    maskVT = MVT::v4i32;
  } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
    maskVT = MVT::v2i64;
    llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");

  // Walk the shuffle mask, classifying it as a single-insert, a rotation,
  // or a general shuffle.
  for (unsigned i = 0; i != MaxElts; ++i) {
    if (SVN->getMaskElt(i) < 0)

    unsigned SrcElt = SVN->getMaskElt(i);

    if (SrcElt >= V2EltIdx0) {
      // TODO: optimize for the monotonic case when several consecutive
      // elements are taken form V2. Do we ever get such a case?
      if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
        V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
    } else if (CurrElt != SrcElt) {
      if (PrevElt > 0 && SrcElt < MaxElts) {
        // Consecutive (or wrap-around) elements indicate a rotation.
        if ((PrevElt == SrcElt - 1)
            || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
      } else if (i == 0) {
        // First time through, need to keep track of previous element
        // This isn't a rotation, takes elements from vector 2

  if (EltsFromV2 == 1 && monotonic) {
    // Compute mask and shuffle
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

    // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
    // R1 ($sp) is used here only as it is guaranteed to have last bits zero
    SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                  DAG.getRegister(SPU::R1, PtrVT),
                                  DAG.getConstant(V2EltOffset, MVT::i32));
    SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,

    // Use shuffle mask in SHUFB synthetic instruction:
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
  } else if (rotate) {
    // Whole-vector byte rotation handles the monotonic wrap-around case.
    int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;

    return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
                       V1, DAG.getConstant(rotamt, MVT::i16));
    // General case: materialize the full byte-level permute mask and
    // emit a SHUFB.
    // Convert the SHUFFLE_VECTOR mask's input element units to the
    unsigned BytesPerElement = EltVT.getSizeInBits()/8;

    SmallVector<SDValue, 16> ResultMask;
    for (unsigned i = 0, e = MaxElts; i != e; ++i) {
      unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);

      for (unsigned j = 0; j < BytesPerElement; ++j)
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
    SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
// LowerSCALAR_TO_VECTOR - Splat a scalar into a vector. Constant scalars
// become constant BUILD_VECTORs (eventually a vector register load);
// non-constant scalars are moved into the vector's preferred slot via
// the PREFSLOT2VEC synthetic node.
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);                   // Op0 = the scalar
  DebugLoc dl = Op.getDebugLoc();

  if (Op0.getNode()->getOpcode() == ISD::Constant) {
    // For a constant, build the appropriate constant vector, which will
    // eventually simplify to a vector register load.

    ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
    SmallVector<SDValue, 16> ConstVecValues;

    // Create a constant vector:
    switch (Op.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected constant value type in "
                              "LowerSCALAR_TO_VECTOR");
    case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
    case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
    case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
    case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
    case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
    case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;

    SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
    for (size_t j = 0; j < n_copies; ++j)
      ConstVecValues.push_back(CValue);

    return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
                       &ConstVecValues[0], ConstVecValues.size());
    // Otherwise, copy the value from one register to another:
    switch (Op0.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
      return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
//! Lower ISD::EXTRACT_VECTOR_ELT for Cell SPU.
//  Constant index: either read the preferred slot directly (element 0 of
//  i32/i64) or build a SHUFB mask that moves the requested element's bytes
//  into the preferred slot. Variable index: shift the element to byte 0,
//  replicate it across the quadword, then read the preferred slot.
1904 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1905 EVT VT = Op.getValueType();
1906 SDValue N = Op.getOperand(0);
1907 SDValue Elt = Op.getOperand(1);
1908 DebugLoc dl = Op.getDebugLoc();
1911 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1912 // Constant argument:
1913 int EltNo = (int) C->getZExtValue();
// Sanity-check the index against the element count for each type:
1916 if (VT == MVT::i8 && EltNo >= 16)
1917 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
1918 else if (VT == MVT::i16 && EltNo >= 8)
1919 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
1920 else if (VT == MVT::i32 && EltNo >= 4)
1921 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
1922 else if (VT == MVT::i64 && EltNo >= 2)
1923 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
1925 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
1926 // i32 and i64: Element 0 is the preferred slot
1927 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
1930 // Need to generate shuffle mask and extract:
1931 int prefslot_begin = -1, prefslot_end = -1;
1932 int elt_byte = EltNo * VT.getSizeInBits() / 8;
// Byte range of the preferred slot, per element type:
1934 switch (VT.getSimpleVT().SimpleTy) {
1936 assert(false && "Invalid value type!");
1938 prefslot_begin = prefslot_end = 3;
1942 prefslot_begin = 2; prefslot_end = 3;
1947 prefslot_begin = 0; prefslot_end = 3;
1952 prefslot_begin = 0; prefslot_end = 7;
1957 assert(prefslot_begin != -1 && prefslot_end != -1 &&
1958 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
// Byte-level shuffle mask (one entry per byte of the 128-bit register):
1960 unsigned int ShufBytes[16] = {
1961 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1963 for (int i = 0; i < 16; ++i) {
1964 // zero fill upper part of preferred slot, don't care about the
1966 unsigned int mask_val;
1967 if (i <= prefslot_end) {
1969 ((i < prefslot_begin)
1971 : elt_byte + (i - prefslot_begin));
1973 ShufBytes[i] = mask_val;
1975 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
// Pack the 16 byte selectors into four i32 mask words:
1978 SDValue ShufMask[4];
1979 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
1980 unsigned bidx = i * 4;
1981 unsigned int bits = ((ShufBytes[bidx] << 24) |
1982 (ShufBytes[bidx+1] << 16) |
1983 (ShufBytes[bidx+2] << 8) |
1985 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
1988 SDValue ShufMaskVec =
1989 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1990 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
// Shuffle the element into the preferred slot, then read that slot:
1992 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1993 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
1994 N, N, ShufMaskVec));
1996 // Variable index: Rotate the requested element into slot 0, then replicate
1997 // slot 0 across the vector
1998 EVT VecVT = N.getValueType();
1999 if (!VecVT.isSimple() || !VecVT.isVector()) {
2000 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
2004 // Make life easier by making sure the index is zero-extended to i32
2005 if (Elt.getValueType() != MVT::i32)
2006 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
2008 // Scale the index to a bit/byte shift quantity
2010 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
2011 unsigned scaleShift = scaleFactor.logBase2();
2014 if (scaleShift > 0) {
2015 // Scale the shift factor:
2016 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2017 DAG.getConstant(scaleShift, MVT::i32));
// Shift the requested element down to byte 0 of the quadword:
2020 vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
2022 // Replicate the bytes starting at byte 0 across the entire vector (for
2023 // consistency with the notion of a unified register set)
// Per-element-size splat masks (byte selectors packed into i32 words):
2026 switch (VT.getSimpleVT().SimpleTy) {
2028 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2032 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2033 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2034 factor, factor, factor, factor);
2038 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2039 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2040 factor, factor, factor, factor);
2045 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2046 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2047 factor, factor, factor, factor);
2052 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2053 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2054 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2055 loFactor, hiFactor, loFactor, hiFactor);
// Splat slot 0 across the register and read the preferred slot:
2060 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2061 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2062 vecShift, vecShift, replicate));
//! Lower ISD::INSERT_VECTOR_ELT for Cell SPU.
//  Builds a SHUFFLE_MASK keyed off a stack-pointer-relative address (the
//  hardware generates insertion masks from an address's low nybble), then
//  uses SHUFB to merge the scalar into the target lane of the vector.
//  Only constant (or undef) insertion indices are supported — see the assert.
2068 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2069 SDValue VecOp = Op.getOperand(0);
2070 SDValue ValOp = Op.getOperand(1);
2071 SDValue IdxOp = Op.getOperand(2);
2072 DebugLoc dl = Op.getDebugLoc();
2073 EVT VT = Op.getValueType();
2075 // use 0 when the lane to insert to is 'undef'
2077 if (IdxOp.getOpcode() != ISD::UNDEF) {
2078 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2079 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2080 Idx = (CN->getSExtValue());
2083 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2084 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2085 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2086 DAG.getRegister(SPU::R1, PtrVT),
2087 DAG.getConstant(Idx, PtrVT));
2088 // widen the mask when dealing with half vectors
2089 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2090 128/ VT.getVectorElementType().getSizeInBits());
2091 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
// SHUFB merges the scalar (promoted to a vector) into VecOp under the mask:
2094 DAG.getNode(SPUISD::SHUFB, dl, VT,
2095 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2097 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
//! Lower i8 arithmetic by promoting to i16 and truncating the result.
//  SPU has no native i8 ALU ops; each case widens the operands to i16
//  (sign- or zero-extended as the operation requires), performs the op at
//  i16, and truncates back to i8. Shift amounts are coerced to the target's
//  shift-amount type first.
//  @param Op  the i8 operation to lower (opcode selected by Opc)
//  @param Opc the ISD opcode to emit at i16 width
//  @param TLI used to query the shift-amount type
2102 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2103 const TargetLowering &TLI)
2105 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2106 DebugLoc dl = Op.getDebugLoc();
2107 EVT ShiftVT = TLI.getShiftAmountTy();
2109 assert(Op.getValueType() == MVT::i8);
2112 llvm_unreachable("Unhandled i8 math operator");
2116 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2118 SDValue N1 = Op.getOperand(1);
2119 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2120 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2121 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2122 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2127 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2129 SDValue N1 = Op.getOperand(1);
2130 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2131 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2132 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2133 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Rotate case: zero-extend the value, coerce the rotate amount to ShiftVT.
2137 SDValue N1 = Op.getOperand(1);
2138 EVT N1VT = N1.getValueType();
2140 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2141 if (!N1VT.bitsEq(ShiftVT)) {
2142 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2145 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2148 // Replicate lower 8-bits into upper 8:
// (so a rotate at i16 width behaves like an i8 rotate)
2150 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2151 DAG.getNode(ISD::SHL, dl, MVT::i16,
2152 N0, DAG.getConstant(8, MVT::i32)));
2154 // Truncate back down to i8
2155 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2156 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
// Logical-shift case: zero-extend the value, adjust the shift amount.
2160 SDValue N1 = Op.getOperand(1);
2161 EVT N1VT = N1.getValueType();
2163 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2164 if (!N1VT.bitsEq(ShiftVT)) {
2165 unsigned N1Opc = ISD::ZERO_EXTEND;
2167 if (N1.getValueType().bitsGT(ShiftVT))
2168 N1Opc = ISD::TRUNCATE;
2170 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2173 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2174 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Arithmetic-shift case: sign-extend the value, adjust the shift amount.
2177 SDValue N1 = Op.getOperand(1);
2178 EVT N1VT = N1.getValueType();
2180 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2181 if (!N1VT.bitsEq(ShiftVT)) {
2182 unsigned N1Opc = ISD::SIGN_EXTEND;
2184 if (N1VT.bitsGT(ShiftVT))
2185 N1Opc = ISD::TRUNCATE;
2186 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2189 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2190 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
// Multiply case: promote both operands with sign extension.
2193 SDValue N1 = Op.getOperand(1);
2195 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2196 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2197 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2198 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2206 //! Lower byte immediate operations for v16i8 vectors:
// Lower AND/OR/XOR of a v16i8 vector with a splatted constant so it can be
// selected as a byte-immediate instruction (ANDBI/ORBI/XORBI). Finds the
// constant BUILD_VECTOR operand (looking through BIT_CONVERT on either
// side), and if it is a splat, rebuilds the operation with a 16-way splat
// of the low 8 bits as a target constant.
2208 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2211 EVT VT = Op.getValueType();
2212 DebugLoc dl = Op.getDebugLoc();
// Assume the constant is operand 0; if not, swap to operand 1 below.
2214 ConstVec = Op.getOperand(0);
2215 Arg = Op.getOperand(1);
2216 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2217 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2218 ConstVec = ConstVec.getOperand(0);
2220 ConstVec = Op.getOperand(1);
2221 Arg = Op.getOperand(0);
2222 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2223 ConstVec = ConstVec.getOperand(0);
2228 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2229 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2230 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2232 APInt APSplatBits, APSplatUndef;
2233 unsigned SplatBitSize;
2235 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2237 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2238 HasAnyUndefs, minSplatBits)
2239 && minSplatBits <= SplatBitSize) {
// Splat detected: rebuild with a target-constant byte splatted 16 ways.
2240 uint64_t SplatBits = APSplatBits.getZExtValue();
2241 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2243 SmallVector<SDValue, 16> tcVec;
2244 tcVec.assign(16, tc);
2245 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2246 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2250 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2251 // lowered. Return the operation, rather than a null SDValue.
2255 //! Custom lowering for CTPOP (count population)
2257 Custom lowering code that counts the number of ones in the input
2258 operand. SPU has such an instruction, but it counts the number of
2259 ones per byte, which then have to be accumulated.
//! Lower ISD::CTPOP using the SPU CNTB (count ones per byte) instruction.
//  CNTB counts ones per byte, so wider results must accumulate the per-byte
//  counts: i8 uses CNTB directly; i16 folds the high byte into the low byte;
//  i32 does two shift-and-add reduction steps. Intermediate sums are pinned
//  to virtual registers so the copies sequence correctly.
2261 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2262 EVT VT = Op.getValueType();
2263 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2264 VT, (128 / VT.getSizeInBits()));
2265 DebugLoc dl = Op.getDebugLoc();
2267 switch (VT.getSimpleVT().SimpleTy) {
2269 assert(false && "Invalid value type!");
// i8 case: one CNTB, extract element 0 — no accumulation needed.
2271 SDValue N = Op.getOperand(0);
2272 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2274 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2275 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2277 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
// i16 case: add the upper byte's count into the lower byte, mask to 0x0f.
2281 MachineFunction &MF = DAG.getMachineFunction();
2282 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2284 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2286 SDValue N = Op.getOperand(0);
2287 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2288 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2289 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2291 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2292 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2294 // CNTB_result becomes the chain to which all of the virtual registers
2295 // CNTB_reg, SUM1_reg become associated:
2296 SDValue CNTB_result =
2297 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2299 SDValue CNTB_rescopy =
2300 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2302 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2304 return DAG.getNode(ISD::AND, dl, MVT::i16,
2305 DAG.getNode(ISD::ADD, dl, MVT::i16,
2306 DAG.getNode(ISD::SRL, dl, MVT::i16,
// i32 case: two reduction rounds (shift-by-16 then shift-by-8), mask 0xff.
2313 MachineFunction &MF = DAG.getMachineFunction();
2314 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2316 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2317 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2319 SDValue N = Op.getOperand(0);
2320 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2321 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2322 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2323 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2325 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2326 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2328 // CNTB_result becomes the chain to which all of the virtual registers
2329 // CNTB_reg, SUM1_reg become associated:
2330 SDValue CNTB_result =
2331 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2333 SDValue CNTB_rescopy =
2334 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
// Round 1: fold the upper half-word counts into the lower half-word.
2337 DAG.getNode(ISD::SRL, dl, MVT::i32,
2338 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2342 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2343 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2345 SDValue Sum1_rescopy =
2346 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
// Round 2: fold the remaining upper byte count into the low byte.
2349 DAG.getNode(ISD::SRL, dl, MVT::i32,
2350 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2353 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2354 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2356 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2366 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2368 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2369 All conversions to i64 are expanded to a libcall.
//! Lower ISD::FP_TO_SINT / ISD::FP_TO_UINT.
//  f32->i32 needs no lowering; f64->i32 and any fp->i64 conversion is
//  expanded into a runtime-library call via ExpandLibCall.
2371 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2372 const SPUTargetLowering &TLI) {
2373 EVT OpVT = Op.getValueType();
2374 SDValue Op0 = Op.getOperand(0);
2375 EVT Op0VT = Op0.getValueType();
2377 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2378 || OpVT == MVT::i64) {
2379 // Convert f32 / f64 to i32 / i64 via libcall.
// Pick the signed or unsigned runtime routine based on the opcode:
2381 (Op.getOpcode() == ISD::FP_TO_SINT)
2382 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2383 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2384 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2386 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2392 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2394 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2395 All conversions from i64 are expanded to a libcall.
//! Lower ISD::SINT_TO_FP / ISD::UINT_TO_FP.
//  i32->f32 needs no lowering; i32->f64 and any i64->fp conversion is
//  expanded into a runtime-library call via ExpandLibCall.
2397 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2398 const SPUTargetLowering &TLI) {
2399 EVT OpVT = Op.getValueType();
2400 SDValue Op0 = Op.getOperand(0);
2401 EVT Op0VT = Op0.getValueType();
2403 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2404 || Op0VT == MVT::i64) {
2405 // Convert i32, i64 to f64 via libcall:
// Pick the signed or unsigned runtime routine based on the opcode:
2407 (Op.getOpcode() == ISD::SINT_TO_FP)
2408 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2409 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2410 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2412 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2418 //! Lower ISD::SETCC
2420 This handles MVT::f64 (double floating point) condition lowering
//! Lower ISD::SETCC for MVT::f64 operands (SPU has no native f64 compare).
//  The doubles are bit-cast to i64 and compared as integers: sign-magnitude
//  values are converted to two's complement (negative values are subtracted
//  from the sign-bit constant) so ordinary integer comparisons give the
//  right ordering. SETO/SETUO are answered from the lhs alone via NaN
//  checks; ordered comparisons AND in not-NaN tests on both operands.
2422 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2423 const TargetLowering &TLI) {
2424 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2425 DebugLoc dl = Op.getDebugLoc();
2426 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2428 SDValue lhs = Op.getOperand(0);
2429 SDValue rhs = Op.getOperand(1);
2430 EVT lhsVT = lhs.getValueType();
2431 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2433 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2434 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2435 EVT IntVT(MVT::i64);
2437 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2438 // selected to a NOP:
2439 SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
// High 32 bits of the lhs (sign + exponent + upper mantissa):
2441 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2442 DAG.getNode(ISD::SRL, dl, IntVT,
2443 i64lhs, DAG.getConstant(32, MVT::i32)));
2444 SDValue lhsHi32abs =
2445 DAG.getNode(ISD::AND, dl, MVT::i32,
2446 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2448 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2450 // SETO and SETUO only use the lhs operand:
2451 if (CC->get() == ISD::SETO) {
2452 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2454 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2455 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2456 DAG.getSetCC(dl, ccResultVT,
2457 lhs, DAG.getConstantFP(0.0, lhsVT),
2459 DAG.getConstant(ccResultAllOnes, ccResultVT));
2460 } else if (CC->get() == ISD::SETUO) {
2461 // Evaluates to true if Op0 is [SQ]NaN
// (NaN iff abs(hi32) >= 0x7ff00000 with a nonzero mantissa contribution)
2462 return DAG.getNode(ISD::AND, dl, ccResultVT,
2463 DAG.getSetCC(dl, ccResultVT,
2465 DAG.getConstant(0x7ff00000, MVT::i32),
2467 DAG.getSetCC(dl, ccResultVT,
2469 DAG.getConstant(0, MVT::i32),
// Same high-32 extraction for the rhs:
2473 SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
2475 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2476 DAG.getNode(ISD::SRL, dl, IntVT,
2477 i64rhs, DAG.getConstant(32, MVT::i32)));
2479 // If a value is negative, subtract from the sign magnitude constant:
2480 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2482 // Convert the sign-magnitude representation into 2's complement:
// The select mask is the lhs sign bit smeared across the whole word:
2483 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2484 lhsHi32, DAG.getConstant(31, MVT::i32));
2485 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2487 DAG.getNode(ISD::SELECT, dl, IntVT,
2488 lhsSelectMask, lhsSignMag2TC, i64lhs);
2490 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2491 rhsHi32, DAG.getConstant(31, MVT::i32));
2492 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2494 DAG.getNode(ISD::SELECT, dl, IntVT,
2495 rhsSelectMask, rhsSignMag2TC, i64rhs);
// Map the floating-point condition to its integer counterpart:
2499 switch (CC->get()) {
2502 compareOp = ISD::SETEQ; break;
2505 compareOp = ISD::SETGT; break;
2508 compareOp = ISD::SETGE; break;
2511 compareOp = ISD::SETLT; break;
2514 compareOp = ISD::SETLE; break;
2517 compareOp = ISD::SETNE; break;
2519 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2523 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2524 (ISD::CondCode) compareOp);
// Ordered variants (low bit pattern of the cond code) must also exclude
// NaN operands; AND in "lhs==lhs && rhs==rhs" style not-NaN checks.
2526 if ((CC->get() & 0x8) == 0) {
2527 // Ordered comparison:
2528 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2529 lhs, DAG.getConstantFP(0.0, MVT::f64),
2531 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2532 rhs, DAG.getConstantFP(0.0, MVT::f64),
2534 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2536 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2542 //! Lower ISD::SELECT_CC
2544 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2547 \note Need to revisit this in the future: if the code path through the true
2548 and false value computations is longer than the latency of a branch (6
2549 cycles), then it would be more advantageous to branch and insert a new basic
2550 block and branch on the condition. However, this code does not make that
2551 assumption, given the simplistic uses so far.
//! Lower ISD::SELECT_CC to a SETCC feeding the SPU SELB bit-select node.
//  @param Op  the SELECT_CC node: (lhs, rhs, trueval, falseval, cond)
//  @param TLI used to query the SETCC result type
2554 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2555 const TargetLowering &TLI) {
2556 EVT VT = Op.getValueType();
2557 SDValue lhs = Op.getOperand(0);
2558 SDValue rhs = Op.getOperand(1);
2559 SDValue trueval = Op.getOperand(2);
2560 SDValue falseval = Op.getOperand(3);
2561 SDValue condition = Op.getOperand(4);
2562 DebugLoc dl = Op.getDebugLoc();
2564 // NOTE: SELB's arguments: $rA, $rB, $mask
2566 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2567 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2568 // condition was true and 0s where the condition was false. Hence, the
2569 // arguments to SELB get reversed.
2571 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2572 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2573 // with another "cannot select select_cc" assert:
2575 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2576 TLI.getSetCCResultType(Op.getValueType()),
2577 lhs, rhs, condition);
// falseval/trueval deliberately reversed — see the SELB note above.
2578 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2581 //! Custom lower ISD::TRUNCATE
//! Custom lower ISD::TRUNCATE.
//  Only i128 -> i64 is handled here: a SHUFB copies the low doubleword of
//  the quadword into the preferred slot, then VEC2PREFSLOT extracts it.
//  All other truncations are returned untouched for default legalization.
2582 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2584 // Type to truncate to
2585 EVT VT = Op.getValueType();
2586 MVT simpleVT = VT.getSimpleVT();
2587 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2588 VT, (128 / VT.getSizeInBits()));
2589 DebugLoc dl = Op.getDebugLoc();
2591 // Type to truncate from
2592 SDValue Op0 = Op.getOperand(0);
2593 EVT Op0VT = Op0.getValueType();
2595 if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
2596 // Create shuffle mask, least significant doubleword of quadword
2597 unsigned maskHigh = 0x08090a0b;
2598 unsigned maskLow = 0x0c0d0e0f;
2599 // Use a shuffle to perform the truncation
2600 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2601 DAG.getConstant(maskHigh, MVT::i32),
2602 DAG.getConstant(maskLow, MVT::i32),
2603 DAG.getConstant(maskHigh, MVT::i32),
2604 DAG.getConstant(maskLow, MVT::i32));
2606 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2607 Op0, Op0, shufMask);
2609 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2612 return SDValue(); // Leave the truncate unmolested
2616 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2617 * algorithm is to duplicate the sign bit using rotmai to generate at
2618 * least one byte full of sign bits. Then propagate the "sign-byte" into
2619 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2621 * @param Op The sext operand
2622 * @param DAG The current DAG
2623 * @return The SDValue with the entire instruction sequence
//! Lower ISD::SIGN_EXTEND for i64/i32 -> i128.
//  Duplicates the sign bit with a word-wise arithmetic shift (rotmai) so at
//  least one byte is all sign bits, then SHUFB merges those sign bytes into
//  the upper words and the original value into the lower words.
//  @param Op  the sext operand
//  @param DAG the current DAG
//  @return    the SDValue with the entire instruction sequence
2625 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2627 DebugLoc dl = Op.getDebugLoc();
2629 // Type to extend to
2630 MVT OpVT = Op.getValueType().getSimpleVT();
2632 // Type to extend from
2633 SDValue Op0 = Op.getOperand(0);
2634 MVT Op0VT = Op0.getValueType().getSimpleVT();
2636 // The type to extend to needs to be a i128 and
2637 // the type to extend from needs to be i64 or i32.
2638 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2639 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2641 // Create shuffle mask
// 0x10101010 selects the sign byte from the second SHUFB operand;
// the other words select the original value's bytes.
2642 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2643 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2644 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2645 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2646 DAG.getConstant(mask1, MVT::i32),
2647 DAG.getConstant(mask1, MVT::i32),
2648 DAG.getConstant(mask2, MVT::i32),
2649 DAG.getConstant(mask3, MVT::i32));
2651 // Word wise arithmetic right shift to generate at least one byte
2652 // that contains sign bits.
2653 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2654 SDValue sraVal = DAG.getNode(ISD::SRA,
2657 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2658 DAG.getConstant(31, MVT::i32));
2660 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2661 // and the input value into the lower 64 bits.
2662 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2663 DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i128, Op0), sraVal, shufMask);
2665 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
2668 //! Custom (target-specific) lowering entry point
2670 This is where LLVM's DAG selection process calls to do target-specific
//! Custom (target-specific) lowering entry point: dispatch each opcode
//  marked "Custom" for this target to its Lower* helper. An unexpected
//  opcode is a backend bug and aborts after dumping the node.
2674 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2676 unsigned Opc = (unsigned) Op.getOpcode();
2677 EVT VT = Op.getValueType();
// Unhandled opcode: report and abort — reaching here means the opcode was
// marked Custom without a lowering case below.
2682 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2683 errs() << "Op.getOpcode() = " << Opc << "\n";
2684 errs() << "*Op.getNode():\n";
2685 Op.getNode()->dump();
2687 llvm_unreachable(0);
2693 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2695 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2696 case ISD::ConstantPool:
2697 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2698 case ISD::GlobalAddress:
2699 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2700 case ISD::JumpTable:
2701 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2702 case ISD::ConstantFP:
2703 return LowerConstantFP(Op, DAG);
2705 // i8, i64 math ops:
2714 return LowerI8Math(Op, DAG, Opc, *this);
2718 case ISD::FP_TO_SINT:
2719 case ISD::FP_TO_UINT:
2720 return LowerFP_TO_INT(Op, DAG, *this);
2722 case ISD::SINT_TO_FP:
2723 case ISD::UINT_TO_FP:
2724 return LowerINT_TO_FP(Op, DAG, *this);
2726 // Vector-related lowering.
2727 case ISD::BUILD_VECTOR:
2728 return LowerBUILD_VECTOR(Op, DAG);
2729 case ISD::SCALAR_TO_VECTOR:
2730 return LowerSCALAR_TO_VECTOR(Op, DAG);
2731 case ISD::VECTOR_SHUFFLE:
2732 return LowerVECTOR_SHUFFLE(Op, DAG);
2733 case ISD::EXTRACT_VECTOR_ELT:
2734 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2735 case ISD::INSERT_VECTOR_ELT:
2736 return LowerINSERT_VECTOR_ELT(Op, DAG);
2738 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2742 return LowerByteImmed(Op, DAG);
2744 // Vector and i8 multiply:
2747 return LowerI8Math(Op, DAG, Opc, *this);
2750 return LowerCTPOP(Op, DAG);
2752 case ISD::SELECT_CC:
2753 return LowerSELECT_CC(Op, DAG, *this);
2756 return LowerSETCC(Op, DAG, *this);
2759 return LowerTRUNCATE(Op, DAG);
2761 case ISD::SIGN_EXTEND:
2762 return LowerSIGN_EXTEND(Op, DAG);
//! Replace results of nodes whose result types are illegal for this target.
//  Currently no opcodes are handled: any node reaching here is reported as
//  unimplemented, and otherwise left unchanged.
2768 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2769 SmallVectorImpl<SDValue>&Results,
2770 SelectionDAG &DAG) const
2773 unsigned Opc = (unsigned) N->getOpcode();
2774 EVT OpVT = N->getValueType(0);
2778 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2779 errs() << "Op.getOpcode() = " << Opc << "\n";
2780 errs() << "*Op.getNode():\n";
2788 /* Otherwise, return unchanged */
2791 //===----------------------------------------------------------------------===//
2792 // Target Optimization Hooks
2793 //===----------------------------------------------------------------------===//
//! Target-specific DAG combines for SPU nodes.
//  Folds performed here:
//   - (add (SPUindirect a, b), 0)            -> (SPUindirect a, b)
//   - (add (SPUindirect a, c1), c2)          -> (SPUindirect a, c1+c2)
//   - ({any,zero,sign}_extend (vec2prefslot)) -> vec2prefslot (types equal)
//   - (SPUindirect (SPUaform addr, 0), 0)    -> (SPUaform addr, 0)
//   - (SPUindirect (add a, b), 0)            -> (SPUindirect a, b)
//   - degenerate (zero-amount) quadword shifts/rotates -> operand
//   - (prefslot2vec (vec2prefslot x))        -> x (possibly through extends)
2796 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2799 TargetMachine &TM = getTargetMachine();
2801 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2802 SelectionDAG &DAG = DCI.DAG;
2803 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2804 EVT NodeVT = N->getValueType(0); // The node's value type
2805 EVT Op0VT = Op0.getValueType(); // The first operand's result
2806 SDValue Result; // Initially, empty result
2807 DebugLoc dl = N->getDebugLoc();
2809 switch (N->getOpcode()) {
2812 SDValue Op1 = N->getOperand(1);
2814 if (Op0.getOpcode() == SPUISD::IndirectAddr
2815 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2816 // Normalize the operands to reduce repeated code
2817 SDValue IndirectArg = Op0, AddArg = Op1;
2819 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2824 if (isa<ConstantSDNode>(AddArg)) {
2825 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2826 SDValue IndOp1 = IndirectArg.getOperand(1);
2828 if (CN0->isNullValue()) {
2829 // (add (SPUindirect <arg>, <arg>), 0) ->
2830 // (SPUindirect <arg>, <arg>)
2832 #if !defined(NDEBUG)
2833 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2835 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2836 << "With: (SPUindirect <arg>, <arg>)\n";
2841 } else if (isa<ConstantSDNode>(IndOp1)) {
2842 // (add (SPUindirect <arg>, <const>), <const>) ->
2843 // (SPUindirect <arg>, <const + const>)
2844 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2845 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2846 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2848 #if !defined(NDEBUG)
2849 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2851 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2852 << "), " << CN0->getSExtValue() << ")\n"
2853 << "With: (SPUindirect <arg>, "
2854 << combinedConst << ")\n";
2858 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2859 IndirectArg, combinedValue);
2865 case ISD::SIGN_EXTEND:
2866 case ISD::ZERO_EXTEND:
2867 case ISD::ANY_EXTEND: {
2868 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2869 // (any_extend (SPUextract_elt0 <arg>)) ->
2870 // (SPUextract_elt0 <arg>)
2871 // Types must match, however...
2872 #if !defined(NDEBUG)
2873 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2874 errs() << "\nReplace: ";
2876 errs() << "\nWith: ";
2877 Op0.getNode()->dump(&DAG);
2886 case SPUISD::IndirectAddr: {
2887 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2888 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2889 if (CN != 0 && CN->isNullValue()) {
2890 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2891 // (SPUaform <addr>, 0)
2893 DEBUG(errs() << "Replace: ");
2894 DEBUG(N->dump(&DAG));
2895 DEBUG(errs() << "\nWith: ");
2896 DEBUG(Op0.getNode()->dump(&DAG));
2897 DEBUG(errs() << "\n");
2901 } else if (Op0.getOpcode() == ISD::ADD) {
2902 SDValue Op1 = N->getOperand(1);
2903 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
2904 // (SPUindirect (add <arg>, <arg>), 0) ->
2905 // (SPUindirect <arg>, <arg>)
2906 if (CN1->isNullValue()) {
2908 #if !defined(NDEBUG)
2909 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2911 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
2912 << "With: (SPUindirect <arg>, <arg>)\n";
2916 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2917 Op0.getOperand(0), Op0.getOperand(1));
2923 case SPUISD::SHLQUAD_L_BITS:
2924 case SPUISD::SHLQUAD_L_BYTES:
2925 case SPUISD::ROTBYTES_LEFT: {
2926 SDValue Op1 = N->getOperand(1);
2928 // Kill degenerate vector shifts:
2929 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2930 if (CN->isNullValue()) {
2936 case SPUISD::PREFSLOT2VEC: {
2937 switch (Op0.getOpcode()) {
2940 case ISD::ANY_EXTEND:
2941 case ISD::ZERO_EXTEND:
2942 case ISD::SIGN_EXTEND: {
2943 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
2945 // but only if the SPUprefslot2vec and <arg> types match.
2946 SDValue Op00 = Op0.getOperand(0);
2947 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
2948 SDValue Op000 = Op00.getOperand(0);
2949 if (Op000.getValueType() == NodeVT) {
2955 case SPUISD::VEC2PREFSLOT: {
2956 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
2958 Result = Op0.getOperand(0);
2966 // Otherwise, return unchanged.
2968 if (Result.getNode()) {
// Report the replacement when -debug output is enabled:
2969 DEBUG(errs() << "\nReplace.SPU: ");
2970 DEBUG(N->dump(&DAG));
2971 DEBUG(errs() << "\nWith: ");
2972 DEBUG(Result.getNode()->dump(&DAG));
2973 DEBUG(errs() << "\n");
2980 //===----------------------------------------------------------------------===//
2981 // Inline Assembly Support
2982 //===----------------------------------------------------------------------===//
2984 /// getConstraintType - Given a constraint letter, return the type of
2985 /// constraint it is for this target.
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target. The single-letter constraints handled
/// in the switch map to register classes; anything else defers to the base
/// TargetLowering implementation.
2986 SPUTargetLowering::ConstraintType
2987 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
2988 if (ConstraintLetter.size() == 1) {
2989 switch (ConstraintLetter[0]) {
2996 return C_RegisterClass;
2999 return TargetLowering::getConstraintType(ConstraintLetter);
/// Map a single-letter inline-asm constraint (GCC RS6000-style letters) to
/// an SPU register class, choosing by the operand's value type; unknown
/// constraints defer to the base TargetLowering implementation.
3002 std::pair<unsigned, const TargetRegisterClass*>
3003 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
3006 if (Constraint.size() == 1) {
3007 // GCC RS6000 Constraint Letters
3008 switch (Constraint[0]) {
3012 return std::make_pair(0U, SPU::R64CRegisterClass);
3013 return std::make_pair(0U, SPU::R32CRegisterClass);
3016 return std::make_pair(0U, SPU::R32FPRegisterClass);
3017 else if (VT == MVT::f64)
3018 return std::make_pair(0U, SPU::R64FPRegisterClass);
3021 return std::make_pair(0U, SPU::GPRCRegisterClass);
3025 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3028 //! Compute used/known bits for a SPU operand
// Compute known-zero/known-one bits for SPU-specific nodes. The listed
// SPU opcodes currently contribute no known-bits information (the case
// bodies visible here are empty fall-throughs).
3030 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3034 const SelectionDAG &DAG,
3035 unsigned Depth ) const {
3037 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3039 switch (Op.getOpcode()) {
3041 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3047 case SPUISD::PREFSLOT2VEC:
3048 case SPUISD::LDRESULT:
3049 case SPUISD::VEC2PREFSLOT:
3050 case SPUISD::SHLQUAD_L_BITS:
3051 case SPUISD::SHLQUAD_L_BYTES:
3052 case SPUISD::VEC_ROTL:
3053 case SPUISD::VEC_ROTR:
3054 case SPUISD::ROTBYTES_LEFT:
3055 case SPUISD::SELECT_MASK:
// Report the number of sign bits for SPU-specific nodes. For the handled
// case, a full-width result (VT.getSizeInBits()) is returned for types
// other than i8/i16/i32.
3062 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3063 unsigned Depth) const {
3064 switch (Op.getOpcode()) {
3069 EVT VT = Op.getValueType();
3071 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3074 return VT.getSizeInBits();
3079 // LowerAsmOperandForConstraint
// LowerAsmOperandForConstraint - no SPU-specific handling yet; everything
// is delegated to the TargetLowering base class.
3081 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3082 char ConstraintLetter,
3083 std::vector<SDValue> &Ops,
3084 SelectionDAG &DAG) const {
3085 // Default, for the time being, to the base class handler
3086 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
3089 /// isLegalAddressImmediate - Return true if the integer value can be used
3090 /// as the offset of the target addressing mode.
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
3091 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3092 const Type *Ty) const {
3093 // SPU's addresses are 256K:
// Accepts the open interval (-2^18, 2^18 - 1).
3094 return (V > -(1 << 18) && V < (1 << 18) - 1);
3097 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3102 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3103 // The SPU target isn't yet aware of offsets.