2 //===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the SPUTargetLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "SPURegisterNames.h"
15 #include "SPUISelLowering.h"
16 #include "SPUTargetMachine.h"
17 #include "SPUFrameInfo.h"
18 #include "SPUMachineFunction.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Function.h"
21 #include "llvm/Intrinsics.h"
22 #include "llvm/CallingConv.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/SelectionDAG.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/ADT/VectorExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
// Used in getTargetNodeName() below.
// NOTE(review): mutable file-scope map, lazily filled on first call to
// getTargetNodeName(); in the full source these file-local helpers live in
// an anonymous namespace (elided from this view).
std::map<unsigned, const char *> node_names;

//! EVT mapping to useful data for Cell SPU
// Associates a value type with lowering data (e.g. the preferred-slot byte
// offset used by LowerLOAD/LowerSTORE below). The member declarations are
// elided from this view.
struct valtype_map_s {

// Table of per-type lowering data; the initializer entries are elided from
// this view.
const valtype_map_s valtype_map[] = {

// Number of entries in valtype_map (classic sizeof-array idiom).
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
//! Look up the Cell SPU lowering data for a value type.
//  Linear scan of the file-local valtype_map table; returns the matching
//  entry. NOTE(review): in the full source the report_fatal_error below is
//  under a debug-only "retval == 0" miss check — the loop's break, that
//  guard, and the final return are elided from this view.
const valtype_map_s *getValueTypeMapEntry(EVT VT) {
  const valtype_map_s *retval = 0;
  for (size_t i = 0; i < n_valtype_map; ++i) {
    if (valtype_map[i].valtype == VT) {
      retval = valtype_map + i;
      // An unmapped EVT reaching here is a backend bug worth dying loudly on.
      report_fatal_error("getValueTypeMapEntry returns NULL for " +
                         Twine(VT.getEVTString()));
//! Expand a library call into an actual call DAG node
/*!
  This code is taken from SelectionDAGLegalize, since it is not exposed as
  part of the LLVM SelectionDAG API.

  \param LC        which runtime-library routine to call
  \param Op        node whose operands become the call arguments and whose
                   result type is the call's return type
  \param isSigned  selects sign- vs zero-extension for args and return
  \param Hi        NOTE(review): not referenced in the visible body — likely
                   vestigial; confirm against the full source
  \param TLI       target lowering info used for libcall name/convention
  \return the call's result value (CallInfo.first)

  NOTE(review): the 'static SDValue' return-type line, loop/function closing
  braces, and the 'const Type *RetTy =' declaration line are elided from
  this view.
*/
ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
              bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  // Turn each operand of Op into a typed call argument with matching
  // extension attributes.
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  // Callee is the external symbol for the libcall's well-known name.
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),

  // Splice the libcall in wherever FindInputOutputChains tells us to.
    Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG, Op.getDebugLoc());

  // Only the result value is returned; the chain (CallInfo.second) is
  // dropped by this caller.
  return CallInfo.first;
/// Construct the Cell SPU target lowering object: registers the legal
/// register classes and declares, per (operation, type) pair, whether the
/// operation is Legal, Promote'd, Expand'ed, or Custom-lowered on SPU.
/// NOTE(review): the constructor's opening '{', the remaining member
/// initializers, loop increments, and various closing braces are elided
/// from this view; all visible statements are preserved byte-for-byte.
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),

  // Fold away setcc operations if possible.

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8,   SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16,  SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32,  SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64,  SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32,  SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64,  SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  // No native truncating stores from i128 or f64->f32.
  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  // (loop upper bound excludes i128 itself; the '++sctype' increment line is
  // elided from this view)
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD,   VT, Custom);
    setOperationAction(ISD::STORE,  VT, Custom);
    setLoadExtAction(ISD::EXTLOAD,  VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    // Expand truncating stores from VT down to every narrower integer type.
    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Same treatment for the FP types (f32 only, since the bound excludes f64).
  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD,  VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,  MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8,    Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64,   Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM,    MVT::i8,   Expand);
  setOperationAction(ISD::UREM,    MVT::i8,   Expand);
  setOperationAction(ISD::SDIV,    MVT::i8,   Expand);
  setOperationAction(ISD::UDIV,    MVT::i8,   Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8,   Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8,   Expand);
  setOperationAction(ISD::SREM,    MVT::i16,  Expand);
  setOperationAction(ISD::UREM,    MVT::i16,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i16,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i16,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16,  Expand);
  setOperationAction(ISD::SREM,    MVT::i32,  Expand);
  setOperationAction(ISD::UREM,    MVT::i32,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i32,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i32,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32,  Expand);
  setOperationAction(ISD::SREM,    MVT::i64,  Expand);
  setOperationAction(ISD::UREM,    MVT::i64,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i64,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i64,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64,  Expand);
  setOperationAction(ISD::SREM,    MVT::i128, Expand);
  setOperationAction(ISD::UREM,    MVT::i128, Expand);
  setOperationAction(ISD::SDIV,    MVT::i128, Expand);
  setOperationAction(ISD::UDIV,    MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  setOperationAction(ISD::ROTR, MVT::i32,    Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16,    Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8,     Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32,    Legal);
  setOperationAction(ISD::ROTL, MVT::i16,    Legal);
  setOperationAction(ISD::ROTL, MVT::i8,     Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL,  MVT::i8,     Custom);
  setOperationAction(ISD::SRL,  MVT::i8,     Custom);
  setOperationAction(ISD::SRA,  MVT::i8,     Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL,  MVT::i64,    Legal);
  setOperationAction(ISD::SRL,  MVT::i64,    Legal);
  setOperationAction(ISD::SRA,  MVT::i64,    Legal);

  // Custom lower i8, i32 and i64 multiplications
  setOperationAction(ISD::MUL,  MVT::i8,     Custom);
  setOperationAction(ISD::MUL,  MVT::i32,    Legal);
  setOperationAction(ISD::MUL,  MVT::i64,    Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU,     MVT::i8,  Expand);
  setOperationAction(ISD::MULHS,     MVT::i8,  Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU,     MVT::i16, Expand);
  setOperationAction(ISD::MULHS,     MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU,     MVT::i64, Expand);
  setOperationAction(ISD::MULHS,     MVT::i64, Expand);

  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD,  MVT::i8,     Custom);
  setOperationAction(ISD::ADD,  MVT::i64,    Legal);
  setOperationAction(ISD::SUB,  MVT::i8,     Custom);
  setOperationAction(ISD::SUB,  MVT::i64,    Legal);

  // SPU does not have BSWAP. It does have i32 support CTLZ.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32,   Expand);
  setOperationAction(ISD::BSWAP, MVT::i64,   Expand);

  setOperationAction(ISD::CTPOP, MVT::i8,    Custom);
  setOperationAction(ISD::CTPOP, MVT::i16,   Custom);
  setOperationAction(ISD::CTPOP, MVT::i32,   Custom);
  setOperationAction(ISD::CTPOP, MVT::i64,   Custom);
  setOperationAction(ISD::CTPOP, MVT::i128,  Expand);

  setOperationAction(ISD::CTTZ , MVT::i8,    Expand);
  setOperationAction(ISD::CTTZ , MVT::i16,   Expand);
  setOperationAction(ISD::CTTZ , MVT::i32,   Expand);
  setOperationAction(ISD::CTTZ , MVT::i64,   Expand);
  setOperationAction(ISD::CTTZ , MVT::i128,  Expand);

  setOperationAction(ISD::CTLZ , MVT::i8,    Promote);
  setOperationAction(ISD::CTLZ , MVT::i16,   Promote);
  setOperationAction(ISD::CTLZ , MVT::i32,   Legal);
  setOperationAction(ISD::CTLZ , MVT::i64,   Expand);
  setOperationAction(ISD::CTLZ , MVT::i128,  Expand);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8,   Legal);
  setOperationAction(ISD::SELECT, MVT::i16,  Legal);
  setOperationAction(ISD::SELECT, MVT::i32,  Legal);
  setOperationAction(ISD::SELECT, MVT::i64,  Legal);

  setOperationAction(ISD::SETCC, MVT::i8,    Legal);
  setOperationAction(ISD::SETCC, MVT::i16,   Legal);
  setOperationAction(ISD::SETCC, MVT::i32,   Legal);
  setOperationAction(ISD::SETCC, MVT::i64,   Legal);
  setOperationAction(ISD::SETCC, MVT::f64,   Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  // NOTE(review): the i64/i128 Expand settings below for FP_TO_SINT (and the
  // i32 FP_TO_UINT Custom above) are partially overridden by later
  // setOperationAction calls near the end of this constructor — the last
  // call wins. Confirm which setting is intended before cleaning up.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);

  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand);      // to libcall

  // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  // (the '++sctype' increment line is elided from this view)
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress,  VT, Custom);
    setOperationAction(ISD::ConstantPool,   VT, Custom);
    setOperationAction(ISD::JumpTable,      VT, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  // NOTE(review): these override the earlier Expand (FP_TO_SINT/i64) and
  // duplicate the earlier Custom (SINT_TO_FP/i64) settings above.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  // NOTE(review): overrides the Custom setting for FP_TO_UINT/i32 above.
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD,     VT, Legal);
    setOperationAction(ISD::SUB,     VT, Legal);
    // mul has to be custom lowered.
    // NOTE(review): despite the comment, MUL is marked Legal here —
    // confirm whether vector MUL is handled by ISel patterns instead.
    setOperationAction(ISD::MUL,     VT, Legal);

    setOperationAction(ISD::AND,     VT, Legal);
    setOperationAction(ISD::OR,      VT, Legal);
    setOperationAction(ISD::XOR,     VT, Legal);
    setOperationAction(ISD::LOAD,    VT, Legal);
    setOperationAction(ISD::SELECT,  VT, Legal);
    setOperationAction(ISD::STORE,   VT, Legal);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV,    VT, Expand);
    setOperationAction(ISD::SREM,    VT, Expand);
    setOperationAction(ISD::UDIV,    VT, Expand);
    setOperationAction(ISD::UREM,    VT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR,  MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
/// Return the human-readable name of an SPUISD target node, or 0 for
/// unknown opcodes (used by DAG dump/debug output).
/// NOTE(review): lazily populates the mutable file-scope node_names map on
/// first call — not thread-safe; the 'const char *' return-type line and
/// some closing braces are elided from this view.
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";

  // Lookup; unknown opcodes yield a null name rather than an error.
  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
/// getFunctionAlignment - Return the Log2 alignment of this function.
/// (The function body/return statement is elided from this view; the
/// Function argument is intentionally unnamed because it is unused.)
unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//

/// Return the value type a SETCC on VT should produce: the type itself for
/// i8/i16/i32, otherwise a default (the fallback expression of the ternary
/// is elided from this view).
MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
          VT.getSimpleVT().SimpleTy :
540 //===----------------------------------------------------------------------===//
541 // Calling convention code:
542 //===----------------------------------------------------------------------===//
544 #include "SPUGenCallingConv.inc"
546 //===----------------------------------------------------------------------===//
547 // LowerOperation implementation
548 //===----------------------------------------------------------------------===//
/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

    %2  v16i8,ch = rotate %1
    %3  v4f8, ch = bitconvert %2
    %4  f32 = vec2perfslot %3
    %5  f64 = fp_extend %4

 NOTE(review): the 'static SDValue' return-type line, the local
 'SDValue result;' / 'SDValue rotate;' declarations, and several closing
 braces are elided from this view.
*/
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();       // type in memory
  EVT OutVT = Op.getValueType();      // (possibly wider) result type
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  // Per-type lowering data; supplies the preferred-slot byte offset.
  const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
  DebugLoc dl = Op.getDebugLoc();

  switch (LN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    SDValue basePtr = LN->getBasePtr();

    if (alignment == 16) {

      // Special cases for a known aligned load to simplify the base pointer
      // and the rotation amount:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode > (basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();
        // Rotate amount: position within the quadword minus the preferred
        // slot of this element type.
        int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);

        rotate = DAG.getConstant(rotamt, MVT::i16);

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        if ((offset & ~0xf) > 0) {
          // Fold the 16-byte-aligned part of the offset back into the address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                DAG.getConstant((offset & ~0xf), PtrVT));
      } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
                 || (basePtr.getOpcode() == SPUISD::IndirectAddr
                     && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                     && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
        // Plain aligned a-form address: rotate into preferred slot
        // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
        int64_t rotamt = -vtm->prefslot_byte;

        rotate = DAG.getConstant(rotamt, MVT::i16);
        // Offset the rotate amount by the basePtr and the preferred slot
        int64_t rotamt = -vtm->prefslot_byte;

        rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                             DAG.getConstant(rotamt, PtrVT));
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        // Not an ADD: wrap the base pointer as (IndirectAddr base, 0).
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant(0, PtrVT));

      // Offset the rotate amount by the basePtr and the preferred slot
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getConstant(-vtm->prefslot_byte, PtrVT));

    // Re-emit as a v16i8 vector load
    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                         LN->getPointerInfo(),
                         LN->isVolatile(), LN->isNonTemporal(), 16);

    // Update the chain
    the_chain = result.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
                         result.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));

    // Handle extending loads by extending the scalar result:
    if (ExtType == ISD::SEXTLOAD) {
      result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::ZEXTLOAD) {
      result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::EXTLOAD) {
      unsigned NewOpc = ISD::ANY_EXTEND;

      if (OutVT.isFloatingPoint())
        NewOpc = ISD::FP_EXTEND;

      result = DAG.getNode(NewOpc, dl, OutVT, result);

    // Wrap result + chain in an LDRESULT node so both values are returned.
    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
    SDValue retops[2] = {

    result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                         retops, sizeof(retops) / sizeof(retops[0]));

  case ISD::LAST_INDEXED_MODE:
    // Indexed addressing modes are not supported on SPU.
    report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
                       Twine((unsigned)LN->getAddressingMode()));
/// Custom lower stores for CellSPU
/*!
 All CellSPU stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to generate a shuffle to insert the
 requested element into its place, then store the resulting block.

 NOTE(review): the 'static SDValue' return-type line, the local 'SDValue
 result;' declaration, and several closing braces are elided from this view.
*/
LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  StoreSDNode *SN = cast<StoreSDNode>(Op);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();
  // For truncating stores, the in-memory type is narrower than VT.
  EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned alignment = SN->getAlignment();

  switch (SN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    // The vector type we really want to load from the 16-byte chunk.
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 VT, (128 / VT.getSizeInBits()));

    SDValue alignLoadVec;
    SDValue basePtr = SN->getBasePtr();
    SDValue the_chain = SN->getChain();
    SDValue insertEltOffs;

    if (alignment == 16) {

      // Special cases for a known aligned load to simplify the base pointer
      // and insertion byte:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        // Insertion point is the sub-quadword part of the offset.
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    DAG.getConstant((offset & 0xf), PtrVT));

        if ((offset & ~0xf) > 0) {
          // Fold the aligned part of the offset back into the address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        // Otherwise, assume it's at byte 0 of basePtr
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    DAG.getConstant(0, PtrVT));
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant(0, PtrVT));
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        // Not an ADD: wrap the base pointer as (IndirectAddr base, 0).
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              DAG.getConstant(0, PtrVT));

      // Insertion point is solely determined by basePtr's contents
      insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
                                  DAG.getConstant(0, PtrVT));

    // Load the memory to which to store.
    alignLoadVec = DAG.getLoad(vecVT, dl, the_chain, basePtr,
                               SN->getPointerInfo(),
                               SN->isVolatile(), SN->isNonTemporal(), 16);

    // Update the chain
    the_chain = alignLoadVec.getValue(1);

    LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
    SDValue theValue = SN->getValue();

    if (StVT != VT
        && (theValue.getOpcode() == ISD::AssertZext
            || theValue.getOpcode() == ISD::AssertSext)) {
      // Drill down and get the value for zero- and sign-extended
      theValue = theValue.getOperand(0);

    // If the base pointer is already a D-form address, then just create
    // a new D-form address with a slot offset and the orignal base pointer.
    // Otherwise generate a D-form address with the slot offset relative
    // to the stack pointer, which is always aligned.
#ifndef NDEBUG
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      errs() << "CellSPU LowerSTORE: basePtr = ";
      basePtr.getNode()->dump(&DAG);

    // Build the shuffle mask that drops the scalar into its slot, then
    // blend the vectorized value into the loaded quadword via SHUFB.
    SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
    SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,

    result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                         vectorizeOp, alignLoadVec,
                         DAG.getNode(ISD::BIT_CONVERT, dl,
                                     MVT::v4i32, insertEltOp));

    // Store the merged quadword back (pointer info taken from the align
    // load above).
    result = DAG.getStore(the_chain, dl, result, basePtr,
                          LN->getPointerInfo(),
                          LN->isVolatile(), LN->isNonTemporal(),

#if 0 && !defined(NDEBUG)
    // NOTE(review): dead debug code; "¤tRoot" is encoding damage for
    // "&currentRoot" — harmless under '#if 0' but should be repaired.
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      const SDValue ¤tRoot = DAG.getRoot();

      errs() << "------- CellSPU:LowerStore result:\n";
      errs() << "-------\n";
      DAG.setRoot(currentRoot);

  case ISD::LAST_INDEXED_MODE:
    // FIXME(review): message says "LowerLOAD"/"LoadSDNode" but this is
    // LowerSTORE on a StoreSDNode — copy-paste error in the diagnostic text.
    report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
                       Twine((unsigned)SN->getAddressingMode()));
891 //! Generate the address of a constant pool entry.
// Lowers an ISD::ConstantPool node to an SPU address node. Only the static
// relocation model is handled: small-memory code emits an A-form (absolute)
// address, while large-memory code builds an SPUISD::Hi / SPUISD::Lo pair
// combined through an IndirectAddr node.
893 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
894 EVT PtrVT = Op.getValueType();
895 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
896 const Constant *C = CP->getConstVal();
897 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
898 SDValue Zero = DAG.getConstant(0, PtrVT);
899 const TargetMachine &TM = DAG.getTarget();
900 // FIXME there is no actual debug info here
901 DebugLoc dl = Op.getDebugLoc();
903 if (TM.getRelocationModel() == Reloc::Static) {
904 if (!ST->usingLargeMem()) {
905 // Just return the SDValue with the constant pool address in it.
906 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
// Large-memory mode: split the address into high/low halves.
908 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
909 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
910 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
// Any non-static relocation model is unimplemented for CellSPU.
914 llvm_unreachable("LowerConstantPool: Relocation model other than static"
919 //! Alternate entry point for generating the address of a constant pool entry
// Thin public wrapper: forwards to the file-static LowerConstantPool above,
// fetching the subtarget from the supplied SPUTargetMachine.
921 SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM) {
922 return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
// Lowers an ISD::JumpTable node following the same scheme as constant pool
// addresses: A-form address for small-memory code, Hi/Lo + IndirectAddr for
// large-memory code. Only the static relocation model is supported.
926 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
927 EVT PtrVT = Op.getValueType();
928 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
929 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
930 SDValue Zero = DAG.getConstant(0, PtrVT);
931 const TargetMachine &TM = DAG.getTarget();
932 // FIXME there is no actual debug info here
933 DebugLoc dl = Op.getDebugLoc();
935 if (TM.getRelocationModel() == Reloc::Static) {
936 if (!ST->usingLargeMem()) {
937 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
// Large-memory mode: split the address into high/low halves.
939 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
940 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
941 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
// Any non-static relocation model is unimplemented for CellSPU.
945 llvm_unreachable("LowerJumpTable: Relocation model other than static"
// Lowers an ISD::GlobalAddress node, preserving any constant offset carried
// by the GlobalAddressSDNode. Address form matches the constant-pool and
// jump-table lowering: A-form for small memory, Hi/Lo + IndirectAddr for
// large memory; only the static relocation model is supported.
951 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
952 EVT PtrVT = Op.getValueType();
953 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
954 const GlobalValue *GV = GSDN->getGlobal();
955 SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
956 PtrVT, GSDN->getOffset());
957 const TargetMachine &TM = DAG.getTarget();
958 SDValue Zero = DAG.getConstant(0, PtrVT);
959 // FIXME there is no actual debug info here
960 DebugLoc dl = Op.getDebugLoc();
962 if (TM.getRelocationModel() == Reloc::Static) {
963 if (!ST->usingLargeMem()) {
964 return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
// Large-memory mode: split the address into high/low halves.
966 SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
967 SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
968 return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
// Any non-static relocation model is unimplemented for CellSPU.
971 report_fatal_error("LowerGlobalAddress: Relocation model other than static"
979 //! Custom lower double precision floating point constants
// An f64 constant is materialized through the integer pipeline: take the
// raw IEEE-754 bit pattern, splat it into a v2i64 BUILD_VECTOR, bitcast to
// v2f64, and extract the preferred slot (VEC2PREFSLOT).
981 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
982 EVT VT = Op.getValueType();
983 // FIXME there is no actual debug info here
984 DebugLoc dl = Op.getDebugLoc();
986 if (VT == MVT::f64) {
987 ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
990 "LowerConstantFP: Node is not ConstantFPSDNode");
// Reinterpret the double as its 64-bit integer bit pattern.
992 uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
993 SDValue T = DAG.getConstant(dbits, MVT::i64);
994 SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
995 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
996 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
// Lowers incoming formal arguments. Register-assigned arguments are copied
// out of their physical registers into fresh virtual registers (register
// class chosen by value type); the remainder are loaded from fixed stack
// slots. For varargs functions, all remaining argument registers (R3..R79)
// are spilled to sequential stack slots so va_arg can walk them.
1003 SPUTargetLowering::LowerFormalArguments(SDValue Chain,
1004 CallingConv::ID CallConv, bool isVarArg,
1005 const SmallVectorImpl<ISD::InputArg>
1007 DebugLoc dl, SelectionDAG &DAG,
1008 SmallVectorImpl<SDValue> &InVals)
1011 MachineFunction &MF = DAG.getMachineFunction();
1012 MachineFrameInfo *MFI = MF.getFrameInfo();
1013 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1014 SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
// Arguments spill just above the minimal [LR]/[SP] linkage area.
1016 unsigned ArgOffset = SPUFrameInfo::minStackSize();
1017 unsigned ArgRegIdx = 0;
1018 unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
1020 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1022 SmallVector<CCValAssign, 16> ArgLocs;
1023 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
1025 // FIXME: allow for other calling conventions
1026 CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
1028 // Add DAG nodes to load the arguments or copy them out of registers.
1029 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
1030 EVT ObjectVT = Ins[ArgNo].VT;
1031 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1033 CCValAssign &VA = ArgLocs[ArgNo];
1035 if (VA.isRegLoc()) {
// Pick the register class matching the argument's value type.
1036 const TargetRegisterClass *ArgRegClass;
1038 switch (ObjectVT.getSimpleVT().SimpleTy) {
1040 report_fatal_error("LowerFormalArguments Unhandled argument type: " +
1041 Twine(ObjectVT.getEVTString()));
1043 ArgRegClass = &SPU::R8CRegClass;
1046 ArgRegClass = &SPU::R16CRegClass;
1049 ArgRegClass = &SPU::R32CRegClass;
1052 ArgRegClass = &SPU::R64CRegClass;
1055 ArgRegClass = &SPU::GPRCRegClass;
1058 ArgRegClass = &SPU::R32FPRegClass;
1061 ArgRegClass = &SPU::R64FPRegClass;
1069 ArgRegClass = &SPU::VECREGRegClass;
// Copy the physical argument register into a virtual register.
1073 unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
1074 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1075 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
1078 // We need to load the argument to a virtual register if we determined
1079 // above that we ran out of physical registers of the appropriate type
1080 // or we're forced to do vararg
1081 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
1082 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1083 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
1085 ArgOffset += StackSlotSize;
1088 InVals.push_back(ArgVal);
1090 Chain = ArgVal.getOperand(0);
1095 // FIXME: we should be able to query the argument registers from
1096 // tablegen generated code.
1097 static const unsigned ArgRegs[] = {
1098 SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
1099 SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
1100 SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
1101 SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
1102 SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
1103 SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
1104 SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
1105 SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
1106 SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
1107 SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
1108 SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
1110 // size of ArgRegs array
1111 unsigned NumArgRegs = 77;
1113 // We will spill (79-3)+1 registers to the stack
1114 SmallVector<SDValue, 79-3+1> MemOps;
1116 // Create the frame slot
// Varargs: spill each unused argument register (as a full v16i8 quadword)
// to its own fixed stack slot; the last slot index recorded becomes the
// varargs frame index.
1117 for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
1118 FuncInfo->setVarArgsFrameIndex(
1119 MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
1120 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1121 unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
1122 SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
1123 SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, MachinePointerInfo(),
1125 Chain = Store.getOperand(0);
1126 MemOps.push_back(Store);
1128 // Increment address by stack slot size for the next stored argument
1129 ArgOffset += StackSlotSize;
// Tie all the varargs spill stores together with a TokenFactor.
1131 if (!MemOps.empty())
1132 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1133 &MemOps[0], MemOps.size());
1139 /// isLSAAddress - Return the immediate to use if the specified
1140 /// value is representable as a LSA address.
// Returns null unless Op is a constant that is word-aligned (low 2 bits
// zero) and whose value fits in a sign-extended 16-bit word address; on
// success returns the constant shifted right by 2 (the word index).
1141 static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
1142 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1145 int Addr = C->getZExtValue();
1146 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1147 (Addr << 14 >> 14) != Addr)
1148 return 0; // Top 14 bits have to be sext of immediate.
1150 return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
// Lowers a call: assigns outgoing arguments to registers or stack slots,
// emits CALLSEQ_START/END around an SPUISD::CALL node, rewrites the callee
// into the appropriate SPU address form (PC-relative BRSL for locally
// defined symbols, A-form BRASL for declarations, X-form indirect for
// large-memory mode), and finally copies any return values out of the
// return-value registers into InVals.
1154 SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1155 CallingConv::ID CallConv, bool isVarArg,
1157 const SmallVectorImpl<ISD::OutputArg> &Outs,
1158 const SmallVectorImpl<SDValue> &OutVals,
1159 const SmallVectorImpl<ISD::InputArg> &Ins,
1160 DebugLoc dl, SelectionDAG &DAG,
1161 SmallVectorImpl<SDValue> &InVals) const {
1162 // CellSPU target does not yet support tail call optimization.
1165 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
1166 unsigned NumOps = Outs.size();
1167 unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
1169 SmallVector<CCValAssign, 16> ArgLocs;
1170 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
1172 // FIXME: allow for other calling conventions
1173 CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
1175 const unsigned NumArgRegs = ArgLocs.size();
1178 // Handy pointer type
1179 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1181 // Set up a copy of the stack pointer for use loading and storing any
1182 // arguments that may not fit in the registers available for argument
1184 SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
1186 // Figure out which arguments are going to go in registers, and which in
1188 unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
1189 unsigned ArgRegIdx = 0;
1191 // Keep track of registers passing arguments
1192 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
1193 // And the arguments passed on the stack
1194 SmallVector<SDValue, 8> MemOpChains;
1196 for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
1197 SDValue Arg = OutVals[ArgRegIdx];
1198 CCValAssign &VA = ArgLocs[ArgRegIdx];
1200 // PtrOff will be used to store the current argument to the stack if a
1201 // register cannot be found for it.
1202 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1203 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
1205 switch (Arg.getValueType().getSimpleVT().SimpleTy) {
1206 default: llvm_unreachable("Unexpected ValueType for argument!");
// Register if one was assigned; otherwise spill to the stack slot.
1220 if (ArgRegIdx != NumArgRegs) {
1221 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1223 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
1224 MachinePointerInfo(),
1226 ArgOffset += StackSlotSize;
1232 // Accumulate how many bytes are to be pushed on the stack, including the
1233 // linkage area, and parameter passing area. According to the SPU ABI,
1234 // we minimally need space for [LR] and [SP].
1235 unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();
1237 // Insert a call sequence start
1238 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
1241 if (!MemOpChains.empty()) {
1242 // Adjust the stack pointer for the stack arguments.
1243 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1244 &MemOpChains[0], MemOpChains.size());
1247 // Build a sequence of copy-to-reg nodes chained together with token chain
1248 // and flag operands which copy the outgoing args into the appropriate regs.
1250 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1251 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1252 RegsToPass[i].second, InFlag);
1253 InFlag = Chain.getValue(1);
1256 SmallVector<SDValue, 8> Ops;
1257 unsigned CallOpc = SPUISD::CALL;
1259 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1260 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1261 // node so that legalize doesn't hack it.
1262 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1263 const GlobalValue *GV = G->getGlobal();
1264 EVT CalleeVT = Callee.getValueType();
1265 SDValue Zero = DAG.getConstant(0, PtrVT);
1266 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
1268 if (!ST->usingLargeMem()) {
1269 // Turn calls to targets that are defined (i.e., have bodies) into BRSL
1270 // style calls, otherwise, external symbols are BRASL calls. This assumes
1271 // that declared/defined symbols are in the same compilation unit and can
1272 // be reached through PC-relative jumps.
1275 // This may be an unsafe assumption for JIT and really large compilation
1277 if (GV->isDeclaration()) {
1278 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
1280 Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
1283 // "Large memory" mode: Turn all calls into indirect calls with a X-form
1285 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
1287 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1288 EVT CalleeVT = Callee.getValueType();
1289 SDValue Zero = DAG.getConstant(0, PtrVT);
1290 SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
1291 Callee.getValueType());
1293 if (!ST->usingLargeMem()) {
1294 Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
1296 Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
1298 } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
1299 // If this is an absolute destination address that appears to be a legal
1300 // local store address, use the munged value.
1301 Callee = SDValue(Dest, 0);
1304 Ops.push_back(Chain);
1305 Ops.push_back(Callee);
1307 // Add argument registers to the end of the list so that they are known live
1309 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1310 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1311 RegsToPass[i].second.getValueType()));
1313 if (InFlag.getNode())
1314 Ops.push_back(InFlag);
1315 // Returns a chain and a flag for retval copy to use.
1316 Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
1317 &Ops[0], Ops.size());
1318 InFlag = Chain.getValue(1);
1320 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
1321 DAG.getIntPtrConstant(0, true), InFlag);
1323 InFlag = Chain.getValue(1);
1325 // If the function returns void, just return the chain.
1329 // Now handle the return value(s)
1330 SmallVector<CCValAssign, 16> RVLocs;
1331 CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
1332 RVLocs, *DAG.getContext());
1333 CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
1336 // If the call has results, copy the values out of the ret val registers.
1337 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1338 CCValAssign VA = RVLocs[i];
1340 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1342 Chain = Val.getValue(1);
1343 InFlag = Val.getValue(2);
1344 InVals.push_back(Val);
// Lowers a function return: analyzes the return values with RetCC_SPU,
// records the return registers in the function's live-out set (first return
// lowered only), copies each value into its assigned register, and emits an
// SPUISD::RET_FLAG node (glued to the last copy when values are returned).
1351 SPUTargetLowering::LowerReturn(SDValue Chain,
1352 CallingConv::ID CallConv, bool isVarArg,
1353 const SmallVectorImpl<ISD::OutputArg> &Outs,
1354 const SmallVectorImpl<SDValue> &OutVals,
1355 DebugLoc dl, SelectionDAG &DAG) const {
1357 SmallVector<CCValAssign, 16> RVLocs;
1358 CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
1359 RVLocs, *DAG.getContext());
1360 CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
1362 // If this is the first return lowered for this function, add the regs to the
1363 // liveout set for the function.
1364 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1365 for (unsigned i = 0; i != RVLocs.size(); ++i)
1366 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1371 // Copy the result values into the output registers.
1372 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1373 CCValAssign &VA = RVLocs[i];
1374 assert(VA.isRegLoc() && "Can only return in registers!");
1375 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1377 Flag = Chain.getValue(1);
// Glue the return to the last value copy when one exists.
1381 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1383 return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
1387 //===----------------------------------------------------------------------===//
1388 // Vector related lowering:
1389 //===----------------------------------------------------------------------===//
1391 static ConstantSDNode *
// Scans a BUILD_VECTOR node for a single repeated constant: skips UNDEF
// elements and requires every defined element to be the same SDValue.
// Returns the ConstantSDNode on success; used by the SPU::get_vec_* and
// SPU::get_v*_imm immediate matchers below.
1392 getVecImm(SDNode *N) {
1393 SDValue OpVal(0, 0);
1395 // Check to see if this buildvec has a single non-undef value in its elements.
1396 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1397 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
1398 if (OpVal.getNode() == 0)
1399 OpVal = N->getOperand(i);
1400 else if (OpVal != N->getOperand(i))
// All defined elements matched; report the constant if it is one.
1404 if (OpVal.getNode() != 0) {
1405 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
1413 /// get_vec_i18imm - Test if this vector is a vector filled with the same value
1414 /// and the value fits into an unsigned 18-bit constant, and if so, return the
// For i64 splats the 64-bit value is only accepted when the upper and lower
// 32-bit halves agree, in which case the common 32-bit half is tested.
1416 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
1418 if (ConstantSDNode *CN = getVecImm(N)) {
1419 uint64_t Value = CN->getZExtValue();
1420 if (ValueType == MVT::i64) {
1421 uint64_t UValue = CN->getZExtValue();
1422 uint32_t upper = uint32_t(UValue >> 32);
1423 uint32_t lower = uint32_t(UValue);
1426 Value = Value >> 32;
// 18-bit unsigned immediate range check.
1428 if (Value <= 0x3ffff)
1429 return DAG.getTargetConstant(Value, ValueType);
1435 /// get_vec_i16imm - Test if this vector is a vector filled with the same value
1436 /// and the value fits into a signed 16-bit constant, and if so, return the
// As with get_vec_u18imm, i64 splats require matching 32-bit halves.
1438 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
1440 if (ConstantSDNode *CN = getVecImm(N)) {
1441 int64_t Value = CN->getSExtValue();
1442 if (ValueType == MVT::i64) {
1443 uint64_t UValue = CN->getZExtValue();
1444 uint32_t upper = uint32_t(UValue >> 32);
1445 uint32_t lower = uint32_t(UValue);
1448 Value = Value >> 32;
// Signed 16-bit immediate range check: [-32768, 32767].
1450 if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
1451 return DAG.getTargetConstant(Value, ValueType);
1458 /// get_vec_i10imm - Test if this vector is a vector filled with the same value
1459 /// and the value fits into a signed 10-bit constant, and if so, return the
// As with get_vec_u18imm, i64 splats require matching 32-bit halves.
1461 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
1463 if (ConstantSDNode *CN = getVecImm(N)) {
1464 int64_t Value = CN->getSExtValue();
1465 if (ValueType == MVT::i64) {
1466 uint64_t UValue = CN->getZExtValue();
1467 uint32_t upper = uint32_t(UValue >> 32);
1468 uint32_t lower = uint32_t(UValue);
1471 Value = Value >> 32;
// Signed 10-bit immediate range check.
1473 if (isInt<10>(Value))
1474 return DAG.getTargetConstant(Value, ValueType);
1480 /// get_vec_i8imm - Test if this vector is a vector filled with the same value
1481 /// and the value fits into a signed 8-bit constant, and if so, return the
1484 /// @note: The incoming vector is v16i8 because that's the only way we can load
1485 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
1487 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
1489 if (ConstantSDNode *CN = getVecImm(N)) {
1490 int Value = (int) CN->getZExtValue();
// For i16, accept only when the high byte repeats the low byte, and
// return just the low byte as the immediate.
1491 if (ValueType == MVT::i16
1492 && Value <= 0xffff /* truncated from uint64_t */
1493 && ((short) Value >> 8) == ((short) Value & 0xff))
1494 return DAG.getTargetConstant(Value & 0xff, ValueType);
1495 else if (ValueType == MVT::i8
1496 && (Value & 0xff) == Value)
1497 return DAG.getTargetConstant(Value, ValueType);
1503 /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
1504 /// and the value fits into a signed 16-bit constant, and if so, return the
// Matches splat values whose low 16 bits are zero (ILHU loads the upper
// halfword of each word); on success returns the value shifted down 16.
1506 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
1508 if (ConstantSDNode *CN = getVecImm(N)) {
1509 uint64_t Value = CN->getZExtValue();
1510 if ((ValueType == MVT::i32
1511 && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
1512 || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
1513 return DAG.getTargetConstant(Value >> 16, ValueType);
1519 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
// Returns the splatted constant of a v4i32 build_vector as an i32 target
// constant; empty SDValue if the vector is not a uniform constant splat.
1520 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
1521 if (ConstantSDNode *CN = getVecImm(N)) {
1522 return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
1528 /// get_v2i64_imm - Catch-all for general 64-bit constant vectors
// Returns the splatted constant of a v2i64 build_vector as an i64 target
// constant; empty SDValue if the vector is not a uniform constant splat.
1529 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
1530 if (ConstantSDNode *CN = getVecImm(N)) {
// Keep the full 64-bit value: the previous (unsigned) cast truncated the
// splat constant to its low 32 bits before re-widening it to i64.
1531 return DAG.getTargetConstant(CN->getZExtValue(), MVT::i64);
1537 //! Lower a BUILD_VECTOR instruction creatively:
// Only uniform constant splats are handled here (isConstantSplat with at
// least 16 splat bits); per element type the splat is re-materialized as an
// integer BUILD_VECTOR (bitcast back for FP types), with v2i64 delegated to
// SPU::LowerV2I64Splat. Non-splat vectors fall through and return SDValue().
1539 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
1540 EVT VT = Op.getValueType();
1541 EVT EltVT = VT.getVectorElementType();
1542 DebugLoc dl = Op.getDebugLoc();
1543 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
1544 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
1545 unsigned minSplatBits = EltVT.getSizeInBits();
// Require at least a 16-bit splat pattern.
1547 if (minSplatBits < 16)
1550 APInt APSplatBits, APSplatUndef;
1551 unsigned SplatBitSize;
1554 if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1555 HasAnyUndefs, minSplatBits)
1556 || minSplatBits < SplatBitSize)
1557 return SDValue(); // Wasn't a constant vector or splat exceeded min
1559 uint64_t SplatBits = APSplatBits.getZExtValue();
1561 switch (VT.getSimpleVT().SimpleTy) {
1563 report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
1564 Twine(VT.getEVTString()));
1567 uint32_t Value32 = uint32_t(SplatBits);
1568 assert(SplatBitSize == 32
1569 && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
1570 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1571 SDValue T = DAG.getConstant(Value32, MVT::i32);
1572 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
1573 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
1577 uint64_t f64val = uint64_t(SplatBits);
1578 assert(SplatBitSize == 64
1579 && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
1580 // NOTE: pretend the constant is an integer. LLVM won't load FP constants
1581 SDValue T = DAG.getConstant(f64val, MVT::i64);
1582 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
1583 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
1587 // 8-bit constants have to be expanded to 16-bits
1588 unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
1589 SmallVector<SDValue, 8> Ops;
1591 Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
1592 return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
1593 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
1596 unsigned short Value16 = SplatBits;
1597 SDValue T = DAG.getConstant(Value16, EltVT);
1598 SmallVector<SDValue, 8> Ops;
1601 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
1604 SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
1605 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
// v2i64 splats need special shuffle-based materialization.
1608 return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
// Materializes a v2i64 splat of SplatVal. Strategy, cheapest first:
// (1) if both 32-bit halves are equal, splat that half as v4i32 and bitcast;
// (2) if both halves are "special" byte patterns (0, 0xffffffff, 0x80000000),
//     emit a plain BUILD_VECTOR (lowered to a constant pool load);
// (3) otherwise splat each non-special half as v4i32 and combine them with a
//     SHUFB whose mask selects/synthesizes the right bytes per position.
1618 SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
1620 uint32_t upper = uint32_t(SplatVal >> 32);
1621 uint32_t lower = uint32_t(SplatVal);
1623 if (upper == lower) {
1624 // Magic constant that can be matched by IL, ILA, et. al.
1625 SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
1626 return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
1627 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1628 Val, Val, Val, Val));
1630 bool upper_special, lower_special;
1632 // NOTE: This code creates common-case shuffle masks that can be easily
1633 // detected as common expressions. It is not attempting to create highly
1634 // specialized masks to replace any and all 0's, 0xff's and 0x80's.
1636 // Detect if the upper or lower half is a special shuffle mask pattern:
1637 upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
1638 lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);
1640 // Both upper and lower are special, lower to a constant pool load:
1641 if (lower_special && upper_special) {
1642 SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
1643 return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
1644 SplatValCN, SplatValCN);
1649 SmallVector<SDValue, 16> ShufBytes;
1652 // Create lower vector if not a special pattern
1653 if (!lower_special) {
1654 SDValue LO32C = DAG.getConstant(lower, MVT::i32);
1655 LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
1656 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1657 LO32C, LO32C, LO32C, LO32C));
1660 // Create upper vector if not a special pattern
1661 if (!upper_special) {
1662 SDValue HI32C = DAG.getConstant(upper, MVT::i32);
1663 HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
1664 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1665 HI32C, HI32C, HI32C, HI32C));
1668 // If either upper or lower are special, then the two input operands are
1669 // the same (basically, one of them is a "don't care")
// Build the 16-byte SHUFB mask, one 32-bit word (i) of 4 bytes (j) at a
// time. Even words select the upper half, odd words the lower half.
1675 for (int i = 0; i < 4; ++i) {
1677 for (int j = 0; j < 4; ++j) {
1679 bool process_upper, process_lower;
1681 process_upper = (upper_special && (i & 1) == 0);
1682 process_lower = (lower_special && (i & 1) == 1);
1684 if (process_upper || process_lower) {
// Special halves are synthesized by the shuffle mask itself:
// SHUFB control bytes can produce 0x00, 0xff, or 0x80 directly.
1685 if ((process_upper && upper == 0)
1686 || (process_lower && lower == 0))
1688 else if ((process_upper && upper == 0xffffffff)
1689 || (process_lower && lower == 0xffffffff))
1691 else if ((process_upper && upper == 0x80000000)
1692 || (process_lower && lower == 0x80000000))
1693 val |= (j == 0 ? 0xe0 : 0x80);
// Non-special halves are picked out of HI32/LO32 by byte index.
1695 val |= i * 4 + j + ((i & 1) * 16);
1698 ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
1701 return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
1702 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1703 &ShufBytes[0], ShufBytes.size()));
1707 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1708 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1709 /// permutation vector, V3, is monotonically increasing with one "exception"
1710 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1711 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1712 /// In either case, the net result is going to eventually invoke SHUFB to
1713 /// permute/shuffle the bytes from V1 and V2.
1715 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, generate
1716 /// control word for byte/halfword/word insertion. This takes care of a single
1717 /// element move from V2 into V1.
1719 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions.
1720 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1721 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1722 SDValue V1 = Op.getOperand(0);
1723 SDValue V2 = Op.getOperand(1);
1724 DebugLoc dl = Op.getDebugLoc();
1726 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1728 // If we have a single element being moved from V1 to V2, this can be handled
1729 // using the C*[DX] compute mask instructions, but the vector elements have
1730 // to be monotonically increasing with one exception element, and the source
1731 // slot of the element to move must be the same as the destination.
1732 EVT VecVT = V1.getValueType();
1733 EVT EltVT = VecVT.getVectorElementType();
1734 unsigned EltsFromV2 = 0;
1735 unsigned V2EltOffset = 0;
1736 unsigned V2EltIdx0 = 0;
1737 unsigned CurrElt = 0;
1738 unsigned MaxElts = VecVT.getVectorNumElements();
1739 unsigned PrevElt = 0;
1740 bool monotonic = true;
1743 EVT maskVT; // which of the c?d instructions to use
// Choose the shuffle-mask vector type from the element type.
1745 if (EltVT == MVT::i8) {
1747 maskVT = MVT::v16i8;
1748 } else if (EltVT == MVT::i16) {
1750 maskVT = MVT::v8i16;
1751 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1753 maskVT = MVT::v4i32;
1754 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1756 maskVT = MVT::v2i64;
1758 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
// Walk the mask, classifying it as a single-element insertion from V2
// (monotonic), a rotation, or a general shuffle.
1760 for (unsigned i = 0; i != MaxElts; ++i) {
1761 if (SVN->getMaskElt(i) < 0)
1764 unsigned SrcElt = SVN->getMaskElt(i);
1767 if (SrcElt >= V2EltIdx0) {
1768 // TODO: optimize for the monotonic case when several consecutive
1769 // elements are taken form V2. Do we ever get such a case?
1770 if (EltsFromV2 == 0 && CurrElt == (SrcElt - V2EltIdx0))
1771 V2EltOffset = (SrcElt - V2EltIdx0) * (EltVT.getSizeInBits()/8);
1775 } else if (CurrElt != SrcElt) {
1783 if (PrevElt > 0 && SrcElt < MaxElts) {
1784 if ((PrevElt == SrcElt - 1)
1785 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1791 } else if (i == 0 || (PrevElt==0 && SrcElt==1)) {
1792 // First time or after a "wrap around"
1795 // This isn't a rotation, takes elements from vector 2
1801 if (EltsFromV2 == 1 && monotonic) {
1802 // Compute mask and shuffle
1803 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1805 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1806 // R1 ($sp) is used here only as it is guaranteed to have last bits zero
1807 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1808 DAG.getRegister(SPU::R1, PtrVT),
1809 DAG.getConstant(V2EltOffset, MVT::i32));
1810 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1813 // Use shuffle mask in SHUFB synthetic instruction:
1814 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1816 } else if (rotate) {
// Convert the element rotate amount to a byte rotate amount.
1819 rotamt *= EltVT.getSizeInBits()/8;
1820 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1821 V1, DAG.getConstant(rotamt, MVT::i16));
1823 // Convert the SHUFFLE_VECTOR mask's input element units to the
1825 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1827 SmallVector<SDValue, 16> ResultMask;
1828 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1829 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1831 for (unsigned j = 0; j < BytesPerElement; ++j)
1832 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1834 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1835 &ResultMask[0], ResultMask.size());
1836 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
// Lowers ISD::SCALAR_TO_VECTOR. Constant scalars become a full constant
// BUILD_VECTOR (n_copies of the value, eventually a vector load); any other
// scalar becomes a SPUISD::PREFSLOT2VEC that places the value into the
// vector's preferred slot.
1840 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1841 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1842 DebugLoc dl = Op.getDebugLoc();
1844 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1845 // For a constant, build the appropriate constant vector, which will
1846 // eventually simplify to a vector register load.
1848 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1849 SmallVector<SDValue, 16> ConstVecValues;
1853 // Create a constant vector:
// Element count and element type per result vector type.
1854 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1855 default: llvm_unreachable("Unexpected constant value type in "
1856 "LowerSCALAR_TO_VECTOR");
1857 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1858 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1859 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1860 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1861 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1862 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1865 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1866 for (size_t j = 0; j < n_copies; ++j)
1867 ConstVecValues.push_back(CValue);
1869 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1870 &ConstVecValues[0], ConstVecValues.size());
1872 // Otherwise, copy the value from one register to another:
1873 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1874 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
1881 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
1888 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1889 EVT VT = Op.getValueType();
1890 SDValue N = Op.getOperand(0);
1891 SDValue Elt = Op.getOperand(1);
1892 DebugLoc dl = Op.getDebugLoc();
1895 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1896 // Constant argument:
1897 int EltNo = (int) C->getZExtValue();
1900 if (VT == MVT::i8 && EltNo >= 16)
1901 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
1902 else if (VT == MVT::i16 && EltNo >= 8)
1903 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
1904 else if (VT == MVT::i32 && EltNo >= 4)
1905 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
1906 else if (VT == MVT::i64 && EltNo >= 2)
1907 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
1909 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
1910 // i32 and i64: Element 0 is the preferred slot
1911 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
1914 // Need to generate shuffle mask and extract:
1915 int prefslot_begin = -1, prefslot_end = -1;
1916 int elt_byte = EltNo * VT.getSizeInBits() / 8;
1918 switch (VT.getSimpleVT().SimpleTy) {
1920 assert(false && "Invalid value type!");
1922 prefslot_begin = prefslot_end = 3;
1926 prefslot_begin = 2; prefslot_end = 3;
1931 prefslot_begin = 0; prefslot_end = 3;
1936 prefslot_begin = 0; prefslot_end = 7;
1941 assert(prefslot_begin != -1 && prefslot_end != -1 &&
1942 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
1944 unsigned int ShufBytes[16] = {
1945 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1947 for (int i = 0; i < 16; ++i) {
1948 // zero fill uppper part of preferred slot, don't care about the
1950 unsigned int mask_val;
1951 if (i <= prefslot_end) {
1953 ((i < prefslot_begin)
1955 : elt_byte + (i - prefslot_begin));
1957 ShufBytes[i] = mask_val;
1959 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
1962 SDValue ShufMask[4];
1963 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
1964 unsigned bidx = i * 4;
1965 unsigned int bits = ((ShufBytes[bidx] << 24) |
1966 (ShufBytes[bidx+1] << 16) |
1967 (ShufBytes[bidx+2] << 8) |
1969 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
1972 SDValue ShufMaskVec =
1973 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1974 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
1976 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1977 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
1978 N, N, ShufMaskVec));
1980 // Variable index: Rotate the requested element into slot 0, then replicate
1981 // slot 0 across the vector
1982 EVT VecVT = N.getValueType();
1983 if (!VecVT.isSimple() || !VecVT.isVector()) {
1984 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
1988 // Make life easier by making sure the index is zero-extended to i32
1989 if (Elt.getValueType() != MVT::i32)
1990 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
1992 // Scale the index to a bit/byte shift quantity
1994 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
1995 unsigned scaleShift = scaleFactor.logBase2();
1998 if (scaleShift > 0) {
1999 // Scale the shift factor:
2000 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
2001 DAG.getConstant(scaleShift, MVT::i32));
2004 vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
2006 // Replicate the bytes starting at byte 0 across the entire vector (for
2007 // consistency with the notion of a unified register set)
2010 switch (VT.getSimpleVT().SimpleTy) {
2012 report_fatal_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
2016 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2017 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2018 factor, factor, factor, factor);
2022 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2023 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2024 factor, factor, factor, factor);
2029 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2030 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2031 factor, factor, factor, factor);
2036 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2037 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2038 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2039 loFactor, hiFactor, loFactor, hiFactor);
2044 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2045 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2046 vecShift, vecShift, replicate));
2052 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2053 SDValue VecOp = Op.getOperand(0);
2054 SDValue ValOp = Op.getOperand(1);
2055 SDValue IdxOp = Op.getOperand(2);
2056 DebugLoc dl = Op.getDebugLoc();
2057 EVT VT = Op.getValueType();
2058 EVT eltVT = ValOp.getValueType();
2060 // use 0 when the lane to insert to is 'undef'
2062 if (IdxOp.getOpcode() != ISD::UNDEF) {
2063 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2064 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2065 Offset = (CN->getSExtValue()) * eltVT.getSizeInBits()/8;
2068 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2069 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2070 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2071 DAG.getRegister(SPU::R1, PtrVT),
2072 DAG.getConstant(Offset, PtrVT));
2073 // widen the mask when dealing with half vectors
2074 EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
2075 128/ VT.getVectorElementType().getSizeInBits());
2076 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
2079 DAG.getNode(SPUISD::SHUFB, dl, VT,
2080 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2082 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
2087 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2088 const TargetLowering &TLI)
2090 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2091 DebugLoc dl = Op.getDebugLoc();
2092 EVT ShiftVT = TLI.getShiftAmountTy();
2094 assert(Op.getValueType() == MVT::i8);
2097 llvm_unreachable("Unhandled i8 math operator");
2101 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2103 SDValue N1 = Op.getOperand(1);
2104 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2105 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2106 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2107 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2112 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2114 SDValue N1 = Op.getOperand(1);
2115 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2116 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2117 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2118 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2122 SDValue N1 = Op.getOperand(1);
2123 EVT N1VT = N1.getValueType();
2125 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2126 if (!N1VT.bitsEq(ShiftVT)) {
2127 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2130 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2133 // Replicate lower 8-bits into upper 8:
2135 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2136 DAG.getNode(ISD::SHL, dl, MVT::i16,
2137 N0, DAG.getConstant(8, MVT::i32)));
2139 // Truncate back down to i8
2140 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2141 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
2145 SDValue N1 = Op.getOperand(1);
2146 EVT N1VT = N1.getValueType();
2148 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2149 if (!N1VT.bitsEq(ShiftVT)) {
2150 unsigned N1Opc = ISD::ZERO_EXTEND;
2152 if (N1.getValueType().bitsGT(ShiftVT))
2153 N1Opc = ISD::TRUNCATE;
2155 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2158 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2159 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2162 SDValue N1 = Op.getOperand(1);
2163 EVT N1VT = N1.getValueType();
2165 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2166 if (!N1VT.bitsEq(ShiftVT)) {
2167 unsigned N1Opc = ISD::SIGN_EXTEND;
2169 if (N1VT.bitsGT(ShiftVT))
2170 N1Opc = ISD::TRUNCATE;
2171 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2174 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2175 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2178 SDValue N1 = Op.getOperand(1);
2180 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2181 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2182 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2183 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2191 //! Lower byte immediate operations for v16i8 vectors:
2193 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2196 EVT VT = Op.getValueType();
2197 DebugLoc dl = Op.getDebugLoc();
2199 ConstVec = Op.getOperand(0);
2200 Arg = Op.getOperand(1);
2201 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2202 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2203 ConstVec = ConstVec.getOperand(0);
2205 ConstVec = Op.getOperand(1);
2206 Arg = Op.getOperand(0);
2207 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2208 ConstVec = ConstVec.getOperand(0);
2213 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2214 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2215 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2217 APInt APSplatBits, APSplatUndef;
2218 unsigned SplatBitSize;
2220 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2222 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2223 HasAnyUndefs, minSplatBits)
2224 && minSplatBits <= SplatBitSize) {
2225 uint64_t SplatBits = APSplatBits.getZExtValue();
2226 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2228 SmallVector<SDValue, 16> tcVec;
2229 tcVec.assign(16, tc);
2230 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2231 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
2235 // These operations (AND, OR, XOR) are legal, they just couldn't be custom
2236 // lowered. Return the operation, rather than a null SDValue.
2240 //! Custom lowering for CTPOP (count population)
2242 Custom lowering code that counts the number ones in the input
2243 operand. SPU has such an instruction, but it counts the number of
2244 ones per byte, which then have to be accumulated.
2246 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2247 EVT VT = Op.getValueType();
2248 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2249 VT, (128 / VT.getSizeInBits()));
2250 DebugLoc dl = Op.getDebugLoc();
2252 switch (VT.getSimpleVT().SimpleTy) {
2254 assert(false && "Invalid value type!");
2256 SDValue N = Op.getOperand(0);
2257 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2259 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2260 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2262 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2266 MachineFunction &MF = DAG.getMachineFunction();
2267 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2269 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2271 SDValue N = Op.getOperand(0);
2272 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2273 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2274 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2276 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2277 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2279 // CNTB_result becomes the chain to which all of the virtual registers
2280 // CNTB_reg, SUM1_reg become associated:
2281 SDValue CNTB_result =
2282 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2284 SDValue CNTB_rescopy =
2285 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2287 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2289 return DAG.getNode(ISD::AND, dl, MVT::i16,
2290 DAG.getNode(ISD::ADD, dl, MVT::i16,
2291 DAG.getNode(ISD::SRL, dl, MVT::i16,
2298 MachineFunction &MF = DAG.getMachineFunction();
2299 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2301 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2302 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2304 SDValue N = Op.getOperand(0);
2305 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2306 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2307 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2308 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2310 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2311 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2313 // CNTB_result becomes the chain to which all of the virtual registers
2314 // CNTB_reg, SUM1_reg become associated:
2315 SDValue CNTB_result =
2316 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2318 SDValue CNTB_rescopy =
2319 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2322 DAG.getNode(ISD::SRL, dl, MVT::i32,
2323 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2327 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2328 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2330 SDValue Sum1_rescopy =
2331 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2334 DAG.getNode(ISD::SRL, dl, MVT::i32,
2335 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2338 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2339 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2341 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
2351 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2353 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2354 All conversions to i64 are expanded to a libcall.
2356 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2357 const SPUTargetLowering &TLI) {
2358 EVT OpVT = Op.getValueType();
2359 SDValue Op0 = Op.getOperand(0);
2360 EVT Op0VT = Op0.getValueType();
2362 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2363 || OpVT == MVT::i64) {
2364 // Convert f32 / f64 to i32 / i64 via libcall.
2366 (Op.getOpcode() == ISD::FP_TO_SINT)
2367 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2368 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2369 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!");
2371 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2377 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2379 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2380 All conversions from i64 are expanded to a libcall.
2382 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2383 const SPUTargetLowering &TLI) {
2384 EVT OpVT = Op.getValueType();
2385 SDValue Op0 = Op.getOperand(0);
2386 EVT Op0VT = Op0.getValueType();
2388 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2389 || Op0VT == MVT::i64) {
2390 // Convert i32, i64 to f64 via libcall:
2392 (Op.getOpcode() == ISD::SINT_TO_FP)
2393 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2394 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2395 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd int-to-fp conversion!");
2397 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2403 //! Lower ISD::SETCC
2405 This handles MVT::f64 (double floating point) condition lowering
2407 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2408 const TargetLowering &TLI) {
2409 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2410 DebugLoc dl = Op.getDebugLoc();
2411 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2413 SDValue lhs = Op.getOperand(0);
2414 SDValue rhs = Op.getOperand(1);
2415 EVT lhsVT = lhs.getValueType();
2416 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
2418 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2419 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2420 EVT IntVT(MVT::i64);
2422 // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
2423 // selected to a NOP:
2424 SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
2426 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2427 DAG.getNode(ISD::SRL, dl, IntVT,
2428 i64lhs, DAG.getConstant(32, MVT::i32)));
2429 SDValue lhsHi32abs =
2430 DAG.getNode(ISD::AND, dl, MVT::i32,
2431 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2433 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2435 // SETO and SETUO only use the lhs operand:
2436 if (CC->get() == ISD::SETO) {
2437 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2439 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2440 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2441 DAG.getSetCC(dl, ccResultVT,
2442 lhs, DAG.getConstantFP(0.0, lhsVT),
2444 DAG.getConstant(ccResultAllOnes, ccResultVT));
2445 } else if (CC->get() == ISD::SETUO) {
2446 // Evaluates to true if Op0 is [SQ]NaN
2447 return DAG.getNode(ISD::AND, dl, ccResultVT,
2448 DAG.getSetCC(dl, ccResultVT,
2450 DAG.getConstant(0x7ff00000, MVT::i32),
2452 DAG.getSetCC(dl, ccResultVT,
2454 DAG.getConstant(0, MVT::i32),
2458 SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
2460 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2461 DAG.getNode(ISD::SRL, dl, IntVT,
2462 i64rhs, DAG.getConstant(32, MVT::i32)));
2464 // If a value is negative, subtract from the sign magnitude constant:
2465 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2467 // Convert the sign-magnitude representation into 2's complement:
2468 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2469 lhsHi32, DAG.getConstant(31, MVT::i32));
2470 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2472 DAG.getNode(ISD::SELECT, dl, IntVT,
2473 lhsSelectMask, lhsSignMag2TC, i64lhs);
2475 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2476 rhsHi32, DAG.getConstant(31, MVT::i32));
2477 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2479 DAG.getNode(ISD::SELECT, dl, IntVT,
2480 rhsSelectMask, rhsSignMag2TC, i64rhs);
2484 switch (CC->get()) {
2487 compareOp = ISD::SETEQ; break;
2490 compareOp = ISD::SETGT; break;
2493 compareOp = ISD::SETGE; break;
2496 compareOp = ISD::SETLT; break;
2499 compareOp = ISD::SETLE; break;
2502 compareOp = ISD::SETNE; break;
2504 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2508 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2509 (ISD::CondCode) compareOp);
2511 if ((CC->get() & 0x8) == 0) {
2512 // Ordered comparison:
2513 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2514 lhs, DAG.getConstantFP(0.0, MVT::f64),
2516 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2517 rhs, DAG.getConstantFP(0.0, MVT::f64),
2519 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2521 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2527 //! Lower ISD::SELECT_CC
2529 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2532 \note Need to revisit this in the future: if the code path through the true
2533 and false value computations is longer than the latency of a branch (6
2534 cycles), then it would be more advantageous to branch and insert a new basic
2535 block and branch on the condition. However, this code does not make that
2536 assumption, given the simplisitc uses so far.
2539 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2540 const TargetLowering &TLI) {
2541 EVT VT = Op.getValueType();
2542 SDValue lhs = Op.getOperand(0);
2543 SDValue rhs = Op.getOperand(1);
2544 SDValue trueval = Op.getOperand(2);
2545 SDValue falseval = Op.getOperand(3);
2546 SDValue condition = Op.getOperand(4);
2547 DebugLoc dl = Op.getDebugLoc();
2549 // NOTE: SELB's arguments: $rA, $rB, $mask
2551 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2552 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2553 // condition was true and 0s where the condition was false. Hence, the
2554 // arguments to SELB get reversed.
2556 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2557 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2558 // with another "cannot select select_cc" assert:
2560 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2561 TLI.getSetCCResultType(Op.getValueType()),
2562 lhs, rhs, condition);
2563 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
2566 //! Custom lower ISD::TRUNCATE
2567 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2569 // Type to truncate to
2570 EVT VT = Op.getValueType();
2571 MVT simpleVT = VT.getSimpleVT();
2572 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2573 VT, (128 / VT.getSizeInBits()));
2574 DebugLoc dl = Op.getDebugLoc();
2576 // Type to truncate from
2577 SDValue Op0 = Op.getOperand(0);
2578 EVT Op0VT = Op0.getValueType();
2580 if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
2581 // Create shuffle mask, least significant doubleword of quadword
2582 unsigned maskHigh = 0x08090a0b;
2583 unsigned maskLow = 0x0c0d0e0f;
2584 // Use a shuffle to perform the truncation
2585 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2586 DAG.getConstant(maskHigh, MVT::i32),
2587 DAG.getConstant(maskLow, MVT::i32),
2588 DAG.getConstant(maskHigh, MVT::i32),
2589 DAG.getConstant(maskLow, MVT::i32));
2591 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2592 Op0, Op0, shufMask);
2594 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
2597 return SDValue(); // Leave the truncate unmolested
2601 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2602 * algorithm is to duplicate the sign bit using rotmai to generate at
2603 * least one byte full of sign bits. Then propagate the "sign-byte" into
2604 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2606 * @param Op The sext operand
2607 * @param DAG The current DAG
2608 * @return The SDValue with the entire instruction sequence
2610 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2612 DebugLoc dl = Op.getDebugLoc();
2614 // Type to extend to
2615 MVT OpVT = Op.getValueType().getSimpleVT();
2617 // Type to extend from
2618 SDValue Op0 = Op.getOperand(0);
2619 MVT Op0VT = Op0.getValueType().getSimpleVT();
2621 // The type to extend to needs to be a i128 and
2622 // the type to extend from needs to be i64 or i32.
2623 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2624 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2626 // Create shuffle mask
2627 unsigned mask1 = 0x10101010; // byte 0 - 3 and 4 - 7
2628 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // byte 8 - 11
2629 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // byte 12 - 15
2630 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2631 DAG.getConstant(mask1, MVT::i32),
2632 DAG.getConstant(mask1, MVT::i32),
2633 DAG.getConstant(mask2, MVT::i32),
2634 DAG.getConstant(mask3, MVT::i32));
2636 // Word wise arithmetic right shift to generate at least one byte
2637 // that contains sign bits.
2638 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2639 SDValue sraVal = DAG.getNode(ISD::SRA,
2642 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2643 DAG.getConstant(31, MVT::i32));
2645 // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
2646 SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
2648 DAG.getTargetConstant(
2649 SPU::GPRCRegClass.getID(),
2651 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2652 // and the input value into the lower 64 bits.
2653 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2654 extended, sraVal, shufMask);
2655 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
2658 //! Custom (target-specific) lowering entry point
2660 This is where LLVM's DAG selection process calls to do target-specific
2664 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2666 unsigned Opc = (unsigned) Op.getOpcode();
2667 EVT VT = Op.getValueType();
2672 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2673 errs() << "Op.getOpcode() = " << Opc << "\n";
2674 errs() << "*Op.getNode():\n";
2675 Op.getNode()->dump();
2677 llvm_unreachable(0);
2683 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2685 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2686 case ISD::ConstantPool:
2687 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2688 case ISD::GlobalAddress:
2689 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2690 case ISD::JumpTable:
2691 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2692 case ISD::ConstantFP:
2693 return LowerConstantFP(Op, DAG);
2695 // i8, i64 math ops:
2704 return LowerI8Math(Op, DAG, Opc, *this);
2708 case ISD::FP_TO_SINT:
2709 case ISD::FP_TO_UINT:
2710 return LowerFP_TO_INT(Op, DAG, *this);
2712 case ISD::SINT_TO_FP:
2713 case ISD::UINT_TO_FP:
2714 return LowerINT_TO_FP(Op, DAG, *this);
2716 // Vector-related lowering.
2717 case ISD::BUILD_VECTOR:
2718 return LowerBUILD_VECTOR(Op, DAG);
2719 case ISD::SCALAR_TO_VECTOR:
2720 return LowerSCALAR_TO_VECTOR(Op, DAG);
2721 case ISD::VECTOR_SHUFFLE:
2722 return LowerVECTOR_SHUFFLE(Op, DAG);
2723 case ISD::EXTRACT_VECTOR_ELT:
2724 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2725 case ISD::INSERT_VECTOR_ELT:
2726 return LowerINSERT_VECTOR_ELT(Op, DAG);
2728 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2732 return LowerByteImmed(Op, DAG);
2734 // Vector and i8 multiply:
2737 return LowerI8Math(Op, DAG, Opc, *this);
2740 return LowerCTPOP(Op, DAG);
2742 case ISD::SELECT_CC:
2743 return LowerSELECT_CC(Op, DAG, *this);
2746 return LowerSETCC(Op, DAG, *this);
2749 return LowerTRUNCATE(Op, DAG);
2751 case ISD::SIGN_EXTEND:
2752 return LowerSIGN_EXTEND(Op, DAG);
2758 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2759 SmallVectorImpl<SDValue>&Results,
2760 SelectionDAG &DAG) const
2763 unsigned Opc = (unsigned) N->getOpcode();
2764 EVT OpVT = N->getValueType(0);
2768 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2769 errs() << "Op.getOpcode() = " << Opc << "\n";
2770 errs() << "*Op.getNode():\n";
2778 /* Otherwise, return unchanged */
2781 //===----------------------------------------------------------------------===//
2782 // Target Optimization Hooks
2783 //===----------------------------------------------------------------------===//
2786 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2789 TargetMachine &TM = getTargetMachine();
2791 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2792 SelectionDAG &DAG = DCI.DAG;
2793 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2794 EVT NodeVT = N->getValueType(0); // The node's value type
2795 EVT Op0VT = Op0.getValueType(); // The first operand's result
2796 SDValue Result; // Initially, empty result
2797 DebugLoc dl = N->getDebugLoc();
2799 switch (N->getOpcode()) {
2802 SDValue Op1 = N->getOperand(1);
2804 if (Op0.getOpcode() == SPUISD::IndirectAddr
2805 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2806 // Normalize the operands to reduce repeated code
2807 SDValue IndirectArg = Op0, AddArg = Op1;
2809 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2814 if (isa<ConstantSDNode>(AddArg)) {
2815 ConstantSDNode *CN0 = cast<ConstantSDNode > (AddArg);
2816 SDValue IndOp1 = IndirectArg.getOperand(1);
2818 if (CN0->isNullValue()) {
2819 // (add (SPUindirect <arg>, <arg>), 0) ->
2820 // (SPUindirect <arg>, <arg>)
2822 #if !defined(NDEBUG)
2823 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2825 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2826 << "With: (SPUindirect <arg>, <arg>)\n";
2831 } else if (isa<ConstantSDNode>(IndOp1)) {
2832 // (add (SPUindirect <arg>, <const>), <const>) ->
2833 // (SPUindirect <arg>, <const + const>)
2834 ConstantSDNode *CN1 = cast<ConstantSDNode > (IndOp1);
2835 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2836 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2838 #if !defined(NDEBUG)
2839 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2841 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2842 << "), " << CN0->getSExtValue() << ")\n"
2843 << "With: (SPUindirect <arg>, "
2844 << combinedConst << ")\n";
2848 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2849 IndirectArg, combinedValue);
2855 case ISD::SIGN_EXTEND:
2856 case ISD::ZERO_EXTEND:
2857 case ISD::ANY_EXTEND: {
2858 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2859 // (any_extend (SPUextract_elt0 <arg>)) ->
2860 // (SPUextract_elt0 <arg>)
2861 // Types must match, however...
2862 #if !defined(NDEBUG)
2863 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2864 errs() << "\nReplace: ";
2866 errs() << "\nWith: ";
2867 Op0.getNode()->dump(&DAG);
2876 case SPUISD::IndirectAddr: {
2877 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2878 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2879 if (CN != 0 && CN->isNullValue()) {
2880 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2881 // (SPUaform <addr>, 0)
2883 DEBUG(errs() << "Replace: ");
2884 DEBUG(N->dump(&DAG));
2885 DEBUG(errs() << "\nWith: ");
2886 DEBUG(Op0.getNode()->dump(&DAG));
2887 DEBUG(errs() << "\n");
2891 } else if (Op0.getOpcode() == ISD::ADD) {
2892 SDValue Op1 = N->getOperand(1);
2893 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
2894 // (SPUindirect (add <arg>, <arg>), 0) ->
2895 // (SPUindirect <arg>, <arg>)
2896 if (CN1->isNullValue()) {
2898 #if !defined(NDEBUG)
2899 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2901 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
2902 << "With: (SPUindirect <arg>, <arg>)\n";
2906 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2907 Op0.getOperand(0), Op0.getOperand(1));
2913 case SPUISD::SHLQUAD_L_BITS:
2914 case SPUISD::SHLQUAD_L_BYTES:
2915 case SPUISD::ROTBYTES_LEFT: {
2916 SDValue Op1 = N->getOperand(1);
2918 // Kill degenerate vector shifts:
2919 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2920 if (CN->isNullValue()) {
2926 case SPUISD::PREFSLOT2VEC: {
2927 switch (Op0.getOpcode()) {
2930 case ISD::ANY_EXTEND:
2931 case ISD::ZERO_EXTEND:
2932 case ISD::SIGN_EXTEND: {
2933 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
2935 // but only if the SPUprefslot2vec and <arg> types match.
2936 SDValue Op00 = Op0.getOperand(0);
2937 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
2938 SDValue Op000 = Op00.getOperand(0);
2939 if (Op000.getValueType() == NodeVT) {
2945 case SPUISD::VEC2PREFSLOT: {
2946 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
2948 Result = Op0.getOperand(0);
2956 // Otherwise, return unchanged.
2958 if (Result.getNode()) {
2959 DEBUG(errs() << "\nReplace.SPU: ");
2960 DEBUG(N->dump(&DAG));
2961 DEBUG(errs() << "\nWith: ");
2962 DEBUG(Result.getNode()->dump(&DAG));
2963 DEBUG(errs() << "\n");
2970 //===----------------------------------------------------------------------===//
2971 // Inline Assembly Support
2972 //===----------------------------------------------------------------------===//
2974 /// getConstraintType - Given a constraint letter, return the type of
2975 /// constraint it is for this target.
2976 SPUTargetLowering::ConstraintType
2977 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
2978 if (ConstraintLetter.size() == 1) {
2979 switch (ConstraintLetter[0]) {
2986 return C_RegisterClass;
2989 return TargetLowering::getConstraintType(ConstraintLetter);
2992 std::pair<unsigned, const TargetRegisterClass*>
2993 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2996 if (Constraint.size() == 1) {
2997 // GCC RS6000 Constraint Letters
2998 switch (Constraint[0]) {
3002 return std::make_pair(0U, SPU::R64CRegisterClass);
3003 return std::make_pair(0U, SPU::R32CRegisterClass);
3006 return std::make_pair(0U, SPU::R32FPRegisterClass);
3007 else if (VT == MVT::f64)
3008 return std::make_pair(0U, SPU::R64FPRegisterClass);
3011 return std::make_pair(0U, SPU::GPRCRegisterClass);
3015 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3018 //! Compute used/known bits for a SPU operand
3020 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3024 const SelectionDAG &DAG,
3025 unsigned Depth ) const {
3027 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3029 switch (Op.getOpcode()) {
3031 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3037 case SPUISD::PREFSLOT2VEC:
3038 case SPUISD::LDRESULT:
3039 case SPUISD::VEC2PREFSLOT:
3040 case SPUISD::SHLQUAD_L_BITS:
3041 case SPUISD::SHLQUAD_L_BYTES:
3042 case SPUISD::VEC_ROTL:
3043 case SPUISD::VEC_ROTR:
3044 case SPUISD::ROTBYTES_LEFT:
3045 case SPUISD::SELECT_MASK:
3052 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3053 unsigned Depth) const {
3054 switch (Op.getOpcode()) {
3059 EVT VT = Op.getValueType();
3061 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3064 return VT.getSizeInBits();
3069 // LowerAsmOperandForConstraint
3071 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3072 char ConstraintLetter,
3073 std::vector<SDValue> &Ops,
3074 SelectionDAG &DAG) const {
3075 // Default, for the time being, to the base class handler
3076 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
3079 /// isLegalAddressImmediate - Return true if the integer value can be used
3080 /// as the offset of the target addressing mode.
3081 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3082 const Type *Ty) const {
3083 // SPU's addresses are 256K:
3084 return (V > -(1 << 18) && V < (1 << 18) - 1);
3087 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3092 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3093 // The SPU target isn't yet aware of offsets.
3097 // can we compare to Imm without writing it into a register?
3098 bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3099 //ceqi, cgti, etc. all take s10 operand
3100 return isInt<10>(Imm);
3104 SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3105 const Type * ) const{
3107 // A-form: 18bit absolute address.
3108 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
3111 // D-form: reg + 14bit offset
3112 if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
3116 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 1 && AM.BaseOffs ==0)