//===-- SPUISelLowering.cpp - Cell SPU DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SPUTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
#include "SPUFrameInfo.h"
#include "SPUMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <map>

using namespace llvm;

// Used in getTargetNodeName() below
namespace {
  std::map<unsigned, const char *> node_names;

  //! EVT mapping to useful data for Cell SPU
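  // The "preferred slot" is the byte offset within a 16-byte quadword where
  // the SPU ISA keeps a scalar of the given width: byte 3 for 8-bit values,
  // byte 2 for 16-bit values, and byte 0 for word-sized and wider values.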
  struct valtype_map_s {
    EVT valtype;
    int prefslot_byte;
  };

  const valtype_map_s valtype_map[] = {
    { MVT::i1,   3 },
    { MVT::i8,   3 },
    { MVT::i16,  2 },
    { MVT::i32,  0 },
    { MVT::f32,  0 },
    { MVT::i64,  0 },
    { MVT::f64,  0 },
    { MVT::i128, 0 }
  };

  const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);

  const valtype_map_s *getValueTypeMapEntry(EVT VT) {
    const valtype_map_s *retval = 0;

    for (size_t i = 0; i < n_valtype_map; ++i) {
      if (valtype_map[i].valtype == VT) {
        retval = valtype_map + i;
        break;
      }
    }

#ifndef NDEBUG
    if (retval == 0) {
      report_fatal_error("getValueTypeMapEntry returns NULL for " +
                         Twine(VT.getEVTString()));
    }
#endif

    return retval;
  }

  //! Expand a library call into an actual call DAG node
  /*!
   \note
   This code is taken from SelectionDAGLegalize, since it is not exposed as
   part of the LLVM SelectionDAG API.
   */
  SDValue
  ExpandLibCall(RTLIB::Libcall LC, SDValue Op, SelectionDAG &DAG,
                bool isSigned, SDValue &Hi, const SPUTargetLowering &TLI) {
    // The input chain to this libcall is the entry node of the function.
    // Legalizing the call will automatically add the previous call to the
    // dependence.
    SDValue InChain = DAG.getEntryNode();

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      EVT ArgVT = Op.getOperand(i).getValueType();
      const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
      Entry.Node = Op.getOperand(i);
      Entry.Ty = ArgTy;
      Entry.isSExt = isSigned;
      Entry.isZExt = !isSigned;
      Args.push_back(Entry);
    }
    SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                           TLI.getPointerTy());

    // Splice the libcall in wherever FindInputOutputChains tells us to.
    const Type *RetTy =
            Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
    std::pair<SDValue, SDValue> CallInfo =
            TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                            0, TLI.getLibcallCallingConv(LC), false,
                            /*isReturnValueUsed=*/true,
                            Callee, Args, DAG, Op.getDebugLoc());

    return CallInfo.first;
  }
}
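
// ExpandLibCall is a helper for custom lowering paths that must fall back to
// a runtime routine, e.g. the f64 division and f64 -> int conversions the
// constructor below notes as libcalls (see the "__fast_divdf3" registration
// and the FP_TO_SINT comments).
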
SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()),
    SPUTM(TM) {
  // Fold away setcc operations if possible.
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set RTLIB libcall names as used by SPU:
  setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");

  // Set up the SPU's register classes:
  addRegisterClass(MVT::i8,   SPU::R8CRegisterClass);
  addRegisterClass(MVT::i16,  SPU::R16CRegisterClass);
  addRegisterClass(MVT::i32,  SPU::R32CRegisterClass);
  addRegisterClass(MVT::i64,  SPU::R64CRegisterClass);
  addRegisterClass(MVT::f32,  SPU::R32FPRegisterClass);
  addRegisterClass(MVT::f64,  SPU::R64FPRegisterClass);
  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);

  // SPU has no sign or zero extended loads for i1, i8, i16:
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);

  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
  setTruncStoreAction(MVT::i128, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SPU constant load actions are custom lowered:
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  // SPU's loads and stores have to be custom lowered:
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::LOAD,   VT, Custom);
    setOperationAction(ISD::STORE,  VT, Custom);
    setLoadExtAction(ISD::EXTLOAD,  VT, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;

    setOperationAction(ISD::LOAD,  VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);

    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
      setTruncStoreAction(VT, StoreVT, Expand);
    }
  }

  // Expand the jumptable branches
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  // Custom lower SELECT_CC for most cases, but expand by default
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8,    Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64,   Custom);

  // SPU has no intrinsics for these particular operations:
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // SPU has no division/remainder instructions
  setOperationAction(ISD::SREM,    MVT::i8,   Expand);
  setOperationAction(ISD::UREM,    MVT::i8,   Expand);
  setOperationAction(ISD::SDIV,    MVT::i8,   Expand);
  setOperationAction(ISD::UDIV,    MVT::i8,   Expand);
  setOperationAction(ISD::SDIVREM, MVT::i8,   Expand);
  setOperationAction(ISD::UDIVREM, MVT::i8,   Expand);
  setOperationAction(ISD::SREM,    MVT::i16,  Expand);
  setOperationAction(ISD::UREM,    MVT::i16,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i16,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i16,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i16,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i16,  Expand);
  setOperationAction(ISD::SREM,    MVT::i32,  Expand);
  setOperationAction(ISD::UREM,    MVT::i32,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i32,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i32,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32,  Expand);
  setOperationAction(ISD::SREM,    MVT::i64,  Expand);
  setOperationAction(ISD::UREM,    MVT::i64,  Expand);
  setOperationAction(ISD::SDIV,    MVT::i64,  Expand);
  setOperationAction(ISD::UDIV,    MVT::i64,  Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64,  Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64,  Expand);
  setOperationAction(ISD::SREM,    MVT::i128, Expand);
  setOperationAction(ISD::UREM,    MVT::i128, Expand);
  setOperationAction(ISD::SDIV,    MVT::i128, Expand);
  setOperationAction(ISD::UDIV,    MVT::i128, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);

  // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
  // for f32!)
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // SPU can do rotate right and left, so legalize it... but customize for i8
  // because instructions don't exist.

  // FIXME: Change from "expand" to appropriate type once ROTR is supported in
  //        LowerOperation().
  setOperationAction(ISD::ROTR, MVT::i32, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i16, Expand /*Legal*/);
  setOperationAction(ISD::ROTR, MVT::i8,  Expand /*Custom*/);

  setOperationAction(ISD::ROTL, MVT::i32, Legal);
  setOperationAction(ISD::ROTL, MVT::i16, Legal);
  setOperationAction(ISD::ROTL, MVT::i8,  Custom);

  // SPU has no native version of shift left/right for i8
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i8, Custom);

  // Make these operations legal and handle them during instruction selection:
  setOperationAction(ISD::SHL, MVT::i64, Legal);
  setOperationAction(ISD::SRL, MVT::i64, Legal);
  setOperationAction(ISD::SRA, MVT::i64, Legal);

  // Custom lower i8, i32 and i64 multiplications
  setOperationAction(ISD::MUL, MVT::i8,  Custom);
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MUL, MVT::i64, Legal);

  // Expand double-width multiplication
  // FIXME: It would probably be reasonable to support some of these operations
  setOperationAction(ISD::UMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU,     MVT::i8,  Expand);
  setOperationAction(ISD::MULHS,     MVT::i8,  Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::MULHU,     MVT::i16, Expand);
  setOperationAction(ISD::MULHS,     MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::MULHU,     MVT::i64, Expand);
  setOperationAction(ISD::MULHS,     MVT::i64, Expand);

  // Need to custom handle (some) common i8, i64 math ops
  setOperationAction(ISD::ADD, MVT::i8,  Custom);
  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::SUB, MVT::i8,  Custom);
  setOperationAction(ISD::SUB, MVT::i64, Legal);

  // SPU does not have BSWAP, but it does support CTLZ for i32.
  // CTPOP has to be custom lowered.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8,   Custom);
  setOperationAction(ISD::CTPOP, MVT::i16,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i32,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i64,  Custom);
  setOperationAction(ISD::CTPOP, MVT::i128, Expand);

  setOperationAction(ISD::CTTZ, MVT::i8,   Expand);
  setOperationAction(ISD::CTTZ, MVT::i16,  Expand);
  setOperationAction(ISD::CTTZ, MVT::i32,  Expand);
  setOperationAction(ISD::CTTZ, MVT::i64,  Expand);
  setOperationAction(ISD::CTTZ, MVT::i128, Expand);

  setOperationAction(ISD::CTLZ, MVT::i8,   Promote);
  setOperationAction(ISD::CTLZ, MVT::i16,  Promote);
  setOperationAction(ISD::CTLZ, MVT::i32,  Legal);
  setOperationAction(ISD::CTLZ, MVT::i64,  Expand);
  setOperationAction(ISD::CTLZ, MVT::i128, Expand);

  // SPU has a version of select that implements (a&~c)|(b&c), just like
  // select ought to work:
  setOperationAction(ISD::SELECT, MVT::i8,  Legal);
  setOperationAction(ISD::SELECT, MVT::i16, Legal);
  setOperationAction(ISD::SELECT, MVT::i32, Legal);
  setOperationAction(ISD::SELECT, MVT::i64, Legal);

  setOperationAction(ISD::SETCC, MVT::i8,  Legal);
  setOperationAction(ISD::SETCC, MVT::i16, Legal);
  setOperationAction(ISD::SETCC, MVT::i32, Legal);
  setOperationAction(ISD::SETCC, MVT::i64, Legal);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  // Custom lower i128 -> i64 truncates
  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);

  // Custom lower i32/i64 -> i128 sign extend
  setOperationAction(ISD::SIGN_EXTEND, MVT::i128, Custom);

  setOperationAction(ISD::FP_TO_SINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  // SPU has a legal FP -> signed INT instruction for f32, but for f64, need
  // to expand to a libcall, hence the custom lowering:
  setOperationAction(ISD::FP_TO_SINT, MVT::i32,  Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32,  Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64,  Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64,  Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);

  // FDIV on SPU requires custom lowering
  setOperationAction(ISD::FDIV, MVT::f64, Expand); // to libcall

  // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
       ++sctype) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;

    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::ConstantPool,  VT, Custom);
    setOperationAction(ISD::JumpTable,     VT, Custom);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64,   Expand);

  // Cell SPU has instructions for converting between i64 and fp.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);

  // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);

  // "Odd size" vector classes that we're willing to support:
  addRegisterClass(MVT::v2i32, SPU::VECREGRegisterClass);

  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

    // add/sub are legal for all supported vector VT's.
    setOperationAction(ISD::ADD, VT, Legal);
    setOperationAction(ISD::SUB, VT, Legal);
    // mul is legal for all supported vector VT's as well.
    setOperationAction(ISD::MUL, VT, Legal);

    setOperationAction(ISD::AND,    VT, Legal);
    setOperationAction(ISD::OR,     VT, Legal);
    setOperationAction(ISD::XOR,    VT, Legal);
    setOperationAction(ISD::LOAD,   VT, Legal);
    setOperationAction(ISD::SELECT, VT, Legal);
    setOperationAction(ISD::STORE,  VT, Legal);

    // These operations need to be expanded:
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Custom lower build_vector, constant pool spills, insert and
    // extract vector elements:
    setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
    setOperationAction(ISD::ConstantPool,       VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
  }

  setOperationAction(ISD::AND, MVT::v16i8, Custom);
  setOperationAction(ISD::OR,  MVT::v16i8, Custom);
  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  setStackPointerRegisterToSaveRestore(SPU::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  computeRegisterProperties();

  // Set pre-RA register scheduler default to BURR, which produces slightly
  // better code than the default (could also be TDRR, but TargetLowering.h
  // needs a mod to support that model):
  setSchedulingPreference(Sched::RegPressure);
}
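
// In TargetLowering terms: "Legal" operations above map directly onto SPU
// instructions, "Expand" operations are broken apart or turned into libcalls
// by the generic legalizer, "Promote" widens the type first, and "Custom"
// routes the node through the LowerOperation entry points below.
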
const char *
SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
{
  if (node_names.empty()) {
    node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
    node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
    node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
    node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
    node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
    node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
    node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
    node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
    node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
    node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
    node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
    node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
    node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS";
    node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES";
    node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
    node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
    node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
            "SPUISD::ROTBYTES_LEFT_BITS";
    node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
    node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
    node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
    node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
    node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
  }

  std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);

  return ((i != node_names.end()) ? i->second : 0);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned SPUTargetLowering::getFunctionAlignment(const Function *) const {
  return 3;
}

//===----------------------------------------------------------------------===//
// Return the Cell SPU's SETCC result type
//===----------------------------------------------------------------------===//

MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
  // i8, i16 and i32 are valid SETCC result types
  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
           VT.getSimpleVT().SimpleTy :
           MVT::i32);
}

//===----------------------------------------------------------------------===//
// Calling convention code:
//===----------------------------------------------------------------------===//

#include "SPUGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Custom lower loads for CellSPU
/*!
 All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to rotate to extract the requested element.

 For extending loads, we also want to ensure that the following sequence is
 emitted, e.g. for MVT::f32 extending load to MVT::f64:

\verbatim
%1  v16i8,ch = load
%2  v16i8,ch = rotate %1
%3  v4f32,ch = bitconvert %2
%4  f32     = vec2prefslot %3
%5  f64     = fp_extend %4
\endverbatim
*/
static SDValue
LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  LoadSDNode *LN = cast<LoadSDNode>(Op);
  SDValue the_chain = LN->getChain();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  EVT InVT = LN->getMemoryVT();
  EVT OutVT = Op.getValueType();
  ISD::LoadExtType ExtType = LN->getExtensionType();
  unsigned alignment = LN->getAlignment();
  const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
  DebugLoc dl = Op.getDebugLoc();

  switch (LN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    SDValue result;
    SDValue basePtr = LN->getBasePtr();
    SDValue rotate;

    if (alignment == 16) {
      ConstantSDNode *CN;

      // Special cases for a known aligned load to simplify the base pointer
      // and the rotation amount:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();
        int64_t rotamt = int64_t((offset & 0xf) - vtm->prefslot_byte);

        if (rotamt < 0)
          rotamt += 16;

        rotate = DAG.getConstant(rotamt, MVT::i16);
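
        // The rotate amount is the element's byte position within the
        // quadword minus the preferred-slot offset, normalized to 0..15;
        // e.g. an i32 at offset 4 rotates left by 4 bytes so the value
        // lands in bytes 0..3, the word-sized preferred slot.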

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                basePtr,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        }
      } else if ((basePtr.getOpcode() == SPUISD::AFormAddr)
                 || (basePtr.getOpcode() == SPUISD::IndirectAddr
                     && basePtr.getOperand(0).getOpcode() == SPUISD::Hi
                     && basePtr.getOperand(1).getOpcode() == SPUISD::Lo)) {
        // Plain aligned a-form address: rotate into preferred slot
        // Same for (SPUindirect (SPUhi ...), (SPUlo ...))
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getConstant(rotamt, MVT::i16);
      } else {
        // Offset the rotate amount by the basePtr and the preferred slot
        // byte offset
        int64_t rotamt = -vtm->prefslot_byte;
        if (rotamt < 0)
          rotamt += 16;
        rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                             basePtr,
                             DAG.getConstant(rotamt, PtrVT));
      }
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Offset the rotate amount by the basePtr and the preferred slot
      // byte offset
      rotate = DAG.getNode(ISD::ADD, dl, PtrVT,
                           basePtr,
                           DAG.getConstant(-vtm->prefslot_byte, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                         LN->getSrcValue(), LN->getSrcValueOffset(),
                         LN->isVolatile(), LN->isNonTemporal(), 16);

    // Update the chain
    the_chain = result.getValue(1);

    // Rotate into the preferred slot:
    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
                         result.getValue(0), rotate);

    // Convert the loaded v16i8 vector to the appropriate vector type
    // specified by the operand:
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 InVT, (128 / InVT.getSizeInBits()));
    result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                         DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));

    // Handle extending loads by extending the scalar result:
    if (ExtType == ISD::SEXTLOAD) {
      result = DAG.getNode(ISD::SIGN_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::ZEXTLOAD) {
      result = DAG.getNode(ISD::ZERO_EXTEND, dl, OutVT, result);
    } else if (ExtType == ISD::EXTLOAD) {
      unsigned NewOpc = ISD::ANY_EXTEND;

      if (OutVT.isFloatingPoint())
        NewOpc = ISD::FP_EXTEND;

      result = DAG.getNode(NewOpc, dl, OutVT, result);
    }

    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
    SDValue retops[2] = {
      result,
      the_chain
    };

    result = DAG.getNode(SPUISD::LDRESULT, dl, retvts,
                         retops, sizeof(retops) / sizeof(retops[0]));
    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    report_fatal_error("LowerLOAD: Got a LoadSDNode with an addr mode other "
                       "than UNINDEXED\n" +
                       Twine((unsigned)LN->getAddressingMode()));
  }

  return SDValue();
}

/// Custom lower stores for CellSPU
/*!
 All CellSPU stores are aligned to 16-byte boundaries, so for elements
 within a 16-byte block, we have to generate a shuffle to insert the
 requested element into its place, then store the resulting block.
 */
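// The generated sequence is a read-modify-write of the containing quadword:
// load the aligned v16i8 block, SHUFB the scalar into its slot using a
// SHUFFLE_MASK control word, then store the whole block back.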
static SDValue
LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  StoreSDNode *SN = cast<StoreSDNode>(Op);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();
  EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned alignment = SN->getAlignment();

  switch (SN->getAddressingMode()) {
  case ISD::UNINDEXED: {
    // The vector type we really want to load from the 16-byte chunk.
    EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
                                 VT, (128 / VT.getSizeInBits()));

    SDValue alignLoadVec;
    SDValue basePtr = SN->getBasePtr();
    SDValue the_chain = SN->getChain();
    SDValue insertEltOffs;

    if (alignment == 16) {
      ConstantSDNode *CN;

      // Special cases for a known aligned load to simplify the base pointer
      // and insertion byte:
      if (basePtr.getOpcode() == ISD::ADD
          && (CN = dyn_cast<ConstantSDNode>(basePtr.getOperand(1))) != 0) {
        // Known offset into basePtr
        int64_t offset = CN->getSExtValue();

        // Simplify the base pointer for this case:
        basePtr = basePtr.getOperand(0);
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    basePtr,
                                    DAG.getConstant((offset & 0xf), PtrVT));

        if ((offset & ~0xf) > 0) {
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                basePtr,
                                DAG.getConstant((offset & ~0xf), PtrVT));
        }
      } else {
        // Otherwise, assume it's at byte 0 of basePtr
        insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                    basePtr,
                                    DAG.getConstant(0, PtrVT));
      }
    } else {
      // Unaligned load: must be more pessimistic about addressing modes:
      if (basePtr.getOpcode() == ISD::ADD) {
        MachineFunction &MF = DAG.getMachineFunction();
        MachineRegisterInfo &RegInfo = MF.getRegInfo();
        unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
        SDValue Flag;

        SDValue Op0 = basePtr.getOperand(0);
        SDValue Op1 = basePtr.getOperand(1);

        if (isa<ConstantSDNode>(Op1)) {
          // Convert the (add <ptr>, <const>) to an indirect address contained
          // in a register. Note that this is done because we need to avoid
          // creating a 0(reg) d-form address due to the SPU's block loads.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
          the_chain = DAG.getCopyToReg(the_chain, dl, VReg, basePtr, Flag);
          basePtr = DAG.getCopyFromReg(the_chain, dl, VReg, PtrVT);
        } else {
          // Convert the (add <arg1>, <arg2>) to an indirect address, which
          // will likely be lowered as a reg(reg) x-form address.
          basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Op0, Op1);
        }
      } else {
        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                              basePtr,
                              DAG.getConstant(0, PtrVT));
      }

      // Insertion point is solely determined by basePtr's contents
      insertEltOffs = DAG.getNode(ISD::ADD, dl, PtrVT,
                                  basePtr,
                                  DAG.getConstant(0, PtrVT));
    }

    // Re-emit as a v16i8 vector load
    alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
                               SN->getSrcValue(), SN->getSrcValueOffset(),
                               SN->isVolatile(), SN->isNonTemporal(), 16);

    // Update the chain
    the_chain = alignLoadVec.getValue(1);

    LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec);
    SDValue theValue = SN->getValue();
    SDValue result;

    if (StVT != VT
        && (theValue.getOpcode() == ISD::AssertZext
            || theValue.getOpcode() == ISD::AssertSext)) {
      // Drill down and get the value for zero- and sign-extended
      // quantities
      theValue = theValue.getOperand(0);
    }

    // If the base pointer is already a D-form address, then just create
    // a new D-form address with a slot offset and the original base pointer.
    // Otherwise generate a D-form address with the slot offset relative
    // to the stack pointer, which is always aligned.
#if !defined(NDEBUG)
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      errs() << "CellSPU LowerSTORE: basePtr = ";
      basePtr.getNode()->dump(&DAG);
      errs() << "\n";
    }
#endif

    SDValue insertEltOp =
            DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
    SDValue vectorizeOp =
            DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);

    result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                         vectorizeOp, alignLoadVec,
                         DAG.getNode(ISD::BIT_CONVERT, dl,
                                     MVT::v4i32, insertEltOp));

    result = DAG.getStore(the_chain, dl, result, basePtr,
                          LN->getSrcValue(), LN->getSrcValueOffset(),
                          LN->isVolatile(), LN->isNonTemporal(),
                          LN->getAlignment());

#if 0 && !defined(NDEBUG)
    if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
      const SDValue &currentRoot = DAG.getRoot();

      DAG.setRoot(result);
      errs() << "------- CellSPU:LowerStore result:\n";
      DAG.dump();
      errs() << "-------\n";
      DAG.setRoot(currentRoot);
    }
#endif

    return result;
  }
  case ISD::PRE_INC:
  case ISD::PRE_DEC:
  case ISD::POST_INC:
  case ISD::POST_DEC:
  case ISD::LAST_INDEXED_MODE:
    report_fatal_error("LowerSTORE: Got a StoreSDNode with an addr mode other "
                       "than UNINDEXED\n" +
                       Twine((unsigned)SN->getAddressingMode()));
  }

  return SDValue();
}

//! Generate the address of a constant pool entry.
static SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      // Just return the SDValue with the constant pool address in it.
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, CPI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, CPI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, CPI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerConstantPool: Relocation model other than static"
                   " not supported.");
  return SDValue();
}
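
// Note on the two addressing forms used above: an A-form address encodes an
// 18-bit absolute address directly in the instruction and is only usable when
// the program fits the SPU's small-memory model; in large-memory mode the
// address is instead split into SPUISD::Hi/SPUISD::Lo halves and recombined
// through an indirect address, analogous to hi/lo relocation pairs on other
// RISC targets.
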
//! Alternate entry point for generating the address of a constant pool entry
SDValue
SPU::LowerConstantPool(SDValue Op, SelectionDAG &DAG,
                       const SPUTargetMachine &TM) {
  return ::LowerConstantPool(Op, DAG, TM.getSubtargetImpl());
}

static SDValue
LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  const TargetMachine &TM = DAG.getTarget();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, JTI, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, JTI, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, JTI, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  }

  llvm_unreachable("LowerJumpTable: Relocation model other than static"
                   " not supported.");
  return SDValue();
}

static SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  const TargetMachine &TM = DAG.getTarget();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (TM.getRelocationModel() == Reloc::Static) {
    if (!ST->usingLargeMem()) {
      return DAG.getNode(SPUISD::AFormAddr, dl, PtrVT, GA, Zero);
    } else {
      SDValue Hi = DAG.getNode(SPUISD::Hi, dl, PtrVT, GA, Zero);
      SDValue Lo = DAG.getNode(SPUISD::Lo, dl, PtrVT, GA, Zero);
      return DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, Hi, Lo);
    }
  } else {
    report_fatal_error("LowerGlobalAddress: Relocation model other than "
                       "static not supported.");
  }

  return SDValue();
}

//! Custom lower double precision floating point constants
static SDValue
LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();

  if (VT == MVT::f64) {
    ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());

    assert((FP != 0) &&
           "LowerConstantFP: Node is not ConstantFPSDNode");

    uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
    SDValue T = DAG.getConstant(dbits, MVT::i64);
    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
  }

  return SDValue();
}
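
// The f64 bit pattern is splatted into both lanes of a v2i64 build_vector so
// the v2i64 splat lowering (see LowerV2I64Splat below) can materialize it
// without a constant-pool load; VEC2PREFSLOT then reads the scalar back out
// of the preferred slot.
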
SDValue
SPUTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();

  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  unsigned ArgOffset = SPUFrameInfo::minStackSize();
  unsigned ArgRegIdx = 0;
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Add DAG nodes to load the arguments or copy them out of registers.
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    SDValue ArgVal;

    if (ArgRegIdx < NumArgRegs) {
      const TargetRegisterClass *ArgRegClass;

      switch (ObjectVT.getSimpleVT().SimpleTy) {
      default:
        report_fatal_error("LowerFormalArguments Unhandled argument type: " +
                           Twine(ObjectVT.getEVTString()));
      case MVT::i8:
        ArgRegClass = &SPU::R8CRegClass;
        break;
      case MVT::i16:
        ArgRegClass = &SPU::R16CRegClass;
        break;
      case MVT::i32:
        ArgRegClass = &SPU::R32CRegClass;
        break;
      case MVT::i64:
        ArgRegClass = &SPU::R64CRegClass;
        break;
      case MVT::i128:
        ArgRegClass = &SPU::GPRCRegClass;
        break;
      case MVT::f32:
        ArgRegClass = &SPU::R32FPRegClass;
        break;
      case MVT::f64:
        ArgRegClass = &SPU::R64FPRegClass;
        break;
      case MVT::v2f64:
      case MVT::v4f32:
      case MVT::v2i64:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        ArgRegClass = &SPU::VECREGRegClass;
        break;
      }

      unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
      RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
      ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
      ++ArgRegIdx;
    } else {
      // We need to load the argument to a virtual register if we determined
      // above that we ran out of physical registers of the appropriate type
      // or we're forced to do vararg
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
      ArgOffset += StackSlotSize;
    }

    InVals.push_back(ArgVal);
    // Update the chain
    Chain = ArgVal.getOperand(0);
  }

  // vararg handling:
  if (isVarArg) {
    // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
    // We will spill (79-3)+1 registers to the stack
    SmallVector<SDValue, 79-3+1> MemOps;

    // Create the frame slots
    for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
      FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
      SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
      unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
      SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
      SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0,
                                   false, false, 0);
      Chain = Store.getOperand(0);
      MemOps.push_back(Store);

      // Increment address by stack slot size for the next stored argument
      ArgOffset += StackSlotSize;
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  }

  return Chain;
}
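
// Note: the SPU ABI passes leading arguments in registers (r3 and up); for a
// vararg function, every argument register past the last named argument is
// spilled to a fixed 16-byte stack slot above so that va_arg can walk the
// remaining arguments in memory.
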
/// isLSAAddress - Return the immediate to use if the specified
/// value is representable as a LSA address.
static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||        // Low 2 bits are implicitly zero.
      (Addr << 14 >> 14) != Addr)
    return 0;                   // Top 14 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
}

SDValue
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // CellSPU target does not yet support tail call optimization.
  isTailCall = false;

  const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
  unsigned NumOps = Outs.size();
  unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();

  // Handy pointer type
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.
  unsigned ArgOffset = SPUFrameInfo::minStackSize(); // Just below [LR]
  unsigned ArgRegIdx = 0;

  // Keep track of registers passing arguments
  std::vector<std::pair<unsigned, SDValue> > RegsToPass;
  // And the arguments passed on the stack
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue Arg = Outs[i].Val;

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);

    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
    case MVT::i128:
    case MVT::f32:
    case MVT::f64:
    case MVT::v2i64:
    case MVT::v2f64:
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (ArgRegIdx != NumArgRegs) {
        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
                                           false, false, 0));
        ArgOffset += StackSlotSize;
      }
      break;
    }
  }

  // Accumulate how many bytes are to be pushed on the stack, including the
  // linkage area, and parameter passing area.  According to the SPU ABI,
  // we minimally need space for [LR] and [SP].
  unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize();

  // Insert a call sequence start
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes,
                                                            true));

  if (!MemOpChains.empty()) {
    // Adjust the stack pointer for the stack arguments.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  SmallVector<SDValue, 8> Ops;
  unsigned CallOpc = SPUISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);

    if (!ST->usingLargeMem()) {
      // Turn calls to targets that are defined (i.e., have bodies) into BRSL
      // style calls, otherwise, external symbols are BRASL calls. This assumes
      // that declared/defined symbols are in the same compilation unit and can
      // be reached through PC-relative jumps.
      //
      // NOTE:
      // This may be an unsafe assumption for JIT and really large compilation
      // units.
      if (GV->isDeclaration()) {
        Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, GA, Zero);
      } else {
        Callee = DAG.getNode(SPUISD::PCRelAddr, dl, CalleeVT, GA, Zero);
      }
    } else {
      // "Large memory" mode: Turn all calls into indirect calls with a X-form
      // address.
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    EVT CalleeVT = Callee.getValueType();
    SDValue Zero = DAG.getConstant(0, PtrVT);
    SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
                                                 Callee.getValueType());

    if (!ST->usingLargeMem()) {
      Callee = DAG.getNode(SPUISD::AFormAddr, dl, CalleeVT, ExtSym, Zero);
    } else {
      Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, ExtSym, Zero);
    }
  } else if (SDNode *Dest = isLSAAddress(Callee, DAG)) {
    // If this is an absolute destination address that appears to be a legal
    // local store address, use the munged value.
    Callee = SDValue(Dest, 0);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumStackBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // If the function returns void, just return the chain.
  if (Ins.empty())
    return Chain;

  // If the call has results, copy the values out of the ret val registers.
  switch (Ins[0].VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected ret value!");
  case MVT::Other: break;
  case MVT::i32:
    if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                 MVT::i32, InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      InVals.push_back(Chain.getValue(0));
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                 InFlag).getValue(1);
      InVals.push_back(Chain.getValue(0));
    }
    break;
  case MVT::i64:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
    break;
  case MVT::i128:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                               InFlag).getValue(1);
    InVals.push_back(Chain.getValue(0));
    break;
  }

  return Chain;
}

SDValue
SPUTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               DebugLoc dl, SelectionDAG &DAG) const {

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_SPU);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             Outs[i].Val, Flag);
    Flag = Chain.getValue(1);
  }

  if (Flag.getNode())
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else
    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
}

//===----------------------------------------------------------------------===//
// Vector related lowering:
//===----------------------------------------------------------------------===//

static ConstantSDNode *
getVecImm(SDNode *N) {
  SDValue OpVal(0, 0);

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return 0;
  }

  if (OpVal.getNode() != 0) {
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      return CN;
    }
  }

  return 0;
}
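
// getVecImm returns the splatted ConstantSDNode for build_vectors such as
// (build_vector 5, 5, undef, 5); the SPU::get_vec_* helpers below then
// range-check that constant against the immediate fields of the SPU's
// immediate-load forms (IL, ILH, ILHU, ILA and friends).
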
/// get_vec_u18imm - Test if this vector is a vector filled with the same value
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant
SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value <= 0x3ffff)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_vec_i16imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant
SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) {
      return DAG.getTargetConstant(Value, ValueType);
    }
  }

  return SDValue();
}

/// get_vec_i10imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 10-bit constant, and if so, return the
/// constant
SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
                            EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int64_t Value = CN->getSExtValue();
    if (ValueType == MVT::i64) {
      uint64_t UValue = CN->getZExtValue();
      uint32_t upper = uint32_t(UValue >> 32);
      uint32_t lower = uint32_t(UValue);
      if (upper != lower)
        return SDValue();
      Value = Value >> 32;
    }
    if (isInt<10>(Value))
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_vec_i8imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 8-bit constant, and if so, return the
/// constant.
///
/// @note: The incoming vector is v16i8 because that's the only way we can load
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
/// same value.
SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
                           EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    int Value = (int) CN->getZExtValue();
    if (ValueType == MVT::i16
        && Value <= 0xffff                 /* truncated from uint64_t */
        && ((short) Value >> 8) == ((short) Value & 0xff))
      return DAG.getTargetConstant(Value & 0xff, ValueType);
    else if (ValueType == MVT::i8
             && (Value & 0xff) == Value)
      return DAG.getTargetConstant(Value, ValueType);
  }

  return SDValue();
}

/// get_ILHUvec_imm - Test if this vector is a vector filled with the same value
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant
SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
                             EVT ValueType) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    uint64_t Value = CN->getZExtValue();
    if ((ValueType == MVT::i32
         && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
      return DAG.getTargetConstant(Value >> 16, ValueType);
  }

  return SDValue();
}

/// get_v4i32_imm - Catch-all for general 32-bit constant vectors
SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
  }

  return SDValue();
}

/// get_v2i64_imm - Catch-all for general 64-bit constant vectors
SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
  if (ConstantSDNode *CN = getVecImm(N)) {
    // Do not truncate to unsigned here: the splat may need all 64 bits.
    return DAG.getTargetConstant(CN->getZExtValue(), MVT::i64);
  }

  return SDValue();
}

//! Lower a BUILD_VECTOR instruction creatively:
static SDValue
LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();
  BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
  unsigned minSplatBits = EltVT.getSizeInBits();

  if (minSplatBits < 16)
    minSplatBits = 16;

  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if (!BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, minSplatBits)
      || minSplatBits < SplatBitSize)
    return SDValue();   // Wasn't a constant vector or splat exceeded min

  uint64_t SplatBits = APSplatBits.getZExtValue();

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    report_fatal_error("CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = " +
                       Twine(VT.getEVTString()));
  case MVT::v4f32: {
    uint32_t Value32 = uint32_t(SplatBits);
    assert(SplatBitSize == 32
           && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(Value32, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
  }
  case MVT::v2f64: {
    uint64_t f64val = uint64_t(SplatBits);
    assert(SplatBitSize == 64
           && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
    // NOTE: pretend the constant is an integer. LLVM won't load FP constants
    SDValue T = DAG.getConstant(f64val, MVT::i64);
    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
  }
  case MVT::v16i8: {
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16,
                                   &Ops[0], Ops.size()));
  }
  case MVT::v8i16: {
    unsigned short Value16 = SplatBits;
    SDValue T = DAG.getConstant(Value16, EltVT);
    SmallVector<SDValue, 8> Ops;

    Ops.assign(8, T);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
  }
  case MVT::v4i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
  }
  case MVT::v2i32: {
    SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
  }
  case MVT::v2i64: {
    return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
  }
  }
}

SDValue
SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
                     DebugLoc dl) {
  uint32_t upper = uint32_t(SplatVal >> 32);
  uint32_t lower = uint32_t(SplatVal);

  if (upper == lower) {
    // Magic constant that can be matched by IL, ILA, et. al.
    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
    return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   Val, Val, Val, Val));
  } else {
    bool upper_special, lower_special;

    // NOTE: This code creates common-case shuffle masks that can be easily
    // detected as common expressions. It is not attempting to create highly
    // specialized masks to replace any and all 0's, 0xff's and 0x80's.

    // Detect if the upper or lower half is a special shuffle mask pattern:
    upper_special = (upper == 0 || upper == 0xffffffff || upper == 0x80000000);
    lower_special = (lower == 0 || lower == 0xffffffff || lower == 0x80000000);

    // Both upper and lower are special, lower to a constant pool load:
    if (lower_special && upper_special) {
      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
                         SplatValCN, SplatValCN);
    }

    SDValue LO32;
    SDValue HI32;
    SmallVector<SDValue, 16> ShufBytes;

    // Create lower vector if not a special pattern
    if (!lower_special) {
      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
      LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     LO32C, LO32C, LO32C, LO32C));
    }

    // Create upper vector if not a special pattern
    if (!upper_special) {
      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
      HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                     HI32C, HI32C, HI32C, HI32C));
    }

    // If either upper or lower are special, then the two input operands are
    // the same (basically, one of them is a "don't care")
    if (lower_special)
      LO32 = HI32;
    if (upper_special)
      HI32 = LO32;

    for (int i = 0; i < 4; ++i) {
      uint64_t val = 0;
      for (int j = 0; j < 4; ++j) {
        bool process_upper, process_lower;
        val <<= 8;
        process_upper = (upper_special && (i & 1) == 0);
        process_lower = (lower_special && (i & 1) == 1);

        if (process_upper || process_lower) {
          if ((process_upper && upper == 0)
              || (process_lower && lower == 0))
            val |= 0x80;
          else if ((process_upper && upper == 0xffffffff)
                   || (process_lower && lower == 0xffffffff))
            val |= 0xc0;
          else if ((process_upper && upper == 0x80000000)
                   || (process_lower && lower == 0x80000000))
            val |= (j == 0 ? 0xe0 : 0x80);
        } else
          val |= i * 4 + j + ((i & 1) * 16);
      }

      ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
    }

    return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                   &ShufBytes[0], ShufBytes.size()));
  }
}
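
// The shuffle-control bytes above rely on shufb's special selector values:
// a control byte of the form 10xxxxxx produces 0x00, 110xxxxx produces 0xFF,
// and 111xxxxx produces 0x80, hence the 0x80/0xc0/0xe0 constants for the
// all-zero, all-ones and sign-bit patterns.
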
1697 /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on
1698 /// which the Cell can operate. The code inspects V3 to ascertain whether the
1699 /// permutation vector, V3, is monotonically increasing with one "exception"
1700 /// element, e.g., (0, 1, _, 3). If this is the case, then generate a
1701 /// SHUFFLE_MASK synthetic instruction. Otherwise, spill V3 to the constant pool.
1702 /// In either case, the net result is going to eventually invoke SHUFB to
1703 /// permute/shuffle the bytes from V1 and V2.
1705 /// SHUFFLE_MASK is eventually selected as one of the C*D instructions, which
1706 /// generate a control word for byte/halfword/word insertion. This takes care
1707 /// of a single element move from V2 into V1.
1709 /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instruction.
1710 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1711 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1712 SDValue V1 = Op.getOperand(0);
1713 SDValue V2 = Op.getOperand(1);
1714 DebugLoc dl = Op.getDebugLoc();
1716 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1718 // If we have a single element being moved from V2 into V1, this can be handled
1719 // using the C*[DX] compute mask instructions, but the vector elements have
1720 // to be monotonically increasing with one exception element.
1721 EVT VecVT = V1.getValueType();
1722 EVT EltVT = VecVT.getVectorElementType();
1723 unsigned EltsFromV2 = 0;
1725 unsigned V2EltIdx0 = 0;
1726 unsigned CurrElt = 0;
1727 unsigned MaxElts = VecVT.getVectorNumElements();
1728 unsigned PrevElt = 0;
1730 bool monotonic = true;
1732 EVT maskVT; // which of the c?d instructions to use
1734 if (EltVT == MVT::i8) {
1736 maskVT = MVT::v16i8;
1737 } else if (EltVT == MVT::i16) {
1739 maskVT = MVT::v8i16;
1740 } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
1742 maskVT = MVT::v4i32;
1743 } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
1745 maskVT = MVT::v2i64;
1747 llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
1749 for (unsigned i = 0; i != MaxElts; ++i) {
1750 if (SVN->getMaskElt(i) < 0)
1753 unsigned SrcElt = SVN->getMaskElt(i);
1756 if (SrcElt >= V2EltIdx0) {
1757 if (++EltsFromV2 <= 1) {
1758 V2Elt = (V2EltIdx0 - SrcElt) << 2;
1760 } else if (CurrElt != SrcElt) {
1768 if (PrevElt > 0 && SrcElt < MaxElts) {
1769 if ((PrevElt == SrcElt - 1)
1770 || (PrevElt == MaxElts - 1 && SrcElt == 0)) {
1777 } else if (i == 0) {
1778 // First time through, need to keep track of previous element
1781 // This isn't a rotation; it takes elements from vector 2
1787 if (EltsFromV2 == 1 && monotonic) {
1788 // Compute mask and shuffle
1789 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1791 // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
1792 // R1 ($sp) is used here only because it is guaranteed to have its low-order bits zero
1793 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
1794 DAG.getRegister(SPU::R1, PtrVT),
1795 DAG.getConstant(V2Elt, MVT::i32));
1796 SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
1799 // Use shuffle mask in SHUFB synthetic instruction:
1800 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
1802 } else if (rotate) {
1803 int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;
1805 return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
1806 V1, DAG.getConstant(rotamt, MVT::i16));
1808 // Convert the SHUFFLE_VECTOR mask's input element units to the
1810 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
1812 SmallVector<SDValue, 16> ResultMask;
1813 for (unsigned i = 0, e = MaxElts; i != e; ++i) {
1814 unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
1816 for (unsigned j = 0; j < BytesPerElement; ++j)
1817 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
1820 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
1821 &ResultMask[0], ResultMask.size());
1822 return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
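// Example of this fallback (editor's illustration): a v4i32 shuffle mask
// (0, 1, 5, 6) expands, with BytesPerElement == 4, to the byte-level mask
// (0,1,2,3, 4,5,6,7, 20,21,22,23, 24,25,26,27). SHUFB control bytes 16-31
// select from the second input, so the last two words pull elements 1 and 2
// of V2 while the first two copy V1.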
1826 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
1827 SDValue Op0 = Op.getOperand(0); // Op0 = the scalar
1828 DebugLoc dl = Op.getDebugLoc();
1830 if (Op0.getNode()->getOpcode() == ISD::Constant) {
1831 // For a constant, build the appropriate constant vector, which will
1832 // eventually simplify to a vector register load.
1834 ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
1835 SmallVector<SDValue, 16> ConstVecValues;
1839 // Create a constant vector:
1840 switch (Op.getValueType().getSimpleVT().SimpleTy) {
1841 default: llvm_unreachable("Unexpected constant value type in "
1842 "LowerSCALAR_TO_VECTOR");
1843 case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
1844 case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
1845 case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
1846 case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
1847 case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
1848 case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
1851 SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
1852 for (size_t j = 0; j < n_copies; ++j)
1853 ConstVecValues.push_back(CValue);
1855 return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
1856 &ConstVecValues[0], ConstVecValues.size());
1858 // Otherwise, copy the value from one register to another:
1859 switch (Op0.getValueType().getSimpleVT().SimpleTy) {
1860 default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
1867 return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
1874 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
1875 EVT VT = Op.getValueType();
1876 SDValue N = Op.getOperand(0);
1877 SDValue Elt = Op.getOperand(1);
1878 DebugLoc dl = Op.getDebugLoc();
1881 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
1882 // Constant argument:
1883 int EltNo = (int) C->getZExtValue();
1886 if (VT == MVT::i8 && EltNo >= 16)
1887 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
1888 else if (VT == MVT::i16 && EltNo >= 8)
1889 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
1890 else if (VT == MVT::i32 && EltNo >= 4)
1891 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 3");
1892 else if (VT == MVT::i64 && EltNo >= 2)
1893 llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 1");
1895 if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
1896 // i32 and i64: Element 0 is the preferred slot
1897 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
1900 // Need to generate shuffle mask and extract:
1901 int prefslot_begin = -1, prefslot_end = -1;
1902 int elt_byte = EltNo * VT.getSizeInBits() / 8;
1904 switch (VT.getSimpleVT().SimpleTy) {
1906 assert(false && "Invalid value type!");
1908 prefslot_begin = prefslot_end = 3;
1912 prefslot_begin = 2; prefslot_end = 3;
1917 prefslot_begin = 0; prefslot_end = 3;
1922 prefslot_begin = 0; prefslot_end = 7;
1927 assert(prefslot_begin != -1 && prefslot_end != -1 &&
1928 "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
1930 unsigned int ShufBytes[16] = {
1931 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1933 for (int i = 0; i < 16; ++i) {
1934 // zero-fill the upper part of the preferred slot, don't care about the
1936 unsigned int mask_val;
1937 if (i <= prefslot_end) {
1939 ((i < prefslot_begin)
1941 : elt_byte + (i - prefslot_begin));
1943 ShufBytes[i] = mask_val;
1945 ShufBytes[i] = ShufBytes[i % (prefslot_end + 1)];
1948 SDValue ShufMask[4];
1949 for (unsigned i = 0; i < sizeof(ShufMask)/sizeof(ShufMask[0]); ++i) {
1950 unsigned bidx = i * 4;
1951 unsigned int bits = ((ShufBytes[bidx] << 24) |
1952 (ShufBytes[bidx+1] << 16) |
1953 (ShufBytes[bidx+2] << 8) |
1955 ShufMask[i] = DAG.getConstant(bits, MVT::i32);
1958 SDValue ShufMaskVec =
1959 DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
1960 &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
1962 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
1963 DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
1964 N, N, ShufMaskVec));
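// Worked example (editor's illustration): extracting element 5 of a v8i16
// gives prefslot_begin = 2, prefslot_end = 3 and elt_byte = 10, so the loop
// above fills ShufBytes with the repeating pattern (0x80, 0x80, 10, 11):
// every word zero-fills its upper halfword (0x80 is shufb's "constant 0x00"
// control) and moves bytes 10-11, the requested halfword, into the slot
// that VEC2PREFSLOT reads.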
1966 // Variable index: Rotate the requested element into slot 0, then replicate
1967 // slot 0 across the vector
1968 EVT VecVT = N.getValueType();
1969 if (!VecVT.isSimple() || !VecVT.isVector() || !VecVT.is128BitVector()) {
1970 report_fatal_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
1974 // Make life easier by making sure the index is zero-extended to i32
1975 if (Elt.getValueType() != MVT::i32)
1976 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
1978 // Scale the index to a bit/byte shift quantity
1980 APInt(32, uint64_t(16 / N.getValueType().getVectorNumElements()), false);
1981 unsigned scaleShift = scaleFactor.logBase2();
1984 if (scaleShift > 0) {
1985 // Scale the shift factor:
1986 Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
1987 DAG.getConstant(scaleShift, MVT::i32));
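// For instance (editor's note): a v8i16 source gives scaleFactor = 16/8 = 2
// and scaleShift = 1, turning an element index into a byte offset via
// Elt << 1; v4i32 uses scaleShift = 2, while v16i8 needs no scaling.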
1990 vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
1992 // Replicate the bytes starting at byte 0 across the entire vector (for
1993 // consistency with the notion of a unified register set)
1996 switch (VT.getSimpleVT().SimpleTy) {
1998 report_fatal_error("LowerEXTRACT_VECTOR_ELT(variable): Unhandled vector"
2002 SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
2003 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2004 factor, factor, factor, factor);
2008 SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
2009 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2010 factor, factor, factor, factor);
2015 SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
2016 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2017 factor, factor, factor, factor);
2022 SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
2023 SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
2024 replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2025 loFactor, hiFactor, loFactor, hiFactor);
2030 retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
2031 DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2032 vecShift, vecShift, replicate));
2038 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2039 SDValue VecOp = Op.getOperand(0);
2040 SDValue ValOp = Op.getOperand(1);
2041 SDValue IdxOp = Op.getOperand(2);
2042 DebugLoc dl = Op.getDebugLoc();
2043 EVT VT = Op.getValueType();
2045 // Use 0 when the lane to insert into is 'undef'
2047 if (IdxOp.getOpcode() != ISD::UNDEF) {
2048 ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
2049 assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
2050 Idx = (CN->getSExtValue());
2053 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2054 // Use $sp ($1) because it's always 16-byte aligned and it's available:
2055 SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
2056 DAG.getRegister(SPU::R1, PtrVT),
2057 DAG.getConstant(Idx, PtrVT));
2058 SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer);
2061 DAG.getNode(SPUISD::SHUFB, dl, VT,
2062 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
2064 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
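// Editor's sketch of the expected selection: SHUFFLE_MASK becomes a c[bhwd]d
// instruction whose result is, per the SPU ISA, the identity pattern
// 0x10,0x11,...,0x1f with the addressed element replaced by 0x00,0x01,...
// (0x00010203 for a word). Fed to SHUFB with the splatted ValOp first and
// VecOp second, that copies VecOp unchanged except for the one element taken
// from ValOp's preferred slot.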
2069 static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
2070 const TargetLowering &TLI)
2072 SDValue N0 = Op.getOperand(0); // Everything has at least one operand
2073 DebugLoc dl = Op.getDebugLoc();
2074 EVT ShiftVT = TLI.getShiftAmountTy();
2076 assert(Op.getValueType() == MVT::i8);
2079 llvm_unreachable("Unhandled i8 math operator");
2083 // 8-bit addition: Promote the arguments up to 16-bits and truncate
2085 SDValue N1 = Op.getOperand(1);
2086 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2087 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2088 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2089 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
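// E.g. (editor's illustration): an i8 add of a and b becomes
//
//   trunc.i8(add.i16(sext.i16(a), sext.i16(b)))
//
// which is safe because the low 8 bits of a 16-bit sum depend only on the
// low 8 bits of the operands; the same reasoning covers SUB and MUL below.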
2094 // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
2096 SDValue N1 = Op.getOperand(1);
2097 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2098 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2099 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2100 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2104 SDValue N1 = Op.getOperand(1);
2105 EVT N1VT = N1.getValueType();
2107 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2108 if (!N1VT.bitsEq(ShiftVT)) {
2109 unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
2112 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2115 // Replicate lower 8-bits into upper 8:
2117 DAG.getNode(ISD::OR, dl, MVT::i16, N0,
2118 DAG.getNode(ISD::SHL, dl, MVT::i16,
2119 N0, DAG.getConstant(8, MVT::i32)));
2121 // Truncate back down to i8
2122 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2123 DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
2127 SDValue N1 = Op.getOperand(1);
2128 EVT N1VT = N1.getValueType();
2130 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
2131 if (!N1VT.bitsEq(ShiftVT)) {
2132 unsigned N1Opc = ISD::ZERO_EXTEND;
2134 if (N1.getValueType().bitsGT(ShiftVT))
2135 N1Opc = ISD::TRUNCATE;
2137 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2140 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2141 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2144 SDValue N1 = Op.getOperand(1);
2145 EVT N1VT = N1.getValueType();
2147 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2148 if (!N1VT.bitsEq(ShiftVT)) {
2149 unsigned N1Opc = ISD::SIGN_EXTEND;
2151 if (N1VT.bitsGT(ShiftVT))
2152 N1Opc = ISD::TRUNCATE;
2153 N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
2156 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2157 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2160 SDValue N1 = Op.getOperand(1);
2162 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
2163 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
2164 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
2165 DAG.getNode(Opc, dl, MVT::i16, N0, N1));
2173 //! Lower byte immediate operations for v16i8 vectors:
2175 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
2178 EVT VT = Op.getValueType();
2179 DebugLoc dl = Op.getDebugLoc();
2181 ConstVec = Op.getOperand(0);
2182 Arg = Op.getOperand(1);
2183 if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
2184 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2185 ConstVec = ConstVec.getOperand(0);
2187 ConstVec = Op.getOperand(1);
2188 Arg = Op.getOperand(0);
2189 if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
2190 ConstVec = ConstVec.getOperand(0);
2195 if (ConstVec.getNode()->getOpcode() == ISD::BUILD_VECTOR) {
2196 BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(ConstVec.getNode());
2197 assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerByteImmed");
2199 APInt APSplatBits, APSplatUndef;
2200 unsigned SplatBitSize;
2202 unsigned minSplatBits = VT.getVectorElementType().getSizeInBits();
2204 if (BCN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2205 HasAnyUndefs, minSplatBits)
2206 && minSplatBits <= SplatBitSize) {
2207 uint64_t SplatBits = APSplatBits.getZExtValue();
2208 SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
2210 SmallVector<SDValue, 16> tcVec;
2211 tcVec.assign(16, tc);
2212 return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
2213 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &tcVec[0], tcVec.size()));
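// For example (editor's illustration): (or v16i8:x, <0x40 splat>) reaches
// here with SplatBits == 0x40; rebuilding the constant operand as a
// BUILD_VECTOR of sixteen target-constant 0x40 bytes lets the ORBI pattern
// match directly (likewise ANDBI/XORBI for AND and XOR).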
2217 // These operations (AND, OR, XOR) are legal; they just couldn't be custom
2218 // lowered. Return the operation, rather than a null SDValue.
2222 //! Custom lowering for CTPOP (count population)
2224 Custom lowering code that counts the number of ones in the input
2225 operand. SPU has such an instruction, but it counts the number of
2226 ones per byte, and those per-byte counts then have to be accumulated.
2228 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
2229 EVT VT = Op.getValueType();
2230 EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
2231 VT, (128 / VT.getSizeInBits()));
2232 DebugLoc dl = Op.getDebugLoc();
2234 switch (VT.getSimpleVT().SimpleTy) {
2236 assert(false && "Invalid value type!");
2238 SDValue N = Op.getOperand(0);
2239 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2241 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2242 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2244 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
2248 MachineFunction &MF = DAG.getMachineFunction();
2249 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2251 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
2253 SDValue N = Op.getOperand(0);
2254 SDValue Elt0 = DAG.getConstant(0, MVT::i16);
2255 SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
2256 SDValue Shift1 = DAG.getConstant(8, MVT::i32);
2258 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2259 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2261 // CNTB_result becomes the chain to which the virtual register
2262 // CNTB_reg becomes associated:
2263 SDValue CNTB_result =
2264 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
2266 SDValue CNTB_rescopy =
2267 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2269 SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
2271 return DAG.getNode(ISD::AND, dl, MVT::i16,
2272 DAG.getNode(ISD::ADD, dl, MVT::i16,
2273 DAG.getNode(ISD::SRL, dl, MVT::i16,
2280 MachineFunction &MF = DAG.getMachineFunction();
2281 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2283 unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2284 unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
2286 SDValue N = Op.getOperand(0);
2287 SDValue Elt0 = DAG.getConstant(0, MVT::i32);
2288 SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
2289 SDValue Shift1 = DAG.getConstant(16, MVT::i32);
2290 SDValue Shift2 = DAG.getConstant(8, MVT::i32);
2292 SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
2293 SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
2295 // CNTB_result becomes the chain to which all of the virtual registers
2296 // CNTB_reg, SUM1_reg become associated:
2297 SDValue CNTB_result =
2298 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
2300 SDValue CNTB_rescopy =
2301 DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
2304 DAG.getNode(ISD::SRL, dl, MVT::i32,
2305 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
2309 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
2310 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
2312 SDValue Sum1_rescopy =
2313 DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
2316 DAG.getNode(ISD::SRL, dl, MVT::i32,
2317 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
2320 DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
2321 DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
2323 return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
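// Editor's illustration of the accumulation, for x = 0x01020304 (popcount
// 5), with a hypothetical scalar helper standing in for CNTB:
//
//   uint32_t cntb = per_byte_counts(x);   // 0x01010201
//   uint32_t sum1 = (cntb >> 16) + cntb;  // 0x01010302
//   uint32_t sum2 = (sum1 >> 8) + sum1;   // 0x01020405
//   return sum2 & 0xff;                   // 5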
2333 //! Lower ISD::FP_TO_SINT, ISD::FP_TO_UINT for i32
2335 f32->i32 passes through unchanged, whereas f64->i32 expands to a libcall.
2336 All conversions to i64 are expanded to a libcall.
2338 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2339 const SPUTargetLowering &TLI) {
2340 EVT OpVT = Op.getValueType();
2341 SDValue Op0 = Op.getOperand(0);
2342 EVT Op0VT = Op0.getValueType();
2344 if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
2345 || OpVT == MVT::i64) {
2346 // Convert f32 / f64 to i32 / i64 via libcall.
2348 (Op.getOpcode() == ISD::FP_TO_SINT)
2349 ? RTLIB::getFPTOSINT(Op0VT, OpVT)
2350 : RTLIB::getFPTOUINT(Op0VT, OpVT);
2351 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-int conversion!");
2353 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2359 //! Lower ISD::SINT_TO_FP, ISD::UINT_TO_FP for i32
2361 i32->f32 passes through unchanged, whereas i32->f64 is expanded to a libcall.
2362 All conversions from i64 are expanded to a libcall.
2364 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2365 const SPUTargetLowering &TLI) {
2366 EVT OpVT = Op.getValueType();
2367 SDValue Op0 = Op.getOperand(0);
2368 EVT Op0VT = Op0.getValueType();
2370 if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
2371 || Op0VT == MVT::i64) {
2372 // Convert i32, i64 to f64 via libcall:
2374 (Op.getOpcode() == ISD::SINT_TO_FP)
2375 ? RTLIB::getSINTTOFP(Op0VT, OpVT)
2376 : RTLIB::getUINTTOFP(Op0VT, OpVT);
2377 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected int-to-fp conversion!");
2379 return ExpandLibCall(LC, Op, DAG, false, Dummy, TLI);
2385 //! Lower ISD::SETCC
2387 This handles MVT::f64 (double floating point) condition lowering
2389 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
2390 const TargetLowering &TLI) {
2391 CondCodeSDNode *CC = dyn_cast<CondCodeSDNode>(Op.getOperand(2));
2392 DebugLoc dl = Op.getDebugLoc();
2393 assert(CC != 0 && "LowerSETCC: CondCodeSDNode should not be null here!\n");
2395 SDValue lhs = Op.getOperand(0);
2396 SDValue rhs = Op.getOperand(1);
2397 EVT lhsVT = lhs.getValueType();
2398 assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::f64\n");
2400 EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
2401 APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2402 EVT IntVT(MVT::i64);
2404 // Take advantage of the fact that (truncate (srl arg, 32)) is efficiently
2405 // selected to a NOP:
2406 SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
2408 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2409 DAG.getNode(ISD::SRL, dl, IntVT,
2410 i64lhs, DAG.getConstant(32, MVT::i32)));
2411 SDValue lhsHi32abs =
2412 DAG.getNode(ISD::AND, dl, MVT::i32,
2413 lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
2415 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
2417 // SETO and SETUO only use the lhs operand:
2418 if (CC->get() == ISD::SETO) {
2419 // Evaluates to true if Op0 is not [SQ]NaN - lowers to the inverse of
2421 APInt ccResultAllOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
2422 return DAG.getNode(ISD::XOR, dl, ccResultVT,
2423 DAG.getSetCC(dl, ccResultVT,
2424 lhs, DAG.getConstantFP(0.0, lhsVT),
2426 DAG.getConstant(ccResultAllOnes, ccResultVT));
2427 } else if (CC->get() == ISD::SETUO) {
2428 // Evaluates to true if Op0 is [SQ]NaN
2429 return DAG.getNode(ISD::AND, dl, ccResultVT,
2430 DAG.getSetCC(dl, ccResultVT,
2432 DAG.getConstant(0x7ff00000, MVT::i32),
2434 DAG.getSetCC(dl, ccResultVT,
2436 DAG.getConstant(0, MVT::i32),
2440 SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
2442 DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
2443 DAG.getNode(ISD::SRL, dl, IntVT,
2444 i64rhs, DAG.getConstant(32, MVT::i32)));
2446 // If a value is negative, subtract from the sign magnitude constant:
2447 SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
2449 // Convert the sign-magnitude representation into 2's complement:
2450 SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2451 lhsHi32, DAG.getConstant(31, MVT::i32));
2452 SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
2454 DAG.getNode(ISD::SELECT, dl, IntVT,
2455 lhsSelectMask, lhsSignMag2TC, i64lhs);
2457 SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
2458 rhsHi32, DAG.getConstant(31, MVT::i32));
2459 SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
2461 DAG.getNode(ISD::SELECT, dl, IntVT,
2462 rhsSelectMask, rhsSignMag2TC, i64rhs);
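// Editor's illustration: the scalar equivalent of each select above is
//
//   int64_t tc = (bits < 0) ? (int64_t)(0x8000000000000000ULL - bits) : bits;
//
// e.g. -1.0 (0xbff0000000000000) maps to 0xc010000000000000 while +1.0
// (0x3ff0000000000000) is unchanged and -0.0/+0.0 both map to 0, so a plain
// signed integer compare now respects the floating-point ordering.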
2466 switch (CC->get()) {
2469 compareOp = ISD::SETEQ; break;
2472 compareOp = ISD::SETGT; break;
2475 compareOp = ISD::SETGE; break;
2478 compareOp = ISD::SETLT; break;
2481 compareOp = ISD::SETLE; break;
2484 compareOp = ISD::SETNE; break;
2486 report_fatal_error("CellSPU ISel Select: unimplemented f64 condition");
2490 DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
2491 (ISD::CondCode) compareOp);
2493 if ((CC->get() & 0x8) == 0) {
2494 // Ordered comparison:
2495 SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
2496 lhs, DAG.getConstantFP(0.0, MVT::f64),
2498 SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
2499 rhs, DAG.getConstantFP(0.0, MVT::f64),
2501 SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
2503 result = DAG.getNode(ISD::AND, dl, ccResultVT, ordered, result);
2509 //! Lower ISD::SELECT_CC
2511 ISD::SELECT_CC can (generally) be implemented directly on the SPU using the
2514 \note Need to revisit this in the future: if the code path through the true
2515 and false value computations is longer than the latency of a branch (6
2516 cycles), then it would be more advantageous to branch and insert a new basic
2517 block and branch on the condition. However, this code does not make that
2518 assumption, given the simplistic uses so far.
2521 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2522 const TargetLowering &TLI) {
2523 EVT VT = Op.getValueType();
2524 SDValue lhs = Op.getOperand(0);
2525 SDValue rhs = Op.getOperand(1);
2526 SDValue trueval = Op.getOperand(2);
2527 SDValue falseval = Op.getOperand(3);
2528 SDValue condition = Op.getOperand(4);
2529 DebugLoc dl = Op.getDebugLoc();
2531 // NOTE: SELB's arguments: $rA, $rB, $mask
2533 // SELB selects bits from $rA where bits in $mask are 0, bits from $rB
2534 // where bits in $mask are 1. CCond will be inverted, having 1s where the
2535 // condition was true and 0s where the condition was false. Hence, the
2536 // arguments to SELB get reversed.
2538 // Note: Really should be ISD::SELECT instead of SPUISD::SELB, but LLVM's
2539 // legalizer insists on combining SETCC/SELECT into SELECT_CC, so we end up
2540 // with another "cannot select select_cc" assert:
2542 SDValue compare = DAG.getNode(ISD::SETCC, dl,
2543 TLI.getSetCCResultType(Op.getValueType()),
2544 lhs, rhs, condition);
2545 return DAG.getNode(SPUISD::SELB, dl, VT, falseval, trueval, compare);
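// E.g. (editor's illustration): (select_cc lhs, rhs, t, f, setlt) becomes
// SELB(f, t, (setcc lhs, rhs, setlt)); the all-ones compare result selects
// bits from the second argument, t, exactly where the condition held.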
2548 //! Custom lower ISD::TRUNCATE
2549 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
2551 // Type to truncate to
2552 EVT VT = Op.getValueType();
2553 MVT simpleVT = VT.getSimpleVT();
2554 EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
2555 VT, (128 / VT.getSizeInBits()));
2556 DebugLoc dl = Op.getDebugLoc();
2558 // Type to truncate from
2559 SDValue Op0 = Op.getOperand(0);
2560 EVT Op0VT = Op0.getValueType();
2562 if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
2563 // Create shuffle mask, least significant doubleword of quadword
2564 unsigned maskHigh = 0x08090a0b;
2565 unsigned maskLow = 0x0c0d0e0f;
2566 // Use a shuffle to perform the truncation
2567 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2568 DAG.getConstant(maskHigh, MVT::i32),
2569 DAG.getConstant(maskLow, MVT::i32),
2570 DAG.getConstant(maskHigh, MVT::i32),
2571 DAG.getConstant(maskLow, MVT::i32));
2573 SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
2574 Op0, Op0, shufMask);
2576 return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
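// E.g. (editor's illustration): truncating the i128 quadword
// 0xaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbb copies bytes 8-15 into both halves, so
// the preferred slot ends up holding 0xbbbbbbbbbbbbbbbb, the least
// significant (big-endian) doubleword.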
2579 return SDValue(); // Leave the truncate unmolested
2583 * Emit the instruction sequence for i64/i32 -> i128 sign extend. The basic
2584 * algorithm is to duplicate the sign bit using rotmai to generate at
2585 * least one byte full of sign bits. Then propagate the "sign-byte" into
2586 * the leftmost words and the i64/i32 into the rightmost words using shufb.
2588 * @param Op The sext operand
2589 * @param DAG The current DAG
2590 * @return The SDValue with the entire instruction sequence
2592 static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
2594 DebugLoc dl = Op.getDebugLoc();
2596 // Type to extend to
2597 MVT OpVT = Op.getValueType().getSimpleVT();
2599 // Type to extend from
2600 SDValue Op0 = Op.getOperand(0);
2601 MVT Op0VT = Op0.getValueType().getSimpleVT();
2603 // The type to extend to needs to be a i128 and
2604 // the type to extend from needs to be i64 or i32.
2605 assert((OpVT == MVT::i128 && (Op0VT == MVT::i64 || Op0VT == MVT::i32)) &&
2606 "LowerSIGN_EXTEND: input and/or output operand have wrong size");
2608 // Create shuffle mask
2609 unsigned mask1 = 0x10101010; // bytes 0 - 3 and 4 - 7
2610 unsigned mask2 = Op0VT == MVT::i64 ? 0x00010203 : 0x10101010; // bytes 8 - 11
2611 unsigned mask3 = Op0VT == MVT::i64 ? 0x04050607 : 0x00010203; // bytes 12 - 15
2612 SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
2613 DAG.getConstant(mask1, MVT::i32),
2614 DAG.getConstant(mask1, MVT::i32),
2615 DAG.getConstant(mask2, MVT::i32),
2616 DAG.getConstant(mask3, MVT::i32));
2618 // Word wise arithmetic right shift to generate at least one byte
2619 // that contains sign bits.
2620 MVT mvt = Op0VT == MVT::i64 ? MVT::v2i64 : MVT::v4i32;
2621 SDValue sraVal = DAG.getNode(ISD::SRA,
2624 DAG.getNode(SPUISD::PREFSLOT2VEC, dl, mvt, Op0, Op0),
2625 DAG.getConstant(31, MVT::i32));
2627 // Shuffle bytes - Copy the sign bits into the upper 64 bits
2628 // and the input value into the lower 64 bits.
2629 SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
2630 DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i128, Op0), sraVal, shufMask);
2632 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
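// Editor's illustration for an i32 operand: the value occupies bytes 0-3 of
// the ANY_EXTENDed quadword and sraVal's word 0 holds the replicated sign
// bit, so the mask words (0x10101010, 0x10101010, 0x10101010, 0x00010203)
// copy the sign byte into bytes 0-11 and the original word into bytes
// 12-15, yielding a full i128 sign extension.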
2635 //! Custom (target-specific) lowering entry point
2637 This is where LLVM's DAG selection process calls to do target-specific
2641 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
2643 unsigned Opc = (unsigned) Op.getOpcode();
2644 EVT VT = Op.getValueType();
2649 errs() << "SPUTargetLowering::LowerOperation(): need to lower this!\n";
2650 errs() << "Op.getOpcode() = " << Opc << "\n";
2651 errs() << "*Op.getNode():\n";
2652 Op.getNode()->dump();
2654 llvm_unreachable(0);
2660 return LowerLOAD(Op, DAG, SPUTM.getSubtargetImpl());
2662 return LowerSTORE(Op, DAG, SPUTM.getSubtargetImpl());
2663 case ISD::ConstantPool:
2664 return LowerConstantPool(Op, DAG, SPUTM.getSubtargetImpl());
2665 case ISD::GlobalAddress:
2666 return LowerGlobalAddress(Op, DAG, SPUTM.getSubtargetImpl());
2667 case ISD::JumpTable:
2668 return LowerJumpTable(Op, DAG, SPUTM.getSubtargetImpl());
2669 case ISD::ConstantFP:
2670 return LowerConstantFP(Op, DAG);
2672 // i8, i64 math ops:
2681 return LowerI8Math(Op, DAG, Opc, *this);
2685 case ISD::FP_TO_SINT:
2686 case ISD::FP_TO_UINT:
2687 return LowerFP_TO_INT(Op, DAG, *this);
2689 case ISD::SINT_TO_FP:
2690 case ISD::UINT_TO_FP:
2691 return LowerINT_TO_FP(Op, DAG, *this);
2693 // Vector-related lowering.
2694 case ISD::BUILD_VECTOR:
2695 return LowerBUILD_VECTOR(Op, DAG);
2696 case ISD::SCALAR_TO_VECTOR:
2697 return LowerSCALAR_TO_VECTOR(Op, DAG);
2698 case ISD::VECTOR_SHUFFLE:
2699 return LowerVECTOR_SHUFFLE(Op, DAG);
2700 case ISD::EXTRACT_VECTOR_ELT:
2701 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2702 case ISD::INSERT_VECTOR_ELT:
2703 return LowerINSERT_VECTOR_ELT(Op, DAG);
2705 // Look for ANDBI, ORBI and XORBI opportunities and lower appropriately:
2709 return LowerByteImmed(Op, DAG);
2711 // Vector and i8 multiply:
2714 return LowerI8Math(Op, DAG, Opc, *this);
2717 return LowerCTPOP(Op, DAG);
2719 case ISD::SELECT_CC:
2720 return LowerSELECT_CC(Op, DAG, *this);
2723 return LowerSETCC(Op, DAG, *this);
2726 return LowerTRUNCATE(Op, DAG);
2728 case ISD::SIGN_EXTEND:
2729 return LowerSIGN_EXTEND(Op, DAG);
2735 void SPUTargetLowering::ReplaceNodeResults(SDNode *N,
2736 SmallVectorImpl<SDValue>&Results,
2737 SelectionDAG &DAG) const
2740 unsigned Opc = (unsigned) N->getOpcode();
2741 EVT OpVT = N->getValueType(0);
2745 errs() << "SPUTargetLowering::ReplaceNodeResults(): need to fix this!\n";
2746 errs() << "Op.getOpcode() = " << Opc << "\n";
2747 errs() << "*Op.getNode():\n";
2755 /* Otherwise, return unchanged */
2758 //===----------------------------------------------------------------------===//
2759 // Target Optimization Hooks
2760 //===----------------------------------------------------------------------===//
2763 SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
2766 TargetMachine &TM = getTargetMachine();
2768 const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
2769 SelectionDAG &DAG = DCI.DAG;
2770 SDValue Op0 = N->getOperand(0); // everything has at least one operand
2771 EVT NodeVT = N->getValueType(0); // The node's value type
2772 EVT Op0VT = Op0.getValueType(); // The first operand's result
2773 SDValue Result; // Initially, empty result
2774 DebugLoc dl = N->getDebugLoc();
2776 switch (N->getOpcode()) {
2779 SDValue Op1 = N->getOperand(1);
2781 if (Op0.getOpcode() == SPUISD::IndirectAddr
2782 || Op1.getOpcode() == SPUISD::IndirectAddr) {
2783 // Normalize the operands to reduce repeated code
2784 SDValue IndirectArg = Op0, AddArg = Op1;
2786 if (Op1.getOpcode() == SPUISD::IndirectAddr) {
2791 if (isa<ConstantSDNode>(AddArg)) {
2792 ConstantSDNode *CN0 = cast<ConstantSDNode>(AddArg);
2793 SDValue IndOp1 = IndirectArg.getOperand(1);
2795 if (CN0->isNullValue()) {
2796 // (add (SPUindirect <arg>, <arg>), 0) ->
2797 // (SPUindirect <arg>, <arg>)
2799 #if !defined(NDEBUG)
2800 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2802 << "Replace: (add (SPUindirect <arg>, <arg>), 0)\n"
2803 << "With: (SPUindirect <arg>, <arg>)\n";
2808 } else if (isa<ConstantSDNode>(IndOp1)) {
2809 // (add (SPUindirect <arg>, <const>), <const>) ->
2810 // (SPUindirect <arg>, <const + const>)
2811 ConstantSDNode *CN1 = cast<ConstantSDNode>(IndOp1);
2812 int64_t combinedConst = CN0->getSExtValue() + CN1->getSExtValue();
2813 SDValue combinedValue = DAG.getConstant(combinedConst, Op0VT);
2815 #if !defined(NDEBUG)
2816 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2818 << "Replace: (add (SPUindirect <arg>, " << CN1->getSExtValue()
2819 << "), " << CN0->getSExtValue() << ")\n"
2820 << "With: (SPUindirect <arg>, "
2821 << combinedConst << ")\n";
2825 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2826 IndirectArg.getOperand(0), combinedValue);
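// E.g. (editor's illustration): (add (SPUindirect R, 16), 4) folds to
// (SPUindirect R, 20), saving the separate address arithmetic.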
2832 case ISD::SIGN_EXTEND:
2833 case ISD::ZERO_EXTEND:
2834 case ISD::ANY_EXTEND: {
2835 if (Op0.getOpcode() == SPUISD::VEC2PREFSLOT && NodeVT == Op0VT) {
2836 // (any_extend (SPUextract_elt0 <arg>)) ->
2837 // (SPUextract_elt0 <arg>)
2838 // Types must match, however...
2839 #if !defined(NDEBUG)
2840 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2841 errs() << "\nReplace: ";
2843 errs() << "\nWith: ";
2844 Op0.getNode()->dump(&DAG);
2853 case SPUISD::IndirectAddr: {
2854 if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
2855 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
2856 if (CN != 0 && CN->isNullValue()) {
2857 // (SPUindirect (SPUaform <addr>, 0), 0) ->
2858 // (SPUaform <addr>, 0)
2860 DEBUG(errs() << "Replace: ");
2861 DEBUG(N->dump(&DAG));
2862 DEBUG(errs() << "\nWith: ");
2863 DEBUG(Op0.getNode()->dump(&DAG));
2864 DEBUG(errs() << "\n");
2868 } else if (Op0.getOpcode() == ISD::ADD) {
2869 SDValue Op1 = N->getOperand(1);
2870 if (ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(Op1)) {
2871 // (SPUindirect (add <arg>, <arg>), 0) ->
2872 // (SPUindirect <arg>, <arg>)
2873 if (CN1->isNullValue()) {
2875 #if !defined(NDEBUG)
2876 if (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) {
2878 << "Replace: (SPUindirect (add <arg>, <arg>), 0)\n"
2879 << "With: (SPUindirect <arg>, <arg>)\n";
2883 return DAG.getNode(SPUISD::IndirectAddr, dl, Op0VT,
2884 Op0.getOperand(0), Op0.getOperand(1));
2890 case SPUISD::SHLQUAD_L_BITS:
2891 case SPUISD::SHLQUAD_L_BYTES:
2892 case SPUISD::ROTBYTES_LEFT: {
2893 SDValue Op1 = N->getOperand(1);
2895 // Kill degenerate vector shifts:
2896 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2897 if (CN->isNullValue()) {
2903 case SPUISD::PREFSLOT2VEC: {
2904 switch (Op0.getOpcode()) {
2907 case ISD::ANY_EXTEND:
2908 case ISD::ZERO_EXTEND:
2909 case ISD::SIGN_EXTEND: {
2910 // (SPUprefslot2vec (any|zero|sign_extend (SPUvec2prefslot <arg>))) ->
2912 // but only if the SPUprefslot2vec and <arg> types match.
2913 SDValue Op00 = Op0.getOperand(0);
2914 if (Op00.getOpcode() == SPUISD::VEC2PREFSLOT) {
2915 SDValue Op000 = Op00.getOperand(0);
2916 if (Op000.getValueType() == NodeVT) {
2922 case SPUISD::VEC2PREFSLOT: {
2923 // (SPUprefslot2vec (SPUvec2prefslot <arg>)) ->
2925 Result = Op0.getOperand(0);
2933 // Otherwise, return unchanged.
2935 if (Result.getNode()) {
2936 DEBUG(errs() << "\nReplace.SPU: ");
2937 DEBUG(N->dump(&DAG));
2938 DEBUG(errs() << "\nWith: ");
2939 DEBUG(Result.getNode()->dump(&DAG));
2940 DEBUG(errs() << "\n");
2947 //===----------------------------------------------------------------------===//
2948 // Inline Assembly Support
2949 //===----------------------------------------------------------------------===//
2951 /// getConstraintType - Given a constraint letter, return the type of
2952 /// constraint it is for this target.
2953 SPUTargetLowering::ConstraintType
2954 SPUTargetLowering::getConstraintType(const std::string &ConstraintLetter) const {
2955 if (ConstraintLetter.size() == 1) {
2956 switch (ConstraintLetter[0]) {
2963 return C_RegisterClass;
2966 return TargetLowering::getConstraintType(ConstraintLetter);
2969 std::pair<unsigned, const TargetRegisterClass*>
2970 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2973 if (Constraint.size() == 1) {
2974 // GCC RS6000 Constraint Letters
2975 switch (Constraint[0]) {
2979 return std::make_pair(0U, SPU::R64CRegisterClass);
2980 return std::make_pair(0U, SPU::R32CRegisterClass);
2983 return std::make_pair(0U, SPU::R32FPRegisterClass);
2984 else if (VT == MVT::f64)
2985 return std::make_pair(0U, SPU::R64FPRegisterClass);
2988 return std::make_pair(0U, SPU::GPRCRegisterClass);
2992 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2995 //! Compute used/known bits for a SPU operand
2997 SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
3001 const SelectionDAG &DAG,
3002 unsigned Depth ) const {
3004 const uint64_t uint64_sizebits = sizeof(uint64_t) * CHAR_BIT;
3006 switch (Op.getOpcode()) {
3008 // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
3014 case SPUISD::PREFSLOT2VEC:
3015 case SPUISD::LDRESULT:
3016 case SPUISD::VEC2PREFSLOT:
3017 case SPUISD::SHLQUAD_L_BITS:
3018 case SPUISD::SHLQUAD_L_BYTES:
3019 case SPUISD::VEC_ROTL:
3020 case SPUISD::VEC_ROTR:
3021 case SPUISD::ROTBYTES_LEFT:
3022 case SPUISD::SELECT_MASK:
3029 SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
3030 unsigned Depth) const {
3031 switch (Op.getOpcode()) {
3036 EVT VT = Op.getValueType();
3038 if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
3041 return VT.getSizeInBits();
3046 // LowerAsmOperandForConstraint
3048 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3049 char ConstraintLetter,
3050 std::vector<SDValue> &Ops,
3051 SelectionDAG &DAG) const {
3052 // Default, for the time being, to the base class handler
3053 TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
3056 /// isLegalAddressImmediate - Return true if the integer value can be used
3057 /// as the offset of the target addressing mode.
3058 bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
3059 const Type *Ty) const {
3060 // SPU's local store is 256K, so a legal address immediate is an 18-bit offset:
3061 return (V > -(1 << 18) && V < (1 << 18) - 1);
3064 bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
3069 SPUTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3070 // The SPU target isn't yet aware of offsets.