//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  unsigned XORValue;
  int64_t AddValue;
  unsigned Bit;
};
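// Worked example (illustrative, assuming SystemZ::IPM_CC is 28): IPM leaves
// CC in bits 28-29 of the result, with bits 30-31 zero.  To produce 1 exactly
// when CC is 0, getIPMConversion below returns IPMConversion(0, -(1 << 28), 31):
// subtracting 1 << 28 makes the 32-bit value negative only for CC == 0, and
// shifting right by 31 extracts that sign bit as a 0/1 result.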
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32: return true;
  case MVT::i64: return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);
  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect branch.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }
  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }
  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}
bool
SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}
bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}
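// For example, "base + index + 0x7ffff" is accepted (0x7ffff is the largest
// 20-bit signed offset), while "base + 4*index" is rejected because only a
// scale factor of 0 or 1 is allowed.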
bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}
//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}
// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. MC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}
std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}
// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}
SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being used.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }
  // Save the number of non-varargs registers for later use by va_start, etc.
  FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
  FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

  // Likewise the address (in the form of a frame index) of where the
  // first stack vararg would be. The 1-byte size here is arbitrary.
  int64_t StackSize = CCInfo.getNextStackOffset();
  FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

  // ...and a similar frame index for the caller-allocated save area
  // that will be used to store the incoming registers.
  int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
  unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
  FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

  // Store the FPR varargs in the reserved frame slots. (We store the
  // GPRs as part of the prologue.)
  if (NumFixedFPRs < SystemZ::NumArgFPRs) {
    SDValue MemOps[SystemZ::NumArgFPRs];
    for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
      unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
      int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                   &SystemZ::FP64BitRegClass);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
      MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                               MachinePointerInfo::getFixedStack(FI),
                               false, false, 0);
    }
    // Join the stores, which are independent of one another.
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOps[NumFixedFPRs],
                        SystemZ::NumArgFPRs - NumFixedFPRs);
  }
static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being used.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}
SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}
// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

    llvm_unreachable("Invalid integer condition!");

  case ISD::SETO: return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value. Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit. 0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these can
  // be done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}
// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// can be converted to a comparison against zero, adjust the operands
// and CCMask as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
                          SDValue &CmpOp0, SDValue &CmpOp1,
                          unsigned &CCMask) {
  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
    CCMask ^= SystemZ::CCMASK_CMP_EQ;
    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
  }
}
// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  }

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}
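// Illustrative example: with an 8-bit sign-extending load compared for
// CCMASK_CMP_LT against 0, the code above rewrites the test as an unsigned
// comparison against 127 with CCMASK_CMP_GT, which checks whether the high
// bit of the byte is set and can therefore be implemented with CLI.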
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
  // There are no instructions to compare a register with a memory byte.
  if (Load->getMemoryVT() == MVT::i8)
    return false;
  // Otherwise decide on extension type.
  switch (Load->getExtensionType()) {
  case ISD::NON_EXTLOAD:
    return true;
  case ISD::SEXTLOAD:
    return ICmpType != SystemZICMP::UnsignedOnly;
  case ISD::ZEXTLOAD:
    return ICmpType != SystemZICMP::SignedOnly;
  }
  return false;
}
// Return true if it is better to swap comparison operands Op0 and Op1.
// ICmpType is the type of an integer comparison.
static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
                                  unsigned ICmpType) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
  if (COp1 && COp1->getZExtValue() == 0)
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
      !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!COp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(COp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(COp1->getSExtValue()))
      return false;
    return true;
  }
  return false;
}
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;
  ShiftVal = Amount;
  return true;
}
// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
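  // Illustrative example: a TMLL-style mask of 0x0ff0 gives HighShift = 11,
  // so High = 0x800 and Low = 0x010; the checks below compare CmpVal against
  // these two single-bit values to pick a CC mask.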
  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is clear.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
// implemented as a TEST UNDER MASK instruction when the condition being
// tested is as described by CCValid and CCMask. Update the arguments
// with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
                                   SDValue &CmpOp0, SDValue &CmpOp1,
                                   unsigned &CCValid, unsigned &CCMask,
                                   unsigned &ICmpType) {
  // Check that we have a comparison with a constant.
  ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
  if (!ConstCmpOp1)
    return;
  uint64_t CmpVal = ConstCmpOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  if (CmpOp0.getOpcode() != ISD::AND)
    return;
  SDValue AndOp0 = CmpOp0.getOperand(0);
  SDValue AndOp1 = CmpOp0.getOperand(1);
  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
  if (!Mask)
    return;
  uint64_t MaskVal = Mask->getZExtValue();

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (ICmpType != SystemZICMP::SignedOnly &&
      AndOp0.getOpcode() == ISD::SHL &&
      isSimpleShift(AndOp0, ShiftVal) &&
      (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
  } else if (ICmpType != SystemZICMP::SignedOnly &&
             AndOp0.getOpcode() == ISD::SRL &&
             isSimpleShift(AndOp0, ShiftVal) &&
             (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
                                     ICmpType);
  }
  if (NewCCMask == 0)
    return;

  // Go ahead and make the change.
  Opcode = SystemZISD::TM;
  CmpOp0 = AndOp0;
  CmpOp1 = AndOp1;
  ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
              bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
  CCValid = SystemZ::CCMASK_TM;
  CCMask = NewCCMask;
}
// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
                       SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  unsigned Opcode, ICmpType = 0;
  if (CmpOp0.getValueType().isFloatingPoint()) {
    CCValid = SystemZ::CCMASK_FCMP;
    Opcode = SystemZISD::FCMP;
  } else {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;

    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    Opcode = SystemZISD::ICMP;
    // Choose the type of comparison. Equality and inequality tests can
    // use either signed or unsigned comparisons. The choice also doesn't
    // matter if both sign bits are known to be clear. In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (CCMask == SystemZ::CCMASK_CMP_EQ ||
        CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
      ICmpType = SystemZICMP::Any;
    else if (IsUnsigned)
      ICmpType = SystemZICMP::UnsignedOnly;
    else
      ICmpType = SystemZICMP::SignedOnly;
  }

  if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
    std::swap(CmpOp0, CmpOp1);
    CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
              (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_UO));
  }

  adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
                         ICmpType);
  if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
    return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
                       DAG.getConstant(ICmpType, MVT::i32));
  return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits. Extend is the extension type to use. Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
                            unsigned Extend, SDValue Op0, SDValue Op1,
                            SDValue &Hi, SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
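// Illustrative example (not from the original source): for a UMUL_LOHI of
// 0x80000000 and 2, the zero-extended 64-bit product is 0x100000000, so the
// code above produces Hi = 1 and Lo = 0.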
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
  SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);

  if (Conversion.XORValue)
    Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
                         DAG.getConstant(Conversion.XORValue, MVT::i32));

  if (Conversion.AddValue)
    Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
                         DAG.getConstant(Conversion.AddValue, MVT::i32));

  // The SHR/AND sequence should get optimized to an RISBG.
  Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
                       DAG.getConstant(Conversion.Bit, MVT::i32));
  if (Conversion.Bit != 31)
    Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
                         DAG.getConstant(1, MVT::i32));
  return Result;
}
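// Worked example (illustrative, relying on the CCMASK_* and IPM_CC values
// used elsewhere in this backend): a signed integer SETLT gives
// CCValid == CCMASK_ICMP and CCMask == CCMASK_CMP_LT, for which
// getIPMConversion returns no XOR, no add and Bit == SystemZ::IPM_CC, so the
// emitted sequence is simply (IPM result >> IPM_CC) & 1.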
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCValid, MVT::i32),
                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  SmallVector<SDValue, 5> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Assign anchors at 1<<12 byte boundaries.
    uint64_t Anchor = Offset & ~uint64_t(0xfff);
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

    // The offset can be folded into the address if it is aligned to a halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}
1591 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
1592 SelectionDAG &DAG) const {
1594 const GlobalValue *GV = Node->getGlobal();
1595 EVT PtrVT = getPointerTy();
1596 TLSModel::Model model = TM.getTLSModel(GV);
1598 if (model != TLSModel::LocalExec)
1599 llvm_unreachable("only local-exec TLS mode supported");
1601 // The high part of the thread pointer is in access register 0.
1602 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
1603 DAG.getConstant(0, MVT::i32));
1604 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
1606 // The low part of the thread pointer is in access register 1.
1607 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
1608 DAG.getConstant(1, MVT::i32));
1609 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
1611 // Merge them into a single 64-bit address.
1612 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
1613 DAG.getConstant(32, PtrVT));
1614 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
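// Illustrative example (hypothetical values): if access register 0 holds
// 0x00000001 and access register 1 holds 0x00002000, the thread pointer
// assembled here is 0x0000000100002000.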
1616 // Get the offset of GA from the thread pointer.
1617 SystemZConstantPoolValue *CPV =
1618 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
1620 // Force the offset into the constant pool and load it from there.
1621 SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
1622 SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
1623 CPAddr, MachinePointerInfo::getConstantPool(),
1624 false, false, false, 0);
1626 // Add the base and offset together.
1627 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
1630 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
1631 SelectionDAG &DAG) const {
1633 const BlockAddress *BA = Node->getBlockAddress();
1634 int64_t Offset = Node->getOffset();
1635 EVT PtrVT = getPointerTy();
1637 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
1638 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1642 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
1643 SelectionDAG &DAG) const {
1645 EVT PtrVT = getPointerTy();
1646 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1648 // Use LARL to load the address of the table.
1649 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1652 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
1653 SelectionDAG &DAG) const {
1655 EVT PtrVT = getPointerTy();
1658 if (CP->isMachineConstantPoolEntry())
1659 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1660 CP->getAlignment());
1662 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1663 CP->getAlignment(), CP->getOffset());
1665 // Use LARL to load the address of the constant pool entry.
1666 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
1669 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
1670 SelectionDAG &DAG) const {
1672 SDValue In = Op.getOperand(0);
1673 EVT InVT = In.getValueType();
1674 EVT ResVT = Op.getValueType();
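// Both directions below rely on 32-bit FP values living in the high half of
// a 64-bit FP register, so the i32 payload always travels through bits 63:32
// of an i64.  Illustrative example: bitcasting the f32 1.0 (0x3f800000)
// extracts 0x3f800000 from the high word of the i64 0x3f80000000000000.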
1676 if (InVT == MVT::i32 && ResVT == MVT::f32) {
1678 if (Subtarget.hasHighWord()) {
1679 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
1681 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
1682 MVT::i64, SDValue(U64, 0), In);
1684 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
1685 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
1686 DAG.getConstant(32, MVT::i64));
1688 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
1689 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
1690 DL, MVT::f32, Out64);
1692 if (InVT == MVT::f32 && ResVT == MVT::i32) {
1693 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
1694 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
1695 MVT::f64, SDValue(U64, 0), In);
1696 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
1697 if (Subtarget.hasHighWord())
1698 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
1700 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
1701 DAG.getConstant(32, MVT::i64));
1702 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
1704 llvm_unreachable("Unexpected bitcast combination");
1707 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
1708 SelectionDAG &DAG) const {
1709 MachineFunction &MF = DAG.getMachineFunction();
1710 SystemZMachineFunctionInfo *FuncInfo =
1711 MF.getInfo<SystemZMachineFunctionInfo>();
1712 EVT PtrVT = getPointerTy();
1714 SDValue Chain = Op.getOperand(0);
1715 SDValue Addr = Op.getOperand(1);
1716 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
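// The stores below fill in the s390x ELF ABI va_list, which (roughly, as a
// sketch) is laid out as:
//
//   struct __va_list_tag {
//     long __gpr;                // index of the next GPR argument
//     long __fpr;                // index of the next FPR argument
//     void *__overflow_arg_area; // next argument passed on the stack
//     void *__reg_save_area;     // start of the register save area
//   };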
1719 // The initial values of each field.
1720 const unsigned NumFields = 4;
1721 SDValue Fields[NumFields] = {
1722 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
1723 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
1724 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
1725 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
1728 // Store each field into its respective slot.
1729 SDValue MemOps[NumFields];
1730 unsigned Offset = 0;
1731 for (unsigned I = 0; I < NumFields; ++I) {
1732 SDValue FieldAddr = Addr;
1734 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
1735 DAG.getIntPtrConstant(Offset));
1736 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
1737 MachinePointerInfo(SV, Offset),
1741 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
1744 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
1745 SelectionDAG &DAG) const {
1746 SDValue Chain = Op.getOperand(0);
1747 SDValue DstPtr = Op.getOperand(1);
1748 SDValue SrcPtr = Op.getOperand(2);
1749 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
1750 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
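// The va_list built by lowerVASTART above is four 8-byte fields, so copying
// a fixed 32 bytes is sufficient here.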
1753 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
1754 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
1755 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
1758 SDValue SystemZTargetLowering::
1759 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
1760 SDValue Chain = Op.getOperand(0);
1761 SDValue Size = Op.getOperand(1);
1764 unsigned SPReg = getStackPointerRegisterToSaveRestore();
1766 // Get a reference to the stack pointer.
1767 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
1769 // Get the new stack pointer value.
1770 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);
1772 // Copy the new stack pointer back.
1773 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
1775 // The allocated data lives above the 160 bytes allocated for the standard
1776 // frame, plus any outgoing stack arguments. We don't know how much that
1777 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
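// Illustrative sketch: once the outgoing argument area is known, the
// ADJDYNALLOC placeholder is resolved to a constant, so with (say) 32 bytes
// of outgoing stack arguments the address handed back to the program would
// be NewSP + 160 + 32.  (The 32 here is hypothetical; it is not computed in
// this function.)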
1778 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
1779 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
1781 SDValue Ops[2] = { Result, Chain };
1782 return DAG.getMergeValues(Ops, 2, DL);
1785 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
1786 SelectionDAG &DAG) const {
1787 EVT VT = Op.getValueType();
1791 // Just do a normal 64-bit multiplication and extract the results.
1792 // We define this so that it can be used for constant division.
1793 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
1794 Op.getOperand(1), Ops[1], Ops[0]);
1796 // Do a full 128-bit multiplication based on UMUL_LOHI64:
1798 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
1800 // but using the fact that the upper halves are either all zeros or all ones:
1803 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
1805 // and grouping the right terms together, since they are quicker than the multiplication:
1808 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
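//
// Illustrative check with l = -1 and r = 2 (values chosen only as an example):
//   ll = 0xffffffffffffffff, rl = 2, lh = all ones, rh = all zeros
//   ll * rl                       = 0x1fffffffffffffffe   (128-bit)
//   ((lh & rl) + (ll & rh)) << 64 = 2 << 64
//   difference                    = -2, the correct signed 128-bit product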
1809 SDValue C63 = DAG.getConstant(63, MVT::i64);
1810 SDValue LL = Op.getOperand(0);
1811 SDValue RL = Op.getOperand(1);
1812 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
1813 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
1814 // UMUL_LOHI64 returns the low result in the odd register and the high
1815 // result in the even register. SMUL_LOHI is defined to return the
1816 // low half first, so the results are in reverse order.
1817 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
1818 LL, RL, Ops[1], Ops[0]);
1819 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
1820 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
1821 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
1822 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
1824 return DAG.getMergeValues(Ops, 2, DL);
1827 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
1828 SelectionDAG &DAG) const {
1829 EVT VT = Op.getValueType();
1833 // Just do a normal 64-bit multiplication and extract the results.
1834 // We define this so that it can be used for constant division.
1835 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
1836 Op.getOperand(1), Ops[1], Ops[0]);
1838 // UMUL_LOHI64 returns the low result in the odd register and the high
1839 // result in the even register. UMUL_LOHI is defined to return the
1840 // low half first, so the results are in reverse order.
1841 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
1842 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1843 return DAG.getMergeValues(Ops, 2, DL);
1846 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
1847 SelectionDAG &DAG) const {
1848 SDValue Op0 = Op.getOperand(0);
1849 SDValue Op1 = Op.getOperand(1);
1850 EVT VT = Op.getValueType();
1854 // We use DSGF for 32-bit division.
1856 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
1857 Opcode = SystemZISD::SDIVREM32;
1858 } else if (DAG.ComputeNumSignBits(Op1) > 32) {
1859 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
1860 Opcode = SystemZISD::SDIVREM32;
1862 Opcode = SystemZISD::SDIVREM64;
1864 // DSG(F) takes a 64-bit dividend, so the even register in the GR128
1865 // input is "don't care". The instruction returns the remainder in
1866 // the even register and the quotient in the odd register.
1868 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
1869 Op0, Op1, Ops[1], Ops[0]);
1870 return DAG.getMergeValues(Ops, 2, DL);
1873 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
1874 SelectionDAG &DAG) const {
1875 EVT VT = Op.getValueType();
1878 // DL(G) uses a double-width dividend, so we need to clear the even
1879 // register in the GR128 input. The instruction returns the remainder
1880 // in the even register and the quotient in the odd register.
1883 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
1884 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1886 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
1887 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1888 return DAG.getMergeValues(Ops, 2, DL);
1891 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
1892 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
1894 // Get the known-zero masks for each operand.
1895 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
1896 APInt KnownZero[2], KnownOne[2];
1897 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
1898 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
1900 // See if the upper 32 bits of one operand and the lower 32 bits of the
1901 // other are known zero. They are the low and high operands respectively.
1902 uint64_t Masks[] = { KnownZero[0].getZExtValue(),
1903 KnownZero[1].getZExtValue() };
1905 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
1907 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
1912 SDValue LowOp = Ops[Low];
1913 SDValue HighOp = Ops[High];
1915 // If the high part is a constant, we're better off using IILH.
1916 if (HighOp.getOpcode() == ISD::Constant)
1919 // If the low part is a constant that is outside the range of LHI,
1920 // then we're better off using IILF.
1921 if (LowOp.getOpcode() == ISD::Constant) {
1922 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
1923 if (!isInt<16>(Value))
1927 // Check whether the high part is an AND that doesn't change the
1928 // high 32 bits and just masks out low bits. We can skip it if so.
1929 if (HighOp.getOpcode() == ISD::AND &&
1930 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
1931 ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
1932 uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
1933 if ((Mask >> 32) == 0xffffffff)
1934 HighOp = HighOp.getOperand(0);
1937 // Take advantage of the fact that all GR32 operations only change the
1938 // low 32 bits by truncating Low to an i32 and inserting it directly
1939 // using a subreg. The interesting cases are those where the truncation can be folded into surrounding operations.
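// Illustrative example (hypothetical masks): OR-ing a value known to be of
// the form 0xXXXXXXXX00000000 with one known to be 0x00000000XXXXXXXX thus
// becomes "insert the truncated low operand into the low 32 bits of the high
// operand", which GR32 instructions such as IILF can do without disturbing
// the high half.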
1942 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
1943 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
1944 MVT::i64, HighOp, Low32);
1947 // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation.  Lower the 8- and
1948 // 16-bit forms into the fullword ATOMIC_LOADW_* operation given by Opcode.
1949 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
1951 unsigned Opcode) const {
1952 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
1954 // 32-bit operations need no code outside the main loop.
1955 EVT NarrowVT = Node->getMemoryVT();
1956 EVT WideVT = MVT::i32;
1957 if (NarrowVT == WideVT)
1960 int64_t BitSize = NarrowVT.getSizeInBits();
1961 SDValue ChainIn = Node->getChain();
1962 SDValue Addr = Node->getBasePtr();
1963 SDValue Src2 = Node->getVal();
1964 MachineMemOperand *MMO = Node->getMemOperand();
1966 EVT PtrVT = Addr.getValueType();
1968 // Convert atomic subtracts of constants into additions.
1969 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
1970 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
1971 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
1972 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
1975 // Get the address of the containing word.
1976 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
1977 DAG.getConstant(-4, PtrVT));
1979 // Get the number of bits that the word must be rotated left in order
1980 // to bring the field to the top bits of a GR32.
1981 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
1982 DAG.getConstant(3, PtrVT));
1983 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
1985 // Get the complementing shift amount, for rotating a field in the top
1986 // bits back to its proper position.
1987 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
1988 DAG.getConstant(0, WideVT), BitShift);
1990 // Extend the source operand to 32 bits and prepare it for the inner loop.
1991 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
1992 // operations require the source to be shifted in advance. (This shift
1993 // can be folded if the source is constant.) For AND and NAND, the lower
1994 // bits must be set, while for other opcodes they should be left clear.
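// Illustrative example: for an 8-bit field, a non-SWAP source is shifted
// left by 24 so that it lines up with the rotated field in the top byte,
// and for AND/NAND the low 24 bits are then set (0x00ffffff) so that the
// bits outside the field are preserved by the AND.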
1995 if (Opcode != SystemZISD::ATOMIC_SWAPW)
1996 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
1997 DAG.getConstant(32 - BitSize, WideVT));
1998 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
1999 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
2000 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
2001 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));
2003 // Construct the ATOMIC_LOADW_* node.
2004 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2005 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
2006 DAG.getConstant(BitSize, WideVT) };
2007 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
2008 array_lengthof(Ops),
2011 // Rotate the result of the final CS so that the field is in the lower
2012 // bits of a GR32, then truncate it.
2013 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
2014 DAG.getConstant(BitSize, WideVT));
2015 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
2017 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
2018 return DAG.getMergeValues(RetOps, 2, DL);
2021 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation.  Lower it into
2022 // a fullword ATOMIC_CMP_SWAPW operation.
2023 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
2024 SelectionDAG &DAG) const {
2025 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
2027 // We have native support for 32-bit compare and swap.
2028 EVT NarrowVT = Node->getMemoryVT();
2029 EVT WideVT = MVT::i32;
2030 if (NarrowVT == WideVT)
2033 int64_t BitSize = NarrowVT.getSizeInBits();
2034 SDValue ChainIn = Node->getOperand(0);
2035 SDValue Addr = Node->getOperand(1);
2036 SDValue CmpVal = Node->getOperand(2);
2037 SDValue SwapVal = Node->getOperand(3);
2038 MachineMemOperand *MMO = Node->getMemOperand();
2040 EVT PtrVT = Addr.getValueType();
2042 // Get the address of the containing word.
2043 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
2044 DAG.getConstant(-4, PtrVT));
2046 // Get the number of bits that the word must be rotated left in order
2047 // to bring the field to the top bits of a GR32.
2048 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
2049 DAG.getConstant(3, PtrVT));
2050 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
2052 // Get the complementing shift amount, for rotating a field in the top
2053 // bits back to its proper position.
2054 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
2055 DAG.getConstant(0, WideVT), BitShift);
2057 // Construct the ATOMIC_CMP_SWAPW node.
2058 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
2059 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
2060 NegBitShift, DAG.getConstant(BitSize, WideVT) };
2061 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
2062 VTList, Ops, array_lengthof(Ops),
2067 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
2068 SelectionDAG &DAG) const {
2069 MachineFunction &MF = DAG.getMachineFunction();
2070 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2071 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
2072 SystemZ::R15D, Op.getValueType());
2075 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
2076 SelectionDAG &DAG) const {
2077 MachineFunction &MF = DAG.getMachineFunction();
2078 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
2079 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
2080 SystemZ::R15D, Op.getOperand(1));
2083 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
2084 SelectionDAG &DAG) const {
2085 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2087 // Just preserve the chain.
2088 return Op.getOperand(0);
2090 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2091 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
2092 MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
2095 DAG.getConstant(Code, MVT::i32),
2098 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
2099 Node->getVTList(), Ops, array_lengthof(Ops),
2100 Node->getMemoryVT(), Node->getMemOperand());
2103 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
2104 SelectionDAG &DAG) const {
2105 switch (Op.getOpcode()) {
2106 case ISD::BR_CC:
2107 return lowerBR_CC(Op, DAG);
2108 case ISD::SELECT_CC:
2109 return lowerSELECT_CC(Op, DAG);
2110 case ISD::SETCC:
2111 return lowerSETCC(Op, DAG);
2112 case ISD::GlobalAddress:
2113 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
2114 case ISD::GlobalTLSAddress:
2115 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
2116 case ISD::BlockAddress:
2117 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
2118 case ISD::JumpTable:
2119 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
2120 case ISD::ConstantPool:
2121 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
2122 case ISD::BITCAST:
2123 return lowerBITCAST(Op, DAG);
2124 case ISD::VASTART:
2125 return lowerVASTART(Op, DAG);
2126 case ISD::VACOPY:
2127 return lowerVACOPY(Op, DAG);
2128 case ISD::DYNAMIC_STACKALLOC:
2129 return lowerDYNAMIC_STACKALLOC(Op, DAG);
2130 case ISD::SMUL_LOHI:
2131 return lowerSMUL_LOHI(Op, DAG);
2132 case ISD::UMUL_LOHI:
2133 return lowerUMUL_LOHI(Op, DAG);
2134 case ISD::SDIVREM:
2135 return lowerSDIVREM(Op, DAG);
2136 case ISD::UDIVREM:
2137 return lowerUDIVREM(Op, DAG);
2138 case ISD::OR:
2139 return lowerOR(Op, DAG);
2140 case ISD::ATOMIC_SWAP:
2141 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
2142 case ISD::ATOMIC_LOAD_ADD:
2143 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
2144 case ISD::ATOMIC_LOAD_SUB:
2145 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
2146 case ISD::ATOMIC_LOAD_AND:
2147 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
2148 case ISD::ATOMIC_LOAD_OR:
2149 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
2150 case ISD::ATOMIC_LOAD_XOR:
2151 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
2152 case ISD::ATOMIC_LOAD_NAND:
2153 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
2154 case ISD::ATOMIC_LOAD_MIN:
2155 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
2156 case ISD::ATOMIC_LOAD_MAX:
2157 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
2158 case ISD::ATOMIC_LOAD_UMIN:
2159 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
2160 case ISD::ATOMIC_LOAD_UMAX:
2161 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
2162 case ISD::ATOMIC_CMP_SWAP:
2163 return lowerATOMIC_CMP_SWAP(Op, DAG);
2164 case ISD::STACKSAVE:
2165 return lowerSTACKSAVE(Op, DAG);
2166 case ISD::STACKRESTORE:
2167 return lowerSTACKRESTORE(Op, DAG);
2168 case ISD::PREFETCH:
2169 return lowerPREFETCH(Op, DAG);
2170 default:
2171 llvm_unreachable("Unexpected node to lower");
2175 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
2176 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
2181 OPCODE(PCREL_WRAPPER);
2182 OPCODE(PCREL_OFFSET);
2187 OPCODE(SELECT_CCMASK);
2188 OPCODE(ADJDYNALLOC);
2189 OPCODE(EXTRACT_ACCESS);
2190 OPCODE(UMUL_LOHI64);
2206 OPCODE(SEARCH_STRING);
2208 OPCODE(ATOMIC_SWAPW);
2209 OPCODE(ATOMIC_LOADW_ADD);
2210 OPCODE(ATOMIC_LOADW_SUB);
2211 OPCODE(ATOMIC_LOADW_AND);
2212 OPCODE(ATOMIC_LOADW_OR);
2213 OPCODE(ATOMIC_LOADW_XOR);
2214 OPCODE(ATOMIC_LOADW_NAND);
2215 OPCODE(ATOMIC_LOADW_MIN);
2216 OPCODE(ATOMIC_LOADW_MAX);
2217 OPCODE(ATOMIC_LOADW_UMIN);
2218 OPCODE(ATOMIC_LOADW_UMAX);
2219 OPCODE(ATOMIC_CMP_SWAPW);
2226 //===----------------------------------------------------------------------===//
2227 // Custom insertion
2228 //===----------------------------------------------------------------------===//
2230 // Create a new basic block after MBB.
2231 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
2232 MachineFunction &MF = *MBB->getParent();
2233 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
2234 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
2238 // Split MBB after MI and return the new block (the one that contains
2239 // instructions after MI).
2240 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
2241 MachineBasicBlock *MBB) {
2242 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2243 NewMBB->splice(NewMBB->begin(), MBB,
2244 llvm::next(MachineBasicBlock::iterator(MI)),
2246 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2250 // Split MBB before MI and return the new block (the one that contains MI).
2251 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
2252 MachineBasicBlock *MBB) {
2253 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2254 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2255 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2259 // Force base value Base into a register before MI. Return the register.
2260 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
2261 const SystemZInstrInfo *TII) {
2263 return Base.getReg();
2265 MachineBasicBlock *MBB = MI->getParent();
2266 MachineFunction &MF = *MBB->getParent();
2267 MachineRegisterInfo &MRI = MF.getRegInfo();
2269 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2270 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
2271 .addOperand(Base).addImm(0).addReg(0);
2275 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
2277 SystemZTargetLowering::emitSelect(MachineInstr *MI,
2278 MachineBasicBlock *MBB) const {
2279 const SystemZInstrInfo *TII = TM.getInstrInfo();
2281 unsigned DestReg = MI->getOperand(0).getReg();
2282 unsigned TrueReg = MI->getOperand(1).getReg();
2283 unsigned FalseReg = MI->getOperand(2).getReg();
2284 unsigned CCValid = MI->getOperand(3).getImm();
2285 unsigned CCMask = MI->getOperand(4).getImm();
2286 DebugLoc DL = MI->getDebugLoc();
2288 MachineBasicBlock *StartMBB = MBB;
2289 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
2290 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
2293 // BRC CCMask, JoinMBB
2294 // # fallthrough to FalseMBB
2296 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2297 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
2298 MBB->addSuccessor(JoinMBB);
2299 MBB->addSuccessor(FalseMBB);
2302 // # fallthrough to JoinMBB
2304 MBB->addSuccessor(JoinMBB);
2307 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
2310 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
2311 .addReg(TrueReg).addMBB(StartMBB)
2312 .addReg(FalseReg).addMBB(FalseMBB);
2314 MI->eraseFromParent();
2318 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
2319 // StoreOpcode is the store to use and Invert says whether the store should
2320 // happen when the condition is false rather than true. If a STORE ON
2321 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
2323 SystemZTargetLowering::emitCondStore(MachineInstr *MI,
2324 MachineBasicBlock *MBB,
2325 unsigned StoreOpcode, unsigned STOCOpcode,
2326 bool Invert) const {
2327 const SystemZInstrInfo *TII = TM.getInstrInfo();
2329 unsigned SrcReg = MI->getOperand(0).getReg();
2330 MachineOperand Base = MI->getOperand(1);
2331 int64_t Disp = MI->getOperand(2).getImm();
2332 unsigned IndexReg = MI->getOperand(3).getReg();
2333 unsigned CCValid = MI->getOperand(4).getImm();
2334 unsigned CCMask = MI->getOperand(5).getImm();
2335 DebugLoc DL = MI->getDebugLoc();
2337 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
2339 // Use STOCOpcode if possible. We could use different store patterns in
2340 // order to avoid matching the index register, but the performance trade-offs
2341 // might be more complicated in that case.
2342 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
2345 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
2346 .addReg(SrcReg).addOperand(Base).addImm(Disp)
2347 .addImm(CCValid).addImm(CCMask);
2348 MI->eraseFromParent();
2352 // Get the condition needed to branch around the store.
2356 MachineBasicBlock *StartMBB = MBB;
2357 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
2358 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
2361 // BRC CCMask, JoinMBB
2362 // # fallthrough to FalseMBB
2364 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2365 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
2366 MBB->addSuccessor(JoinMBB);
2367 MBB->addSuccessor(FalseMBB);
2370 // store %SrcReg, %Disp(%Index,%Base)
2371 // # fallthrough to JoinMBB
2373 BuildMI(MBB, DL, TII->get(StoreOpcode))
2374 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
2375 MBB->addSuccessor(JoinMBB);
2377 MI->eraseFromParent();
2381 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
2382 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
2383 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
2384 // BitSize is the width of the field in bits, or 0 if this is a partword
2385 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
2386 // is one of the operands. Invert says whether the field should be
2387 // inverted after performing BinOpcode (e.g. for NAND).
2389 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
2390 MachineBasicBlock *MBB,
2393 bool Invert) const {
2394 const SystemZInstrInfo *TII = TM.getInstrInfo();
2395 MachineFunction &MF = *MBB->getParent();
2396 MachineRegisterInfo &MRI = MF.getRegInfo();
2397 bool IsSubWord = (BitSize < 32);
2399 // Extract the operands. Base can be a register or a frame index.
2400 // Src2 can be a register or immediate.
2401 unsigned Dest = MI->getOperand(0).getReg();
2402 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
2403 int64_t Disp = MI->getOperand(2).getImm();
2404 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
2405 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
2406 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
2407 DebugLoc DL = MI->getDebugLoc();
2409 BitSize = MI->getOperand(6).getImm();
2411 // Subword operations use 32-bit registers.
2412 const TargetRegisterClass *RC = (BitSize <= 32 ?
2413 &SystemZ::GR32BitRegClass :
2414 &SystemZ::GR64BitRegClass);
2415 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
2416 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
2418 // Get the right opcodes for the displacement.
2419 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
2420 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
2421 assert(LOpcode && CSOpcode && "Displacement out of range");
2423 // Create virtual registers for temporary results.
2424 unsigned OrigVal = MRI.createVirtualRegister(RC);
2425 unsigned OldVal = MRI.createVirtualRegister(RC);
2426 unsigned NewVal = (BinOpcode || IsSubWord ?
2427 MRI.createVirtualRegister(RC) : Src2.getReg());
2428 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
2429 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
2431 // Insert a basic block for the main loop.
2432 MachineBasicBlock *StartMBB = MBB;
2433 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2434 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2438 // %OrigVal = L Disp(%Base)
2439 // # fall through to LoopMBB
2441 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
2442 .addOperand(Base).addImm(Disp).addReg(0);
2443 MBB->addSuccessor(LoopMBB);
2446 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
2447 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
2448 // %RotatedNewVal = OP %RotatedOldVal, %Src2
2449 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
2450 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
2452 // # fall through to DoneMBB
2454 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2455 .addReg(OrigVal).addMBB(StartMBB)
2456 .addReg(Dest).addMBB(LoopMBB);
2458 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
2459 .addReg(OldVal).addReg(BitShift).addImm(0);
2461 // Perform the operation normally and then invert every bit of the field.
2462 unsigned Tmp = MRI.createVirtualRegister(RC);
2463 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
2464 .addReg(RotatedOldVal).addOperand(Src2);
2466 // XILF with the upper BitSize bits set.
2467 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
2468 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
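// (For example, an 8-bit field gives the mask 0xff000000, so only the
// rotated field's bits are inverted.)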
2469 else if (BitSize == 32)
2470 // XILF with every bit set.
2471 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
2472 .addReg(Tmp).addImm(~uint32_t(0));
2474 // Use LCGR and add -1 to the result, which is more compact than
2475 // an XILF, XILH pair.
2476 unsigned Tmp2 = MRI.createVirtualRegister(RC);
2477 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
2478 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
2479 .addReg(Tmp2).addImm(-1);
2481 } else if (BinOpcode)
2482 // A simple binary operation.
2483 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
2484 .addReg(RotatedOldVal).addOperand(Src2);
2486 // Use RISBG to rotate Src2 into position and use it to replace the
2487 // field in RotatedOldVal.
2488 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
2489 .addReg(RotatedOldVal).addReg(Src2.getReg())
2490 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
2492 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2493 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2494 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2495 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
2496 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2497 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2498 MBB->addSuccessor(LoopMBB);
2499 MBB->addSuccessor(DoneMBB);
2501 MI->eraseFromParent();
2505 // Implement EmitInstrWithCustomInserter for pseudo
2506 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
2507 // instruction that should be used to compare the current field with the
2508 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
2509 // for when the current field should be kept. BitSize is the width of
2510 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
2512 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
2513 MachineBasicBlock *MBB,
2514 unsigned CompareOpcode,
2515 unsigned KeepOldMask,
2516 unsigned BitSize) const {
2517 const SystemZInstrInfo *TII = TM.getInstrInfo();
2518 MachineFunction &MF = *MBB->getParent();
2519 MachineRegisterInfo &MRI = MF.getRegInfo();
2520 bool IsSubWord = (BitSize < 32);
2522 // Extract the operands. Base can be a register or a frame index.
2523 unsigned Dest = MI->getOperand(0).getReg();
2524 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
2525 int64_t Disp = MI->getOperand(2).getImm();
2526 unsigned Src2 = MI->getOperand(3).getReg();
2527 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
2528 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
2529 DebugLoc DL = MI->getDebugLoc();
2531 BitSize = MI->getOperand(6).getImm();
2533 // Subword operations use 32-bit registers.
2534 const TargetRegisterClass *RC = (BitSize <= 32 ?
2535 &SystemZ::GR32BitRegClass :
2536 &SystemZ::GR64BitRegClass);
2537 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
2538 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
2540 // Get the right opcodes for the displacement.
2541 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
2542 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
2543 assert(LOpcode && CSOpcode && "Displacement out of range");
2545 // Create virtual registers for temporary results.
2546 unsigned OrigVal = MRI.createVirtualRegister(RC);
2547 unsigned OldVal = MRI.createVirtualRegister(RC);
2548 unsigned NewVal = MRI.createVirtualRegister(RC);
2549 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
2550 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
2551 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
2553 // Insert 3 basic blocks for the loop.
2554 MachineBasicBlock *StartMBB = MBB;
2555 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2556 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2557 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
2558 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
2562 // %OrigVal = L Disp(%Base)
2563 // # fall through to LoopMBB
2565 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
2566 .addOperand(Base).addImm(Disp).addReg(0);
2567 MBB->addSuccessor(LoopMBB);
2570 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
2571 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
2572 // CompareOpcode %RotatedOldVal, %Src2
2573 // BRC KeepOldMask, UpdateMBB
2575 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2576 .addReg(OrigVal).addMBB(StartMBB)
2577 .addReg(Dest).addMBB(UpdateMBB);
2579 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
2580 .addReg(OldVal).addReg(BitShift).addImm(0);
2581 BuildMI(MBB, DL, TII->get(CompareOpcode))
2582 .addReg(RotatedOldVal).addReg(Src2);
2583 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2584 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
2585 MBB->addSuccessor(UpdateMBB);
2586 MBB->addSuccessor(UseAltMBB);
2589 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
2590 // # fall through to UpdateMBB
2593 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
2594 .addReg(RotatedOldVal).addReg(Src2)
2595 .addImm(32).addImm(31 + BitSize).addImm(0);
2596 MBB->addSuccessor(UpdateMBB);
2599 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
2600 // [ %RotatedAltVal, UseAltMBB ]
2601 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
2602 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
2604 // # fall through to DoneMBB
2606 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
2607 .addReg(RotatedOldVal).addMBB(LoopMBB)
2608 .addReg(RotatedAltVal).addMBB(UseAltMBB);
2610 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2611 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2612 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2613 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
2614 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2615 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2616 MBB->addSuccessor(LoopMBB);
2617 MBB->addSuccessor(DoneMBB);
2619 MI->eraseFromParent();
2623 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
2626 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
2627 MachineBasicBlock *MBB) const {
2628 const SystemZInstrInfo *TII = TM.getInstrInfo();
2629 MachineFunction &MF = *MBB->getParent();
2630 MachineRegisterInfo &MRI = MF.getRegInfo();
2632 // Extract the operands. Base can be a register or a frame index.
2633 unsigned Dest = MI->getOperand(0).getReg();
2634 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
2635 int64_t Disp = MI->getOperand(2).getImm();
2636 unsigned OrigCmpVal = MI->getOperand(3).getReg();
2637 unsigned OrigSwapVal = MI->getOperand(4).getReg();
2638 unsigned BitShift = MI->getOperand(5).getReg();
2639 unsigned NegBitShift = MI->getOperand(6).getReg();
2640 int64_t BitSize = MI->getOperand(7).getImm();
2641 DebugLoc DL = MI->getDebugLoc();
2643 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
2645 // Get the right opcodes for the displacement.
2646 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
2647 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
2648 assert(LOpcode && CSOpcode && "Displacement out of range");
2650 // Create virtual registers for temporary results.
2651 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
2652 unsigned OldVal = MRI.createVirtualRegister(RC);
2653 unsigned CmpVal = MRI.createVirtualRegister(RC);
2654 unsigned SwapVal = MRI.createVirtualRegister(RC);
2655 unsigned StoreVal = MRI.createVirtualRegister(RC);
2656 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
2657 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
2658 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
2660 // Insert 2 basic blocks for the loop.
2661 MachineBasicBlock *StartMBB = MBB;
2662 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2663 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2664 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
2668 // %OrigOldVal = L Disp(%Base)
2669 // # fall through to LoopMBB
2671 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
2672 .addOperand(Base).addImm(Disp).addReg(0);
2673 MBB->addSuccessor(LoopMBB);
2676 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
2677 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
2678 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
2679 // %Dest = RLL %OldVal, BitSize(%BitShift)
2680 // ^^ The low BitSize bits contain the field
2682 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
2683 // ^^ Replace the upper 32-BitSize bits of the
2684 // comparison value with those that we loaded,
2685 // so that we can use a full word comparison.
2686 // CR %Dest, %RetryCmpVal
2688 // # Fall through to SetMBB
2690 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2691 .addReg(OrigOldVal).addMBB(StartMBB)
2692 .addReg(RetryOldVal).addMBB(SetMBB);
2693 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
2694 .addReg(OrigCmpVal).addMBB(StartMBB)
2695 .addReg(RetryCmpVal).addMBB(SetMBB);
2696 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
2697 .addReg(OrigSwapVal).addMBB(StartMBB)
2698 .addReg(RetrySwapVal).addMBB(SetMBB);
2699 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
2700 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
2701 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
2702 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
2703 BuildMI(MBB, DL, TII->get(SystemZ::CR))
2704 .addReg(Dest).addReg(RetryCmpVal);
2705 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2706 .addImm(SystemZ::CCMASK_ICMP)
2707 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
2708 MBB->addSuccessor(DoneMBB);
2709 MBB->addSuccessor(SetMBB);
2712 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
2713 // ^^ Replace the upper 32-BitSize bits of the new
2714 // value with those that we loaded.
2715 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
2716 // ^^ Rotate the new field to its proper position.
2717 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
2719 // # fall through to DoneMBB
2721 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
2722 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
2723 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
2724 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
2725 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
2726 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
2727 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2728 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
2729 MBB->addSuccessor(LoopMBB);
2730 MBB->addSuccessor(DoneMBB);
2732 MI->eraseFromParent();
2736 // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
2737 // if the high register of the GR128 value must be cleared or false if
2738 // it's "don't care". SubReg is subreg_l32 when extending a GR32
2739 // and subreg_l64 when extending a GR64.
2741 SystemZTargetLowering::emitExt128(MachineInstr *MI,
2742 MachineBasicBlock *MBB,
2743 bool ClearEven, unsigned SubReg) const {
2744 const SystemZInstrInfo *TII = TM.getInstrInfo();
2745 MachineFunction &MF = *MBB->getParent();
2746 MachineRegisterInfo &MRI = MF.getRegInfo();
2747 DebugLoc DL = MI->getDebugLoc();
2749 unsigned Dest = MI->getOperand(0).getReg();
2750 unsigned Src = MI->getOperand(1).getReg();
2751 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2753 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
2755 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2756 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2758 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
2760 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
2761 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
2764 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
2765 .addReg(In128).addReg(Src).addImm(SubReg);
2767 MI->eraseFromParent();
2772 SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
2773 MachineBasicBlock *MBB,
2774 unsigned Opcode) const {
2775 const SystemZInstrInfo *TII = TM.getInstrInfo();
2776 MachineFunction &MF = *MBB->getParent();
2777 MachineRegisterInfo &MRI = MF.getRegInfo();
2778 DebugLoc DL = MI->getDebugLoc();
2780 MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
2781 uint64_t DestDisp = MI->getOperand(1).getImm();
2782 MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
2783 uint64_t SrcDisp = MI->getOperand(3).getImm();
2784 uint64_t Length = MI->getOperand(4).getImm();
2786 // When generating more than one CLC, all but the last will need to
2787 // branch to the end when a difference is found.
2788 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
2789 splitBlockAfter(MI, MBB) : 0);
2791 // Check for the loop form, in which operand 5 is the trip count.
2792 if (MI->getNumExplicitOperands() > 5) {
2793 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
2795 uint64_t StartCountReg = MI->getOperand(5).getReg();
2796 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
2797 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
2798 forceReg(MI, DestBase, TII));
2800 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
2801 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
2802 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
2803 MRI.createVirtualRegister(RC));
2804 uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
2805 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
2806 MRI.createVirtualRegister(RC));
2808 RC = &SystemZ::GR64BitRegClass;
2809 uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
2810 uint64_t NextCountReg = MRI.createVirtualRegister(RC);
2812 MachineBasicBlock *StartMBB = MBB;
2813 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2814 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2815 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
2818 // # fall through to LoopMBB
2819 MBB->addSuccessor(LoopMBB);
2822 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
2823 // [ %NextDestReg, NextMBB ]
2824 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
2825 // [ %NextSrcReg, NextMBB ]
2826 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
2827 // [ %NextCountReg, NextMBB ]
2828 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
2829 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
2832 // The prefetch is used only for MVC. The JLH is used only for CLC.
2835 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
2836 .addReg(StartDestReg).addMBB(StartMBB)
2837 .addReg(NextDestReg).addMBB(NextMBB);
2838 if (!HaveSingleBase)
2839 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
2840 .addReg(StartSrcReg).addMBB(StartMBB)
2841 .addReg(NextSrcReg).addMBB(NextMBB);
2842 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
2843 .addReg(StartCountReg).addMBB(StartMBB)
2844 .addReg(NextCountReg).addMBB(NextMBB);
2845 if (Opcode == SystemZ::MVC)
2846 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
2847 .addImm(SystemZ::PFD_WRITE)
2848 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
2849 BuildMI(MBB, DL, TII->get(Opcode))
2850 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
2851 .addReg(ThisSrcReg).addImm(SrcDisp);
2853 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2854 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2856 MBB->addSuccessor(EndMBB);
2857 MBB->addSuccessor(NextMBB);
2861 // %NextDestReg = LA 256(%ThisDestReg)
2862 // %NextSrcReg = LA 256(%ThisSrcReg)
2863 // %NextCountReg = AGHI %ThisCountReg, -1
2864 // CGHI %NextCountReg, 0
2866 // # fall through to DoneMBB
2868 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
2871 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
2872 .addReg(ThisDestReg).addImm(256).addReg(0);
2873 if (!HaveSingleBase)
2874 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
2875 .addReg(ThisSrcReg).addImm(256).addReg(0);
2876 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
2877 .addReg(ThisCountReg).addImm(-1);
2878 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
2879 .addReg(NextCountReg).addImm(0);
2880 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2881 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2883 MBB->addSuccessor(LoopMBB);
2884 MBB->addSuccessor(DoneMBB);
2886 DestBase = MachineOperand::CreateReg(NextDestReg, false);
2887 SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
2891 // Handle any remaining bytes with straight-line code.
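// For example (illustrative), a remaining Length of 600 becomes three
// instructions covering 256, 256 and 88 bytes, with DestDisp and SrcDisp
// advancing by the chunk size each time; LAY rebases the address below if a
// displacement no longer fits in the 12-bit unsigned field.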
2892 while (Length > 0) {
2893 uint64_t ThisLength = std::min(Length, uint64_t(256));
2894 // The previous iteration might have created out-of-range displacements.
2895 // Apply them using LAY if so.
2896 if (!isUInt<12>(DestDisp)) {
2897 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2898 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
2899 .addOperand(DestBase).addImm(DestDisp).addReg(0);
2900 DestBase = MachineOperand::CreateReg(Reg, false);
2903 if (!isUInt<12>(SrcDisp)) {
2904 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
2905 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
2906 .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
2907 SrcBase = MachineOperand::CreateReg(Reg, false);
2910 BuildMI(*MBB, MI, DL, TII->get(Opcode))
2911 .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
2912 .addOperand(SrcBase).addImm(SrcDisp);
2913 DestDisp += ThisLength;
2914 SrcDisp += ThisLength;
2915 Length -= ThisLength;
2916 // If there's another CLC to go, branch to the end if a difference was found.
2918 if (EndMBB && Length > 0) {
2919 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
2920 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2921 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
2923 MBB->addSuccessor(EndMBB);
2924 MBB->addSuccessor(NextMBB);
2929 MBB->addSuccessor(EndMBB);
2931 MBB->addLiveIn(SystemZ::CC);
2934 MI->eraseFromParent();
2938 // Decompose string pseudo-instruction MI into a loop that continually performs
2939 // Opcode until CC != 3.
2941 SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
2942 MachineBasicBlock *MBB,
2943 unsigned Opcode) const {
2944 const SystemZInstrInfo *TII = TM.getInstrInfo();
2945 MachineFunction &MF = *MBB->getParent();
2946 MachineRegisterInfo &MRI = MF.getRegInfo();
2947 DebugLoc DL = MI->getDebugLoc();
2949 uint64_t End1Reg = MI->getOperand(0).getReg();
2950 uint64_t Start1Reg = MI->getOperand(1).getReg();
2951 uint64_t Start2Reg = MI->getOperand(2).getReg();
2952 uint64_t CharReg = MI->getOperand(3).getReg();
2954 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
2955 uint64_t This1Reg = MRI.createVirtualRegister(RC);
2956 uint64_t This2Reg = MRI.createVirtualRegister(RC);
2957 uint64_t End2Reg = MRI.createVirtualRegister(RC);
2959 MachineBasicBlock *StartMBB = MBB;
2960 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
2961 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2964 // # fall through to LoopMBB
2965 MBB->addSuccessor(LoopMBB);
2968 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
2969 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
2971 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
2973 // # fall through to DoneMBB
2975 // The load of R0L can be hoisted by post-RA LICM.
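// (Note: CC 3 from these string instructions means the operation stopped
// after a CPU-determined number of bytes without reaching a result, so the
// loop simply reissues it with the updated addresses; any other CC means it
// completed and we fall through to DoneMBB.)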
2978 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
2979 .addReg(Start1Reg).addMBB(StartMBB)
2980 .addReg(End1Reg).addMBB(LoopMBB);
2981 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
2982 .addReg(Start2Reg).addMBB(StartMBB)
2983 .addReg(End2Reg).addMBB(LoopMBB);
2984 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
2985 BuildMI(MBB, DL, TII->get(Opcode))
2986 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
2987 .addReg(This1Reg).addReg(This2Reg);
2988 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2989 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
2990 MBB->addSuccessor(LoopMBB);
2991 MBB->addSuccessor(DoneMBB);
2993 DoneMBB->addLiveIn(SystemZ::CC);
2995 MI->eraseFromParent();
2999 MachineBasicBlock *SystemZTargetLowering::
3000 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
3001 switch (MI->getOpcode()) {
3002 case SystemZ::Select32Mux:
3003 case SystemZ::Select32:
3004 case SystemZ::SelectF32:
3005 case SystemZ::Select64:
3006 case SystemZ::SelectF64:
3007 case SystemZ::SelectF128:
3008 return emitSelect(MI, MBB);
3010 case SystemZ::CondStore8Mux:
3011 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
3012 case SystemZ::CondStore8MuxInv:
3013 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
3014 case SystemZ::CondStore16Mux:
3015 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
3016 case SystemZ::CondStore16MuxInv:
3017 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
3018 case SystemZ::CondStore8:
3019 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
3020 case SystemZ::CondStore8Inv:
3021 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
3022 case SystemZ::CondStore16:
3023 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
3024 case SystemZ::CondStore16Inv:
3025 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
3026 case SystemZ::CondStore32:
3027 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
3028 case SystemZ::CondStore32Inv:
3029 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
3030 case SystemZ::CondStore64:
3031 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
3032 case SystemZ::CondStore64Inv:
3033 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
3034 case SystemZ::CondStoreF32:
3035 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
3036 case SystemZ::CondStoreF32Inv:
3037 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
3038 case SystemZ::CondStoreF64:
3039 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
3040 case SystemZ::CondStoreF64Inv:
3041 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
3043 case SystemZ::AEXT128_64:
3044 return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
3045 case SystemZ::ZEXT128_32:
3046 return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
3047 case SystemZ::ZEXT128_64:
3048 return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
3050 case SystemZ::ATOMIC_SWAPW:
3051 return emitAtomicLoadBinary(MI, MBB, 0, 0);
3052 case SystemZ::ATOMIC_SWAP_32:
3053 return emitAtomicLoadBinary(MI, MBB, 0, 32);
3054 case SystemZ::ATOMIC_SWAP_64:
3055 return emitAtomicLoadBinary(MI, MBB, 0, 64);
3057 case SystemZ::ATOMIC_LOADW_AR:
3058 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
3059 case SystemZ::ATOMIC_LOADW_AFI:
3060 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
3061 case SystemZ::ATOMIC_LOAD_AR:
3062 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
3063 case SystemZ::ATOMIC_LOAD_AHI:
3064 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
3065 case SystemZ::ATOMIC_LOAD_AFI:
3066 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
3067 case SystemZ::ATOMIC_LOAD_AGR:
3068 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
3069 case SystemZ::ATOMIC_LOAD_AGHI:
3070 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
3071 case SystemZ::ATOMIC_LOAD_AGFI:
3072 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
3074 case SystemZ::ATOMIC_LOADW_SR:
3075 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
3076 case SystemZ::ATOMIC_LOAD_SR:
3077 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
3078 case SystemZ::ATOMIC_LOAD_SGR:
3079 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
3081 case SystemZ::ATOMIC_LOADW_NR:
3082 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
3083 case SystemZ::ATOMIC_LOADW_NILH:
3084 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
3085 case SystemZ::ATOMIC_LOAD_NR:
3086 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
3087 case SystemZ::ATOMIC_LOAD_NILL:
3088 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
3089 case SystemZ::ATOMIC_LOAD_NILH:
3090 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
3091 case SystemZ::ATOMIC_LOAD_NILF:
3092 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
3093 case SystemZ::ATOMIC_LOAD_NGR:
3094 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
3095 case SystemZ::ATOMIC_LOAD_NILL64:
3096 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
3097 case SystemZ::ATOMIC_LOAD_NILH64:
3098 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
3099 case SystemZ::ATOMIC_LOAD_NIHL64:
3100 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
3101 case SystemZ::ATOMIC_LOAD_NIHH64:
3102 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
3103 case SystemZ::ATOMIC_LOAD_NILF64:
3104 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
3105 case SystemZ::ATOMIC_LOAD_NIHF64:
3106 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
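
  // Atomic load-and-XOR operations.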
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
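
  // Atomic load-and-NAND operations.  The trailing "true" asks
  // emitAtomicLoadBinary to complement the result of the AND before
  // storing it back.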
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
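
  // Atomic signed minimum: keep the old value when the signed comparison
  // (CR/CGR) says old <= new (CCMASK_CMP_LE).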
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);
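
  // Atomic signed maximum: keep the old value when old >= new
  // (CCMASK_CMP_GE).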
  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);
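
  // Atomic unsigned minimum, using unsigned comparisons (CLR/CLGR).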
  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);
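
  // Atomic unsigned maximum.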
  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);
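
  // Subword (8- and 16-bit) atomic compare-and-swap.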
  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
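  // Block memory operations (move, AND, OR, XOR and compare), expanded by
  // emitMemMemWrapper into MVC/NC/OC/XC/CLC sequences or loops.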
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
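  // String instructions CLST (compare), MVST (move) and SRST (search),
  // wrapped in a retry loop by emitStringWrapper.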
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");