X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FSystemZ%2FSystemZISelLowering.cpp;h=32db11298624a740d4f7334b36d94649ac580e8a;hp=da4ad38e46567f0884c3e7b29378e6dac561ec99;hb=974a445bd90795248274493eda5cdbf6721910f7;hpb=35b7bebe1162326c38217ff80d4a49fbbffcc365 diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index da4ad38e465..32db1129862 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -22,9 +22,44 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include using namespace llvm; +namespace { +// Represents a sequence for extracting a 0/1 value from an IPM result: +// (((X ^ XORValue) + AddValue) >> Bit) +struct IPMConversion { + IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit) + : XORValue(xorValue), AddValue(addValue), Bit(bit) {} + + int64_t XORValue; + int64_t AddValue; + unsigned Bit; +}; + +// Represents information about a comparison. +struct Comparison { + Comparison(SDValue Op0In, SDValue Op1In) + : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {} + + // The operands to the comparison. + SDValue Op0, Op1; + + // The opcode that should be used to compare Op0 and Op1. + unsigned Opcode; + + // A SystemZICMP value. Only used for integer comparisons. + unsigned ICmpType; + + // The mask of CC values that Opcode can produce. + unsigned CCValid; + + // The mask of CC values for which the original condition is true. + unsigned CCMask; +}; +} + // Classify VT as either 32 or 64 bit. static bool is32Bit(EVT VT) { switch (VT.getSimpleVT().SimpleTy) { @@ -51,7 +86,10 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) MVT PtrVT = getPointerTy(); // Set up the register classes. - addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass); + if (Subtarget.hasHighWord()) + addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass); + else + addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass); addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass); addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass); addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass); @@ -83,8 +121,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) ++I) { MVT VT = MVT::SimpleValueType(I); if (isTypeLegal(VT)) { - // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND). - setOperationAction(ISD::SETCC, VT, Expand); + // Lower SET_CC into an IPM-based sequence. + setOperationAction(ISD::SETCC, VT, Custom); // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE). setOperationAction(ISD::SELECT, VT, Expand); @@ -116,10 +154,14 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::SDIVREM, VT, Custom); setOperationAction(ISD::UDIVREM, VT, Custom); - // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP. - // FIXME: probably much too conservative. - setOperationAction(ISD::ATOMIC_LOAD, VT, Expand); - setOperationAction(ISD::ATOMIC_STORE, VT, Expand); + // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and + // stores, putting a serialization instruction after the stores. + setOperationAction(ISD::ATOMIC_LOAD, VT, Custom); + setOperationAction(ISD::ATOMIC_STORE, VT, Custom); + + // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are + // available, or if the operand is constant. 
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); // No special instructions for these. setOperationAction(ISD::CTPOP, VT, Expand); @@ -128,9 +170,11 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); setOperationAction(ISD::ROTR, VT, Expand); - // Use *MUL_LOHI where possible and a wider multiplication otherwise. + // Use *MUL_LOHI where possible instead of MULH*. setOperationAction(ISD::MULHS, VT, Expand); setOperationAction(ISD::MULHU, VT, Expand); + setOperationAction(ISD::SMUL_LOHI, VT, Custom); + setOperationAction(ISD::UMUL_LOHI, VT, Custom); // We have instructions for signed but not unsigned FP conversion. setOperationAction(ISD::FP_TO_UINT, VT, Expand); @@ -165,14 +209,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) // Give LowerOperation the chance to replace 64-bit ORs with subregs. setOperationAction(ISD::OR, MVT::i64, Custom); - // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR), - // but they aren't really worth using. There is no 64-bit SMUL_LOHI, - // but there is a 64-bit UMUL_LOHI: MLGR. - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom); - // FIXME: Can we support these natively? setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); @@ -200,10 +236,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); - // Expand these using getExceptionSelectorRegister() and - // getExceptionPointerRegister(). - setOperationAction(ISD::EXCEPTIONADDR, PtrVT, Expand); - setOperationAction(ISD::EHSELECTION, PtrVT, Expand); + // Handle prefetches with PFD or PFDRL. + setOperationAction(ISD::PREFETCH, MVT::Other, Custom); // Handle floating-point types. for (unsigned I = MVT::FIRST_FP_VALUETYPE; @@ -214,6 +248,15 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) // We can use FI for FRINT. setOperationAction(ISD::FRINT, VT, Legal); + // We can use the extended form of FI for other rounding operations. + if (Subtarget.hasFPExtension()) { + setOperationAction(ISD::FNEARBYINT, VT, Legal); + setOperationAction(ISD::FFLOOR, VT, Legal); + setOperationAction(ISD::FCEIL, VT, Legal); + setOperationAction(ISD::FTRUNC, VT, Legal); + setOperationAction(ISD::FROUND, VT, Legal); + } + // No special instructions for these. setOperationAction(ISD::FSIN, VT, Expand); setOperationAction(ISD::FCOS, VT, Expand); @@ -246,6 +289,43 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::VACOPY, MVT::Other, Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); + + // We want to use MVC in preference to even a single load/store pair. + MaxStoresPerMemcpy = 0; + MaxStoresPerMemcpyOptSize = 0; + + // The main memset sequence is a byte store followed by an MVC. + // Two STC or MV..I stores win over that, but the kind of fused stores + // generated by target-independent code don't when the byte value is + // variable. E.g. "STC ;MHI ,257;STH " is not better + // than "STC;MVC". Handle the choice in target-specific code instead. 
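The memset comment above (and the MaxStoresPerMemset settings just below) lean on MVC's strictly left-to-right, byte-at-a-time copy semantics: store the fill byte once, then an overlapping copy propagates it across the buffer. A minimal C++ model of that behaviour, purely illustrative and not part of the patch (the helper name is invented):

#include <cstddef>

// Models "STC" (store one byte) followed by "MVC dest+1(len-1),dest":
// because MVC moves bytes strictly left to right, the overlapping copy
// replicates the byte stored at P[0] over the whole range.
static void memsetViaOverlappingCopy(unsigned char *P, unsigned char Byte,
                                     size_t Len) {
  if (Len == 0)
    return;
  P[0] = Byte;
  for (size_t I = 1; I < Len; ++I)
    P[I] = P[I - 1];
}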
+ MaxStoresPerMemset = 0; + MaxStoresPerMemsetOptSize = 0; +} + +EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { + if (!VT.isVector()) + return MVT::i32; + return VT.changeVectorElementTypeToInteger(); +} + +bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + VT = VT.getScalarType(); + + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + case MVT::f64: + return true; + case MVT::f128: + return false; + default: + break; + } + + return false; } bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { @@ -263,6 +343,37 @@ bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, return true; } +bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM, + Type *Ty) const { + // Punt on globals for now, although they can be used in limited + // RELATIVE LONG cases. + if (AM.BaseGV) + return false; + + // Require a 20-bit signed offset. + if (!isInt<20>(AM.BaseOffs)) + return false; + + // Indexing is OK but no scale factor can be applied. + return AM.Scale == 0 || AM.Scale == 1; +} + +bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const { + if (!FromType->isIntegerTy() || !ToType->isIntegerTy()) + return false; + unsigned FromBits = FromType->getPrimitiveSizeInBits(); + unsigned ToBits = ToType->getPrimitiveSizeInBits(); + return FromBits > ToBits; +} + +bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const { + if (!FromVT.isInteger() || !ToVT.isInteger()) + return false; + unsigned FromBits = FromVT.getSizeInBits(); + unsigned ToBits = ToVT.getSizeInBits(); + return FromBits > ToBits; +} + //===----------------------------------------------------------------------===// // Inline asm support //===----------------------------------------------------------------------===// @@ -274,6 +385,7 @@ SystemZTargetLowering::getConstraintType(const std::string &Constraint) const { case 'a': // Address register case 'd': // Data register (equivalent to 'r') case 'f': // Floating-point register + case 'h': // High-part register case 'r': // General-purpose register return C_RegisterClass; @@ -316,6 +428,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info, case 'a': // Address register case 'd': // Data register (equivalent to 'r') + case 'h': // High-part register case 'r': // General-purpose register if (CallOperandVal->getType()->isIntegerTy()) weight = CW_Register; @@ -359,6 +472,22 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info, return weight; } +// Parse a "{tNNN}" register constraint for which the register type "t" +// has already been verified. MC is the class associated with "t" and +// Map maps 0-based register numbers to LLVM register numbers. 
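The isLegalAddressingMode hook above encodes the base + index + displacement form used by most SystemZ memory operands: a signed 20-bit displacement and an optional index register with no scaling. A small stand-alone sketch of the same rule (the function name is invented):

#include <cstdint>

// True if Disp fits in a signed 20-bit field and the index register, if any,
// is unscaled - mirroring the BaseOffs/Scale checks in the hook above.
static bool fitsDXBForm(int64_t Disp, int Scale) {
  bool DispOK = Disp >= -(int64_t(1) << 19) && Disp < (int64_t(1) << 19);
  return DispOK && (Scale == 0 || Scale == 1);
}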
+static std::pair +parseRegisterNumber(const std::string &Constraint, + const TargetRegisterClass *RC, const unsigned *Map) { + assert(*(Constraint.end()-1) == '}' && "Missing '}'"); + if (isdigit(Constraint[2])) { + std::string Suffix(Constraint.data() + 2, Constraint.size() - 2); + unsigned Index = atoi(Suffix.c_str()); + if (Index < 16 && Map[Index]) + return std::make_pair(Map[Index], RC); + } + return std::make_pair(0u, static_cast(0)); +} + std::pair SystemZTargetLowering:: getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { if (Constraint.size() == 1) { @@ -380,6 +509,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { return std::make_pair(0U, &SystemZ::ADDR128BitRegClass); return std::make_pair(0U, &SystemZ::ADDR32BitRegClass); + case 'h': // High-part register (an LLVM extension) + return std::make_pair(0U, &SystemZ::GRH32BitRegClass); + case 'f': // Floating-point register if (VT == MVT::f64) return std::make_pair(0U, &SystemZ::FP64BitRegClass); @@ -388,6 +520,32 @@ getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const { return std::make_pair(0U, &SystemZ::FP32BitRegClass); } } + if (Constraint[0] == '{') { + // We need to override the default register parsing for GPRs and FPRs + // because the interpretation depends on VT. The internal names of + // the registers are also different from the external names + // (F0D and F0S instead of F0, etc.). + if (Constraint[1] == 'r') { + if (VT == MVT::i32) + return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass, + SystemZMC::GR32Regs); + if (VT == MVT::i128) + return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass, + SystemZMC::GR128Regs); + return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass, + SystemZMC::GR64Regs); + } + if (Constraint[1] == 'f') { + if (VT == MVT::f32) + return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass, + SystemZMC::FP32Regs); + if (VT == MVT::f128) + return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass, + SystemZMC::FP128Regs); + return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass, + SystemZMC::FP64Regs); + } + } return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } @@ -443,6 +601,17 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, #include "SystemZGenCallingConv.inc" +bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType, + Type *ToType) const { + return isTruncateFree(FromType, ToType); +} + +bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { + if (!CI->isTailCall()) + return false; + return true; +} + // Value is a value that has been passed to us in the location described by VA // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining // any loads onto Chain. @@ -605,36 +774,56 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, return Chain; } +static bool canUseSiblingCall(CCState ArgCCInfo, + SmallVectorImpl &ArgLocs) { + // Punt if there are any indirect or stack arguments, or if the call + // needs the call-saved argument register R6. 
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { + CCValAssign &VA = ArgLocs[I]; + if (VA.getLocInfo() == CCValAssign::Indirect) + return false; + if (!VA.isRegLoc()) + return false; + unsigned Reg = VA.getLocReg(); + if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D) + return false; + } + return true; +} + SDValue SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl &InVals) const { SelectionDAG &DAG = CLI.DAG; SDLoc &DL = CLI.DL; - SmallVector &Outs = CLI.Outs; - SmallVector &OutVals = CLI.OutVals; - SmallVector &Ins = CLI.Ins; + SmallVectorImpl &Outs = CLI.Outs; + SmallVectorImpl &OutVals = CLI.OutVals; + SmallVectorImpl &Ins = CLI.Ins; SDValue Chain = CLI.Chain; SDValue Callee = CLI.Callee; - bool &isTailCall = CLI.IsTailCall; + bool &IsTailCall = CLI.IsTailCall; CallingConv::ID CallConv = CLI.CallConv; bool IsVarArg = CLI.IsVarArg; MachineFunction &MF = DAG.getMachineFunction(); EVT PtrVT = getPointerTy(); - // SystemZ target does not yet support tail call optimization. - isTailCall = false; - // Analyze the operands of the call, assigning locations to each operand. SmallVector ArgLocs; CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext()); ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ); + // We don't support GuaranteedTailCallOpt, only automatically-detected + // sibling calls. + if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs)) + IsTailCall = false; + // Get a count of how many bytes are to be pushed on the stack. unsigned NumBytes = ArgCCInfo.getNextStackOffset(); // Mark the start of the call. - Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true), - DL); + if (!IsTailCall) + Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true), + DL); // Copy argument values to their designated locations. SmallVector, 9> RegsToPass; @@ -683,22 +872,27 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOpChains[0], MemOpChains.size()); - // Build a sequence of copy-to-reg nodes, chained and glued together. - SDValue Glue; - for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { - Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, - RegsToPass[I].second, Glue); - Glue = Chain.getValue(1); - } - // Accept direct calls by converting symbolic call addresses to the - // associated Target* opcodes. + // associated Target* opcodes. Force %r1 to be used for indirect + // tail calls. + SDValue Glue; if (GlobalAddressSDNode *G = dyn_cast(Callee)) { Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); } else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) { Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT); Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); + } else if (IsTailCall) { + Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue); + Glue = Chain.getValue(1); + Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType()); + } + + // Build a sequence of copy-to-reg nodes, chained and glued together. + for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { + Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, + RegsToPass[I].second, Glue); + Glue = Chain.getValue(1); } // The first call operand is the chain and the second is the target address. @@ -718,6 +912,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, // Emit the call. 
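canUseSiblingCall above rejects tail calls whose arguments are passed indirectly, spill to the stack, or need the call-saved register %r6. At the C level, and assuming the usual s390x ELF convention of passing the first five integer arguments in %r2-%r6, the difference looks roughly like this (function names are invented):

extern long f4(long, long, long, long);
extern long f5(long, long, long, long, long);

long tail4(long A, long B, long C, long D) {
  return f4(A, B, C, D);      // args fit in %r2-%r5: sibling call possible
}

long tail5(long A, long B, long C, long D, long E) {
  return f5(A, B, C, D, E);   // fifth arg needs call-saved %r6: not eligible
}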
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + if (IsTailCall) + return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size()); Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size()); Glue = Chain.getValue(1); @@ -798,6 +994,11 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, RetOps.data(), RetOps.size()); } +SDValue SystemZTargetLowering:: +prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const { + return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain); +} + // CC is a comparison that will be implemented using an integer or // floating-point comparison. Return the condition code mask for // a branch on true. In the integer case, CCMASK_CMP_UO is set for @@ -826,143 +1027,604 @@ static unsigned CCMaskForCondCode(ISD::CondCode CC) { #undef CONV } -// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1 -// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary. -static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned, - SDValue &CmpOp0, SDValue &CmpOp1, - unsigned &CCMask) { +// Return a sequence for getting a 1 from an IPM result when CC has a +// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask. +// The handling of CC values outside CCValid doesn't matter. +static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) { + // Deal with cases where the result can be taken directly from a bit + // of the IPM result. + if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3))) + return IPMConversion(0, 0, SystemZ::IPM_CC); + if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3))) + return IPMConversion(0, 0, SystemZ::IPM_CC + 1); + + // Deal with cases where we can add a value to force the sign bit + // to contain the right value. Putting the bit in 31 means we can + // use SRL rather than RISBG(L), and also makes it easier to get a + // 0/-1 value, so it has priority over the other tests below. + // + // These sequences rely on the fact that the upper two bits of the + // IPM result are zero. + uint64_t TopBit = uint64_t(1) << 31; + if (CCMask == (CCValid & SystemZ::CCMASK_0)) + return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1))) + return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & (SystemZ::CCMASK_0 + | SystemZ::CCMASK_1 + | SystemZ::CCMASK_2))) + return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & SystemZ::CCMASK_3)) + return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & (SystemZ::CCMASK_1 + | SystemZ::CCMASK_2 + | SystemZ::CCMASK_3))) + return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31); + + // Next try inverting the value and testing a bit. 0/1 could be + // handled this way too, but we dealt with that case above. + if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2))) + return IPMConversion(-1, 0, SystemZ::IPM_CC); + + // Handle cases where adding a value forces a non-sign bit to contain + // the right value. + if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2))) + return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1); + if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3))) + return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1); + + // The remaing cases are 1, 2, 0/1/3 and 0/2/3. 
All these are + // can be done by inverting the low CC bit and applying one of the + // sign-based extractions above. + if (CCMask == (CCValid & SystemZ::CCMASK_1)) + return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & SystemZ::CCMASK_2)) + return IPMConversion(1 << SystemZ::IPM_CC, + TopBit - (3 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & (SystemZ::CCMASK_0 + | SystemZ::CCMASK_1 + | SystemZ::CCMASK_3))) + return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31); + if (CCMask == (CCValid & (SystemZ::CCMASK_0 + | SystemZ::CCMASK_2 + | SystemZ::CCMASK_3))) + return IPMConversion(1 << SystemZ::IPM_CC, + TopBit - (1 << SystemZ::IPM_CC), 31); + + llvm_unreachable("Unexpected CC combination"); +} + +// If C can be converted to a comparison against zero, adjust the operands +// as necessary. +static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) { + if (C.ICmpType == SystemZICMP::UnsignedOnly) + return; + + ConstantSDNode *ConstOp1 = dyn_cast(C.Op1.getNode()); + if (!ConstOp1) + return; + + int64_t Value = ConstOp1->getSExtValue(); + if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) || + (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) || + (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) || + (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) { + C.CCMask ^= SystemZ::CCMASK_CMP_EQ; + C.Op1 = DAG.getConstant(0, C.Op1.getValueType()); + } +} + +// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI, +// adjust the operands as necessary. +static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) { // For us to make any changes, it must a comparison between a single-use // load and a constant. - if (!CmpOp0.hasOneUse() || - CmpOp0.getOpcode() != ISD::LOAD || - CmpOp1.getOpcode() != ISD::Constant) + if (!C.Op0.hasOneUse() || + C.Op0.getOpcode() != ISD::LOAD || + C.Op1.getOpcode() != ISD::Constant) return; // We must have an 8- or 16-bit load. - LoadSDNode *Load = cast(CmpOp0); + LoadSDNode *Load = cast(C.Op0); unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits(); if (NumBits != 8 && NumBits != 16) return; // The load must be an extending one and the constant must be within the // range of the unextended value. - ConstantSDNode *Constant = cast(CmpOp1); - uint64_t Value = Constant->getZExtValue(); + ConstantSDNode *ConstOp1 = cast(C.Op1); + uint64_t Value = ConstOp1->getZExtValue(); uint64_t Mask = (1 << NumBits) - 1; if (Load->getExtensionType() == ISD::SEXTLOAD) { - int64_t SignedValue = Constant->getSExtValue(); - if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask) + // Make sure that ConstOp1 is in range of C.Op0. + int64_t SignedValue = ConstOp1->getSExtValue(); + if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask) return; - // Unsigned comparison between two sign-extended values is equivalent - // to unsigned comparison between two zero-extended values. - if (IsUnsigned) + if (C.ICmpType != SystemZICMP::SignedOnly) { + // Unsigned comparison between two sign-extended values is equivalent + // to unsigned comparison between two zero-extended values. Value &= Mask; - else if (CCMask == SystemZ::CCMASK_CMP_EQ || - CCMask == SystemZ::CCMASK_CMP_NE) - // Any choice of IsUnsigned is OK for equality comparisons. - // We could use either CHHSI or CLHHSI for 16-bit comparisons, - // but since we use CLHHSI for zero extensions, it seems better - // to be consistent and do the same here. 
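To make the getIPMConversion table above concrete: IPM leaves the condition code in bits 28-29 of the result (SystemZ::IPM_CC is 28) with bits 30-31 clear, so (((X ^ XORValue) + AddValue) >> Bit) can steer any tested CC set into bit 31 or bit 0. A hedged, self-contained check of the "CC == 0" row (XOR = 0, Add = -(1 << 28), Bit = 31); the helper name is invented:

#include <cassert>
#include <cstdint>

static unsigned ccIsZero(unsigned CC) {
  uint32_t IPM = CC << 28;                    // model of the IPM result
  return (IPM + uint32_t(-(1 << 28))) >> 31;  // sign bit becomes the answer
}

int main() {
  assert(ccIsZero(0) == 1);
  for (unsigned CC = 1; CC < 4; ++CC)
    assert(ccIsZero(CC) == 0);
}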
- Value &= Mask, IsUnsigned = true; - else if (NumBits == 8) { + } else if (NumBits == 8) { // Try to treat the comparison as unsigned, so that we can use CLI. // Adjust CCMask and Value as necessary. - if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT) + if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT) // Test whether the high bit of the byte is set. - Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true; - else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT) + Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT; + else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE) // Test whether the high bit of the byte is clear. - Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true; + Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT; else // No instruction exists for this combination. return; + C.ICmpType = SystemZICMP::UnsignedOnly; } } else if (Load->getExtensionType() == ISD::ZEXTLOAD) { if (Value > Mask) return; - // Signed comparison between two zero-extended values is equivalent - // to unsigned comparison. - IsUnsigned = true; + assert(C.ICmpType == SystemZICMP::Any && + "Signedness shouldn't matter here."); } else return; // Make sure that the first operand is an i32 of the right extension type. - ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD; - if (CmpOp0.getValueType() != MVT::i32 || + ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? + ISD::SEXTLOAD : + ISD::ZEXTLOAD); + if (C.Op0.getValueType() != MVT::i32 || Load->getExtensionType() != ExtType) - CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, - Load->getChain(), Load->getBasePtr(), - Load->getPointerInfo(), Load->getMemoryVT(), - Load->isVolatile(), Load->isNonTemporal(), - Load->getAlignment()); + C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, + Load->getChain(), Load->getBasePtr(), + Load->getPointerInfo(), Load->getMemoryVT(), + Load->isVolatile(), Load->isNonTemporal(), + Load->getAlignment()); // Make sure that the second operand is an i32 with the right value. - if (CmpOp1.getValueType() != MVT::i32 || - Value != Constant->getZExtValue()) - CmpOp1 = DAG.getConstant(Value, MVT::i32); -} - -// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1 -// is an equality comparison that is better implemented using unsigned -// rather than signed comparison instructions. -static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0, - SDValue CmpOp1, unsigned CCMask) { - // The test must be for equality or inequality. - if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE) + if (C.Op1.getValueType() != MVT::i32 || + Value != ConstOp1->getZExtValue()) + C.Op1 = DAG.getConstant(Value, MVT::i32); +} + +// Return true if Op is either an unextended load, or a load suitable +// for integer register-memory comparisons of type ICmpType. +static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { + LoadSDNode *Load = dyn_cast(Op.getNode()); + if (Load) { + // There are no instructions to compare a register with a memory byte. + if (Load->getMemoryVT() == MVT::i8) + return false; + // Otherwise decide on extension type. + switch (Load->getExtensionType()) { + case ISD::NON_EXTLOAD: + return true; + case ISD::SEXTLOAD: + return ICmpType != SystemZICMP::UnsignedOnly; + case ISD::ZEXTLOAD: + return ICmpType != SystemZICMP::SignedOnly; + default: + break; + } + } + return false; +} + +// Return true if it is better to swap the operands of C. 
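The CLI adjustment above rewrites a signed sign-bit test on a byte as an unsigned comparison so that a compare-logical-immediate can be used. The equivalence it relies on, checked exhaustively in plain C++ (the function name is invented):

#include <cassert>

static bool byteIsNegative(unsigned char X) {
  return X > 127;   // unsigned form of "(signed char)X < 0"
}

int main() {
  for (int I = 0; I < 256; ++I)
    assert(byteIsNegative((unsigned char)I) == ((signed char)I < 0));
}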
+static bool shouldSwapCmpOperands(const Comparison &C) { + // Leave f128 comparisons alone, since they have no memory forms. + if (C.Op0.getValueType() == MVT::f128) return false; - if (CmpOp1.getOpcode() == ISD::Constant) { - uint64_t Value = cast(CmpOp1)->getSExtValue(); + // Always keep a floating-point constant second, since comparisons with + // zero can use LOAD TEST and comparisons with other constants make a + // natural memory operand. + if (isa(C.Op1)) + return false; - // If we're comparing with memory, prefer unsigned comparisons for - // values that are in the unsigned 16-bit range but not the signed - // 16-bit range. We want to use CLFHSI and CLGHSI. - if (CmpOp0.hasOneUse() && - ISD::isNormalLoad(CmpOp0.getNode()) && - (Value >= 32768 && Value < 65536)) - return true; + // Never swap comparisons with zero since there are many ways to optimize + // those later. + ConstantSDNode *ConstOp1 = dyn_cast(C.Op1); + if (ConstOp1 && ConstOp1->getZExtValue() == 0) + return false; + + // Also keep natural memory operands second if the loaded value is + // only used here. Several comparisons have memory forms. + if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) + return false; - // Use unsigned comparisons for values that are in the CLGFI range - // but not in the CGFI range. - if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1) + // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. + // In that case we generally prefer the memory to be second. + if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { + // The only exceptions are when the second operand is a constant and + // we can use things like CHHSI. + if (!ConstOp1) return true; + // The unsigned memory-immediate instructions can handle 16-bit + // unsigned integers. + if (C.ICmpType != SystemZICMP::SignedOnly && + isUInt<16>(ConstOp1->getZExtValue())) + return false; + // The signed memory-immediate instructions can handle 16-bit + // signed integers. + if (C.ICmpType != SystemZICMP::UnsignedOnly && + isInt<16>(ConstOp1->getSExtValue())) + return false; + return true; + } + // Try to promote the use of CGFR and CLGFR. + unsigned Opcode0 = C.Op0.getOpcode(); + if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) + return true; + if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) + return true; + if (C.ICmpType != SystemZICMP::SignedOnly && + Opcode0 == ISD::AND && + C.Op0.getOperand(1).getOpcode() == ISD::Constant && + cast(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) + return true; + + return false; +} + +// Return a version of comparison CC mask CCMask in which the LT and GT +// actions are swapped. +static unsigned reverseCCMask(unsigned CCMask) { + return ((CCMask & SystemZ::CCMASK_CMP_EQ) | + (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | + (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | + (CCMask & SystemZ::CCMASK_CMP_UO)); +} + +// Check whether C tests for equality between X and Y and whether X - Y +// or Y - X is also computed. In that case it's better to compare the +// result of the subtraction against zero. 
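When shouldSwapCmpOperands above decides to commute a comparison, reverseCCMask keeps the EQ and UO bits and exchanges LT and GT, so that "a < b" becomes "b > a". A sketch with locally defined stand-in constants (the real SystemZ::CCMASK_CMP_* values are not assumed here):

// Stand-in bit values, for illustration only.
enum { CmpEQ = 8, CmpLT = 4, CmpGT = 2, CmpUO = 1 };

static unsigned reverseMask(unsigned M) {
  return (M & CmpEQ) | (M & CmpGT ? CmpLT : 0) | (M & CmpLT ? CmpGT : 0) |
         (M & CmpUO);
}
// Example: reverseMask(CmpLT | CmpEQ) == (CmpGT | CmpEQ), i.e. "<=" becomes ">=".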
+static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) { + if (C.CCMask == SystemZ::CCMASK_CMP_EQ || + C.CCMask == SystemZ::CCMASK_CMP_NE) { + for (SDNode::use_iterator I = C.Op0->use_begin(), E = C.Op0->use_end(); + I != E; ++I) { + SDNode *N = *I; + if (N->getOpcode() == ISD::SUB && + ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || + (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { + C.Op0 = SDValue(N, 0); + C.Op1 = DAG.getConstant(0, N->getValueType(0)); + return; + } + } + } +} + +// Check whether C compares a floating-point value with zero and if that +// floating-point value is also negated. In this case we can use the +// negation to set CC, so avoiding separate LOAD AND TEST and +// LOAD (NEGATIVE/COMPLEMENT) instructions. +static void adjustForFNeg(Comparison &C) { + ConstantFPSDNode *C1 = dyn_cast(C.Op1); + if (C1 && C1->isZero()) { + for (SDNode::use_iterator I = C.Op0->use_begin(), E = C.Op0->use_end(); + I != E; ++I) { + SDNode *N = *I; + if (N->getOpcode() == ISD::FNEG) { + C.Op0 = SDValue(N, 0); + C.CCMask = reverseCCMask(C.CCMask); + return; + } + } + } +} + +// Check whether C compares (shl X, 32) with 0 and whether X is +// also sign-extended. In that case it is better to test the result +// of the sign extension using LTGFR. +// +// This case is important because InstCombine transforms a comparison +// with (sext (trunc X)) into a comparison with (shl X, 32). +static void adjustForLTGFR(Comparison &C) { + // Check for a comparison between (shl X, 32) and 0. + if (C.Op0.getOpcode() == ISD::SHL && + C.Op0.getValueType() == MVT::i64 && + C.Op1.getOpcode() == ISD::Constant && + cast(C.Op1)->getZExtValue() == 0) { + ConstantSDNode *C1 = dyn_cast(C.Op0.getOperand(1)); + if (C1 && C1->getZExtValue() == 32) { + SDValue ShlOp0 = C.Op0.getOperand(0); + // See whether X has any SIGN_EXTEND_INREG uses. + for (SDNode::use_iterator I = ShlOp0->use_begin(), E = ShlOp0->use_end(); + I != E; ++I) { + SDNode *N = *I; + if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && + cast(N->getOperand(1))->getVT() == MVT::i32) { + C.Op0 = SDValue(N, 0); + return; + } + } + } + } +} + +// If C compares the truncation of an extending load, try to compare +// the untruncated value instead. This exposes more opportunities to +// reuse CC. +static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) { + if (C.Op0.getOpcode() == ISD::TRUNCATE && + C.Op0.getOperand(0).getOpcode() == ISD::LOAD && + C.Op1.getOpcode() == ISD::Constant && + cast(C.Op1)->getZExtValue() == 0) { + LoadSDNode *L = cast(C.Op0.getOperand(0)); + if (L->getMemoryVT().getStoreSizeInBits() + <= C.Op0.getValueType().getSizeInBits()) { + unsigned Type = L->getExtensionType(); + if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) || + (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { + C.Op0 = C.Op0.getOperand(0); + C.Op1 = DAG.getConstant(0, C.Op0.getValueType()); + } + } + } +} + +// Return true if shift operation N has an in-range constant shift value. +// Store it in ShiftVal if so. 
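adjustForSubtraction above targets the common pattern where the difference of the two compared values is already computed; testing that difference against zero lets the subtraction set the condition code and the separate compare disappear. An illustrative C-level instance (names invented):

// The equality test can be folded into the subtraction that already exists:
// "A == B" becomes "Diff == 0", reusing the CC set by the subtract.
int useDifference(int A, int B, int *Out) {
  int Diff = A - B;
  *Out = Diff;
  return A == B;
}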
+static bool isSimpleShift(SDValue N, unsigned &ShiftVal) { + ConstantSDNode *Shift = dyn_cast(N.getOperand(1)); + if (!Shift) return false; + + uint64_t Amount = Shift->getZExtValue(); + if (Amount >= N.getValueType().getSizeInBits()) + return false; + + ShiftVal = Amount; + return true; +} + +// Check whether an AND with Mask is suitable for a TEST UNDER MASK +// instruction and whether the CC value is descriptive enough to handle +// a comparison of type Opcode between the AND result and CmpVal. +// CCMask says which comparison result is being tested and BitSize is +// the number of bits in the operands. If TEST UNDER MASK can be used, +// return the corresponding CC mask, otherwise return 0. +static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, + uint64_t Mask, uint64_t CmpVal, + unsigned ICmpType) { + assert(Mask != 0 && "ANDs with zero should have been removed by now"); + + // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL. + if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) && + !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask)) + return 0; + + // Work out the masks for the lowest and highest bits. + unsigned HighShift = 63 - countLeadingZeros(Mask); + uint64_t High = uint64_t(1) << HighShift; + uint64_t Low = uint64_t(1) << countTrailingZeros(Mask); + + // Signed ordered comparisons are effectively unsigned if the sign + // bit is dropped. + bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly); + + // Check for equality comparisons with 0, or the equivalent. + if (CmpVal == 0) { + if (CCMask == SystemZ::CCMASK_CMP_EQ) + return SystemZ::CCMASK_TM_ALL_0; + if (CCMask == SystemZ::CCMASK_CMP_NE) + return SystemZ::CCMASK_TM_SOME_1; + } + if (EffectivelyUnsigned && CmpVal <= Low) { + if (CCMask == SystemZ::CCMASK_CMP_LT) + return SystemZ::CCMASK_TM_ALL_0; + if (CCMask == SystemZ::CCMASK_CMP_GE) + return SystemZ::CCMASK_TM_SOME_1; + } + if (EffectivelyUnsigned && CmpVal < Low) { + if (CCMask == SystemZ::CCMASK_CMP_LE) + return SystemZ::CCMASK_TM_ALL_0; + if (CCMask == SystemZ::CCMASK_CMP_GT) + return SystemZ::CCMASK_TM_SOME_1; } - // Prefer CL for zero-extended loads. - if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND || - ISD::isZEXTLoad(CmpOp1.getNode())) - return true; + // Check for equality comparisons with the mask, or the equivalent. + if (CmpVal == Mask) { + if (CCMask == SystemZ::CCMASK_CMP_EQ) + return SystemZ::CCMASK_TM_ALL_1; + if (CCMask == SystemZ::CCMASK_CMP_NE) + return SystemZ::CCMASK_TM_SOME_0; + } + if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { + if (CCMask == SystemZ::CCMASK_CMP_GT) + return SystemZ::CCMASK_TM_ALL_1; + if (CCMask == SystemZ::CCMASK_CMP_LE) + return SystemZ::CCMASK_TM_SOME_0; + } + if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { + if (CCMask == SystemZ::CCMASK_CMP_GE) + return SystemZ::CCMASK_TM_ALL_1; + if (CCMask == SystemZ::CCMASK_CMP_LT) + return SystemZ::CCMASK_TM_SOME_0; + } - // ...and for "in-register" zero extensions. - if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) { - SDValue Mask = CmpOp1.getOperand(1); - if (Mask.getOpcode() == ISD::Constant && - cast(Mask)->getZExtValue() == 0xffffffff) - return true; + // Check for ordered comparisons with the top bit. 
+ if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { + if (CCMask == SystemZ::CCMASK_CMP_LE) + return SystemZ::CCMASK_TM_MSB_0; + if (CCMask == SystemZ::CCMASK_CMP_GT) + return SystemZ::CCMASK_TM_MSB_1; + } + if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { + if (CCMask == SystemZ::CCMASK_CMP_LT) + return SystemZ::CCMASK_TM_MSB_0; + if (CCMask == SystemZ::CCMASK_CMP_GE) + return SystemZ::CCMASK_TM_MSB_1; } - return false; + // If there are just two bits, we can do equality checks for Low and High + // as well. + if (Mask == Low + High) { + if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) + return SystemZ::CCMASK_TM_MIXED_MSB_0; + if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) + return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; + if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) + return SystemZ::CCMASK_TM_MIXED_MSB_1; + if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) + return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; + } + + // Looks like we've exhausted our options. + return 0; +} + +// See whether C can be implemented as a TEST UNDER MASK instruction. +// Update the arguments with the TM version if so. +static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) { + // Check that we have a comparison with a constant. + ConstantSDNode *ConstOp1 = dyn_cast(C.Op1); + if (!ConstOp1) + return; + uint64_t CmpVal = ConstOp1->getZExtValue(); + + // Check whether the nonconstant input is an AND with a constant mask. + Comparison NewC(C); + uint64_t MaskVal; + ConstantSDNode *Mask = 0; + if (C.Op0.getOpcode() == ISD::AND) { + NewC.Op0 = C.Op0.getOperand(0); + NewC.Op1 = C.Op0.getOperand(1); + Mask = dyn_cast(NewC.Op1); + if (!Mask) + return; + MaskVal = Mask->getZExtValue(); + } else { + // There is no instruction to compare with a 64-bit immediate + // so use TMHH instead if possible. We need an unsigned ordered + // comparison with an i64 immediate. + if (NewC.Op0.getValueType() != MVT::i64 || + NewC.CCMask == SystemZ::CCMASK_CMP_EQ || + NewC.CCMask == SystemZ::CCMASK_CMP_NE || + NewC.ICmpType == SystemZICMP::SignedOnly) + return; + // Convert LE and GT comparisons into LT and GE. + if (NewC.CCMask == SystemZ::CCMASK_CMP_LE || + NewC.CCMask == SystemZ::CCMASK_CMP_GT) { + if (CmpVal == uint64_t(-1)) + return; + CmpVal += 1; + NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; + } + // If the low N bits of Op1 are zero than the low N bits of Op0 can + // be masked off without changing the result. + MaskVal = -(CmpVal & -CmpVal); + NewC.ICmpType = SystemZICMP::UnsignedOnly; + } + + // Check whether the combination of mask, comparison value and comparison + // type are suitable. 
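One way to see why the ordered cases in getTestUnderMaskCond above are safe: the masked value can only be composed of the mask's own bits, so an unsigned comparison against a bound at or below the lowest mask bit collapses to an all-bits-zero test. A small exhaustive check for a two-bit mask (values chosen for illustration only):

#include <cassert>

int main() {
  const unsigned Mask = 0xC0, Low = 0x40;   // TMLL-style mask; Low = lowest bit
  for (unsigned X = 0; X < 0x200; ++X)
    assert(((X & Mask) < Low) == ((X & Mask) == 0));
}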
+ unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); + unsigned NewCCMask, ShiftVal; + if (NewC.ICmpType != SystemZICMP::SignedOnly && + NewC.Op0.getOpcode() == ISD::SHL && + isSimpleShift(NewC.Op0, ShiftVal) && + (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, + MaskVal >> ShiftVal, + CmpVal >> ShiftVal, + SystemZICMP::Any))) { + NewC.Op0 = NewC.Op0.getOperand(0); + MaskVal >>= ShiftVal; + } else if (NewC.ICmpType != SystemZICMP::SignedOnly && + NewC.Op0.getOpcode() == ISD::SRL && + isSimpleShift(NewC.Op0, ShiftVal) && + (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, + MaskVal << ShiftVal, + CmpVal << ShiftVal, + SystemZICMP::UnsignedOnly))) { + NewC.Op0 = NewC.Op0.getOperand(0); + MaskVal <<= ShiftVal; + } else { + NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, + NewC.ICmpType); + if (!NewCCMask) + return; + } + + // Go ahead and make the change. + C.Opcode = SystemZISD::TM; + C.Op0 = NewC.Op0; + if (Mask && Mask->getZExtValue() == MaskVal) + C.Op1 = SDValue(Mask, 0); + else + C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType()); + C.CCValid = SystemZ::CCMASK_TM; + C.CCMask = NewCCMask; } -// Return a target node that compares CmpOp0 and CmpOp1. Set CCMask to the -// 4-bit condition-code mask for CC. -static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, - ISD::CondCode CC, unsigned &CCMask) { - bool IsUnsigned = false; - CCMask = CCMaskForCondCode(CC); - if (!CmpOp0.getValueType().isFloatingPoint()) { - IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO; - CCMask &= ~SystemZ::CCMASK_CMP_UO; - adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask); - if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask)) - IsUnsigned = true; +// Decide how to implement a comparison of type Cond between CmpOp0 with CmpOp1. +static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, + ISD::CondCode Cond) { + Comparison C(CmpOp0, CmpOp1); + C.CCMask = CCMaskForCondCode(Cond); + if (C.Op0.getValueType().isFloatingPoint()) { + C.CCValid = SystemZ::CCMASK_FCMP; + C.Opcode = SystemZISD::FCMP; + adjustForFNeg(C); + } else { + C.CCValid = SystemZ::CCMASK_ICMP; + C.Opcode = SystemZISD::ICMP; + // Choose the type of comparison. Equality and inequality tests can + // use either signed or unsigned comparisons. The choice also doesn't + // matter if both sign bits are known to be clear. In those cases we + // want to give the main isel code the freedom to choose whichever + // form fits best. + if (C.CCMask == SystemZ::CCMASK_CMP_EQ || + C.CCMask == SystemZ::CCMASK_CMP_NE || + (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) + C.ICmpType = SystemZICMP::Any; + else if (C.CCMask & SystemZ::CCMASK_CMP_UO) + C.ICmpType = SystemZICMP::UnsignedOnly; + else + C.ICmpType = SystemZICMP::SignedOnly; + C.CCMask &= ~SystemZ::CCMASK_CMP_UO; + adjustZeroCmp(DAG, C); + adjustSubwordCmp(DAG, C); + adjustForSubtraction(DAG, C); + adjustForLTGFR(C); + adjustICmpTruncate(DAG, C); + } + + if (shouldSwapCmpOperands(C)) { + std::swap(C.Op0, C.Op1); + C.CCMask = reverseCCMask(C.CCMask); } - SDLoc DL(CmpOp0); - return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP), - DL, MVT::Glue, CmpOp0, CmpOp1); + adjustForTestUnderMask(DAG, C); + return C; +} + +// Emit the comparison instruction described by C. 
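The MaskVal = -(CmpVal & -CmpVal) step above keeps every bit from the comparison value's lowest set bit upward, so an unsigned ordered comparison is unaffected by masking off the bits below it. A worked check, under the assumption that this is the intended invariant:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t CmpVal = 0x3000;
  uint64_t MaskVal = ~uint64_t(0) << 12;   // same as -(CmpVal & -CmpVal) here
  assert(MaskVal == uint64_t(0) - (CmpVal & (uint64_t(0) - CmpVal)));
  for (uint64_t X = 0; X < 0x10000; ++X)
    assert((X >= CmpVal) == ((X & MaskVal) >= CmpVal));
}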
+static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { + if (C.Opcode == SystemZISD::ICMP) + return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, + DAG.getConstant(C.ICmpType, MVT::i32)); + if (C.Opcode == SystemZISD::TM) { + bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != + bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); + return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, + DAG.getConstant(RegisterOnly, MVT::i32)); + } + return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); +} + +// Implement a 32-bit *MUL_LOHI operation by extending both operands to +// 64 bits. Extend is the extension type to use. Store the high part +// in Hi and the low part in Lo. +static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, + unsigned Extend, SDValue Op0, SDValue Op1, + SDValue &Hi, SDValue &Lo) { + Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); + Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); + SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); + Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); + Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); + Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); } // Lower a binary operation that produces two VT results, one in each @@ -978,14 +1640,45 @@ static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, SDValue(In128, 0), Op1); bool Is32Bit = is32Bit(VT); - SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT); - SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT); - SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, - VT, Result, SubReg0); - SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, - VT, Result, SubReg1); - Even = SDValue(Reg0, 0); - Odd = SDValue(Reg1, 0); + Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); + Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); +} + +// Return an i32 value that is 1 if the CC value produced by Glue is +// in the mask CCMask and 0 otherwise. CC is known to have a value +// in CCValid, so other values can be ignored. +static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, + unsigned CCValid, unsigned CCMask) { + IPMConversion Conversion = getIPMConversion(CCValid, CCMask); + SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); + + if (Conversion.XORValue) + Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, + DAG.getConstant(Conversion.XORValue, MVT::i32)); + + if (Conversion.AddValue) + Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, + DAG.getConstant(Conversion.AddValue, MVT::i32)); + + // The SHR/AND sequence should get optimized to an RISBG. 
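lowerMUL_LOHI32 above implements a 32-bit high/low multiply by widening both operands, doing a single 64-bit multiply, and splitting the product. The same recipe in plain C++, as an illustrative sketch (helper name invented):

#include <cassert>
#include <cstdint>

static void mulLoHi32(uint32_t A, uint32_t B, uint32_t &Hi, uint32_t &Lo) {
  uint64_t Prod = uint64_t(A) * uint64_t(B);   // extend, then one 64-bit MUL
  Hi = uint32_t(Prod >> 32);                   // SRL 32 + TRUNCATE
  Lo = uint32_t(Prod);                         // TRUNCATE
}

int main() {
  uint32_t Hi, Lo;
  mulLoHi32(0xFFFFFFFFu, 2, Hi, Lo);
  assert(Hi == 1 && Lo == 0xFFFFFFFEu);
}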
+ Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, + DAG.getConstant(Conversion.Bit, MVT::i32)); + if (Conversion.Bit != 31) + Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, + DAG.getConstant(1, MVT::i32)); + return Result; +} + +SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, + SelectionDAG &DAG) const { + SDValue CmpOp0 = Op.getOperand(0); + SDValue CmpOp1 = Op.getOperand(1); + ISD::CondCode CC = cast(Op.getOperand(2))->get(); + SDLoc DL(Op); + + Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); + SDValue Glue = emitCmp(DAG, DL, C); + return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); } SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { @@ -996,10 +1689,33 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Dest = Op.getOperand(4); SDLoc DL(Op); - unsigned CCMask; - SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); + Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); + SDValue Glue = emitCmp(DAG, DL, C); return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), - Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags); + Chain, DAG.getConstant(C.CCValid, MVT::i32), + DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); +} + +// Return true if Pos is CmpOp and Neg is the negative of CmpOp, +// allowing Pos and Neg to be wider than CmpOp. +static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { + return (Neg.getOpcode() == ISD::SUB && + Neg.getOperand(0).getOpcode() == ISD::Constant && + cast(Neg.getOperand(0))->getZExtValue() == 0 && + Neg.getOperand(1) == Pos && + (Pos == CmpOp || + (Pos.getOpcode() == ISD::SIGN_EXTEND && + Pos.getOperand(0) == CmpOp))); +} + +// Return the absolute or negative absolute of Op; IsNegative decides which. +static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, + bool IsNegative) { + Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); + if (IsNegative) + Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), + DAG.getConstant(0, Op.getValueType()), Op); + return Op; } SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, @@ -1011,14 +1727,53 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, ISD::CondCode CC = cast(Op.getOperand(4))->get(); SDLoc DL(Op); - unsigned CCMask; - SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask); + Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); + + // Check for absolute and negative-absolute selections, including those + // where the comparison value is sign-extended (for LPGFR and LNGFR). + // This check supplements the one in DAGCombiner. + if (C.Opcode == SystemZISD::ICMP && + C.CCMask != SystemZ::CCMASK_CMP_EQ && + C.CCMask != SystemZ::CCMASK_CMP_NE && + C.Op1.getOpcode() == ISD::Constant && + cast(C.Op1)->getZExtValue() == 0) { + if (isAbsolute(C.Op0, TrueOp, FalseOp)) + return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); + if (isAbsolute(C.Op0, FalseOp, TrueOp)) + return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); + } + + SDValue Glue = emitCmp(DAG, DL, C); + + // Special case for handling -1/0 results. The shifts we use here + // should get optimized with the IPM conversion sequence. + ConstantSDNode *TrueC = dyn_cast(TrueOp); + ConstantSDNode *FalseC = dyn_cast(FalseOp); + if (TrueC && FalseC) { + int64_t TrueVal = TrueC->getSExtValue(); + int64_t FalseVal = FalseC->getSExtValue(); + if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { + // Invert the condition if we want -1 on false. 
+ if (TrueVal == 0) + C.CCMask ^= C.CCValid; + SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); + EVT VT = Op.getValueType(); + // Extend the result to VT. Upper bits are ignored. + if (!is32Bit(VT)) + Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); + // Sign-extend from the low bit. + SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); + SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); + return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); + } + } - SmallVector Ops; + SmallVector Ops; Ops.push_back(TrueOp); Ops.push_back(FalseOp); - Ops.push_back(DAG.getConstant(CCMask, MVT::i32)); - Ops.push_back(Flags); + Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32)); + Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32)); + Ops.push_back(Glue); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size()); @@ -1035,18 +1790,18 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, SDValue Result; if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { - // Make sure that the offset is aligned to a halfword. If it isn't, - // create an "anchor" at the previous 12-bit boundary. - // FIXME check whether there is a better way of handling this. - if (Offset & 1) { - Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, - Offset & ~uint64_t(0xfff)); - Offset &= 0xfff; - } else { - Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset); + // Assign anchors at 1<<12 byte boundaries. + uint64_t Anchor = Offset & ~uint64_t(0xfff); + Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); + Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); + + // The offset can be folded into the address if it is aligned to a halfword. 
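The -1/0 special case above extends the 0/1 result of the IPM sequence and then sign-extends from the low bit with an SHL/SRA pair. The arithmetic being relied on, in a minimal sketch (helper name invented; the arithmetic right shift of a negative value is assumed to behave as on the target):

#include <cassert>
#include <cstdint>

static int64_t signExtendLowBit(uint64_t Bit) {
  return int64_t(Bit << 63) >> 63;   // SHL by width-1, then SRA by width-1
}

int main() {
  assert(signExtendLowBit(1) == -1);
  assert(signExtendLowBit(0) == 0);
}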
+ Offset -= Anchor; + if (Offset != 0 && (Offset & 1) == 0) { + SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); + Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); Offset = 0; } - Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); } else { Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); @@ -1148,24 +1903,33 @@ SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, EVT InVT = In.getValueType(); EVT ResVT = Op.getValueType(); - SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64); - SDValue Shift32 = DAG.getConstant(32, MVT::i64); if (InVT == MVT::i32 && ResVT == MVT::f32) { - SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); - SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32); - SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift); - SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, - MVT::f32, Out64, SubReg32); - return SDValue(Out, 0); + SDValue In64; + if (Subtarget.hasHighWord()) { + SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, + MVT::i64); + In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, + MVT::i64, SDValue(U64, 0), In); + } else { + In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); + In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, + DAG.getConstant(32, MVT::i64)); + } + SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); + return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, + DL, MVT::f32, Out64); } if (InVT == MVT::f32 && ResVT == MVT::i32) { SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); - SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, - MVT::f64, SDValue(U64, 0), In, SubReg32); - SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0)); - SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32); - SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); - return Out; + SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, + MVT::f64, SDValue(U64, 0), In); + SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); + if (Subtarget.hasHighWord()) + return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, + MVT::i32, Out64); + SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, + DAG.getConstant(32, MVT::i64)); + return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); } llvm_unreachable("Unexpected bitcast combination"); } @@ -1248,18 +2012,64 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { return DAG.getMergeValues(Ops, 2, DL); } -SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, +SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); SDLoc DL(Op); - assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI"); + SDValue Ops[2]; + if (is32Bit(VT)) + // Just do a normal 64-bit multiplication and extract the results. + // We define this so that it can be used for constant division. 
+ lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), + Op.getOperand(1), Ops[1], Ops[0]); + else { + // Do a full 128-bit multiplication based on UMUL_LOHI64: + // + // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) + // + // but using the fact that the upper halves are either all zeros + // or all ones: + // + // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) + // + // and grouping the right terms together since they are quicker than the + // multiplication: + // + // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) + SDValue C63 = DAG.getConstant(63, MVT::i64); + SDValue LL = Op.getOperand(0); + SDValue RL = Op.getOperand(1); + SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); + SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); + // UMUL_LOHI64 returns the low result in the odd register and the high + // result in the even register. SMUL_LOHI is defined to return the + // low half first, so the results are in reverse order. + lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, + LL, RL, Ops[1], Ops[0]); + SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); + SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); + SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); + Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); + } + return DAG.getMergeValues(Ops, 2, DL); +} - // UMUL_LOHI64 returns the low result in the odd register and the high - // result in the even register. UMUL_LOHI is defined to return the - // low half first, so the results are in reverse order. +SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDLoc DL(Op); SDValue Ops[2]; - lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, - Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); + if (is32Bit(VT)) + // Just do a normal 64-bit multiplication and extract the results. + // We define this so that it can be used for constant division. + lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), + Op.getOperand(1), Ops[1], Ops[0]); + else + // UMUL_LOHI64 returns the low result in the odd register and the high + // result in the even register. UMUL_LOHI is defined to return the + // low half first, so the results are in reverse order. + lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, + Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); return DAG.getMergeValues(Ops, 2, DL); } @@ -1348,10 +2158,10 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { // high 32 bits and just masks out low bits. We can skip it if so. if (HighOp.getOpcode() == ISD::AND && HighOp.getOperand(1).getOpcode() == ISD::Constant) { - ConstantSDNode *MaskNode = cast(HighOp.getOperand(1)); - uint64_t Mask = MaskNode->getZExtValue() | Masks[High]; - if ((Mask >> 32) == 0xffffffff) - HighOp = HighOp.getOperand(0); + SDValue HighOp0 = HighOp.getOperand(0); + uint64_t Mask = cast(HighOp.getOperand(1))->getZExtValue(); + if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) + HighOp = HighOp0; } // Take advantage of the fact that all GR32 operations only change the @@ -1360,17 +2170,36 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { // can be folded. 
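The derivation in lowerSMUL_LOHI above rests on the fact that, for a signed 64-bit value, the upper half of its 128-bit sign extension is either all zeros or all ones, so the cross terms reduce to AND operations on the high result. A self-contained check of that identity, using __int128 purely for verification (helper and sample names are invented):

#include <cassert>
#include <cstdint>

static void smulLoHi64(int64_t L, int64_t R, uint64_t &Hi, uint64_t &Lo) {
  uint64_t LL = uint64_t(L), RL = uint64_t(R);
  uint64_t LH = uint64_t(L >> 63), RH = uint64_t(R >> 63);   // 0 or all ones
  unsigned __int128 U = (unsigned __int128)LL * RL;          // UMUL_LOHI64
  Hi = uint64_t(U >> 64) - ((LH & RL) + (LL & RH));          // (ll*rl) - sum<<64
  Lo = uint64_t(U);
}

int main() {
  int64_t Samples[] = {0, 1, -1, 42, -12345, INT64_MAX, INT64_MIN};
  for (int64_t A : Samples)
    for (int64_t B : Samples) {
      uint64_t Hi, Lo;
      smulLoHi64(A, B, Hi, Lo);
      unsigned __int128 P = (unsigned __int128)((__int128)A * B);
      assert(Lo == uint64_t(P) && Hi == uint64_t(P >> 64));
    }
  return 0;
}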
   SDLoc DL(Op);
   SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
-  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
-  SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
-                                      MVT::i64, HighOp, Low32, SubReg32);
-  return SDValue(Result, 0);
+  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
+                                   MVT::i64, HighOp, Low32);
+}
+
+// Op is an atomic load.  Lower it into a normal volatile load.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
+                        Node->getChain(), Node->getBasePtr(),
+                        Node->getMemoryVT(), Node->getMemOperand());
+}
+
+// Op is an atomic store.  Lower it into a normal volatile store followed
+// by a serialization.
+SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
+                                    Node->getBasePtr(), Node->getMemoryVT(),
+                                    Node->getMemOperand());
+  return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
+                                    Chain), 0);
 }
 
 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
-SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
-                                                SelectionDAG &DAG,
-                                                unsigned Opcode) const {
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
+                                                    SelectionDAG &DAG,
+                                                    unsigned Opcode) const {
   AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
 
   // 32-bit operations need no code outside the main loop.
@@ -1440,6 +2269,44 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
   return DAG.getMergeValues(RetOps, 2, DL);
 }
 
+// Op is an ATOMIC_LOAD_SUB operation.  Lower 8- and 16-bit operations
+// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
+// operations into additions.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  EVT MemVT = Node->getMemoryVT();
+  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
+    // A full-width operation.
+    assert(Op.getValueType() == MemVT && "Mismatched VTs");
+    SDValue Src2 = Node->getVal();
+    SDValue NegSrc2;
+    SDLoc DL(Src2);
+
+    if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
+      // Use an addition if the operand is constant and either LAA(G) is
+      // available or the negative value is in the range of A(G)FHI.
+      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
+      if (isInt<32>(Value) || TM.getSubtargetImpl()->hasInterlockedAccess1())
+        NegSrc2 = DAG.getConstant(Value, MemVT);
+    } else if (TM.getSubtargetImpl()->hasInterlockedAccess1())
+      // Use LAA(G) if available.
+      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT),
+                            Src2);
+
+    if (NegSrc2.getNode())
+      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
+                           Node->getChain(), Node->getBasePtr(), NegSrc2,
+                           Node->getMemOperand(), Node->getOrdering(),
+                           Node->getSynchScope());
+
+    // Use the node as-is.
+    return Op;
+  }
+
+  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
+}
+
 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation.  Lower the first two
 // into a fullword ATOMIC_CMP_SWAPW operation.
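lowerATOMIC_LOAD_SUB above turns a full-width atomic subtraction into an atomic addition of the negated operand so that LAA/LAAG or an add-immediate form can be used. The rewrite relies only on two's-complement wraparound; a small portable sketch of the same equivalence (illustrative names, not the backend code):

#include <atomic>
#include <cassert>
#include <cstdint>

// fetch_sub expressed as fetch_add of the two's-complement negation,
// mirroring the ATOMIC_LOAD_SUB -> ATOMIC_LOAD_ADD rewrite.
uint64_t fetchSubViaAdd(std::atomic<uint64_t> &Mem, uint64_t Value) {
  return Mem.fetch_add(0 - Value);   // unsigned negation wraps mod 2^64
}

int main() {
  std::atomic<uint64_t> A(100), B(100);
  uint64_t OldA = A.fetch_sub(42);
  uint64_t OldB = fetchSubViaAdd(B, 42);
  assert(OldA == OldB && A.load() == B.load());
  return 0;
}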
 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
@@ -1502,6 +2369,26 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                             SystemZ::R15D, Op.getOperand(1));
 }
 
+SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+  if (!IsData)
+    // Just preserve the chain.
+    return Op.getOperand(0);
+
+  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
+  MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+  SDValue Ops[] = {
+    Op.getOperand(0),
+    DAG.getConstant(Code, MVT::i32),
+    Op.getOperand(1)
+  };
+  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
+                                 Node->getVTList(), Ops, array_lengthof(Ops),
+                                 Node->getMemoryVT(), Node->getMemOperand());
+}
+
 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                               SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
@@ -1509,6 +2396,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
     return lowerBR_CC(Op, DAG);
   case ISD::SELECT_CC:
     return lowerSELECT_CC(Op, DAG);
+  case ISD::SETCC:
+    return lowerSETCC(Op, DAG);
   case ISD::GlobalAddress:
     return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
   case ISD::GlobalTLSAddress:
@@ -1527,6 +2416,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
     return lowerVACOPY(Op, DAG);
   case ISD::DYNAMIC_STACKALLOC:
     return lowerDYNAMIC_STACKALLOC(Op, DAG);
+  case ISD::SMUL_LOHI:
+    return lowerSMUL_LOHI(Op, DAG);
   case ISD::UMUL_LOHI:
     return lowerUMUL_LOHI(Op, DAG);
   case ISD::SDIVREM:
@@ -1536,33 +2427,39 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
   case ISD::OR:
     return lowerOR(Op, DAG);
   case ISD::ATOMIC_SWAP:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+  case ISD::ATOMIC_STORE:
+    return lowerATOMIC_STORE(Op, DAG);
+  case ISD::ATOMIC_LOAD:
+    return lowerATOMIC_LOAD(Op, DAG);
   case ISD::ATOMIC_LOAD_ADD:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
   case ISD::ATOMIC_LOAD_SUB:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
+    return lowerATOMIC_LOAD_SUB(Op, DAG);
   case ISD::ATOMIC_LOAD_AND:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
   case ISD::ATOMIC_LOAD_OR:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
   case ISD::ATOMIC_LOAD_XOR:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
   case ISD::ATOMIC_LOAD_NAND:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
   case ISD::ATOMIC_LOAD_MIN:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
   case ISD::ATOMIC_LOAD_UMIN:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
+    return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
   case ISD::ATOMIC_LOAD_UMAX:
-    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
+    return lowerATOMIC_LOAD_OP(Op, DAG,
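lowerPREFETCH above discards non-data prefetches and otherwise picks a read or write prefetch code for the target node. The read/write distinction is the same one carried by the rw argument of the source-level prefetch builtin; a small host-side illustration using the GCC/Clang builtin (not SystemZ-specific, and the prefetch distance of 64 elements is arbitrary):

#include <cstddef>

// rw = 0 requests a prefetch for reading, rw = 1 for writing; the lowering
// above maps the same flag to PFD_READ / PFD_WRITE.
void scale(const double *In, double *Out, size_t N) {
  for (size_t I = 0; I < N; ++I) {
    if (I + 64 < N) {
      __builtin_prefetch(In + I + 64, /*rw=*/0);
      __builtin_prefetch(Out + I + 64, /*rw=*/1);
    }
    Out[I] = In[I] * 2.0;
  }
}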
SystemZISD::ATOMIC_LOADW_UMAX); case ISD::ATOMIC_CMP_SWAP: return lowerATOMIC_CMP_SWAP(Op, DAG); case ISD::STACKSAVE: return lowerSTACKSAVE(Op, DAG); case ISD::STACKRESTORE: return lowerSTACKRESTORE(Op, DAG); + case ISD::PREFETCH: + return lowerPREFETCH(Op, DAG); default: llvm_unreachable("Unexpected node to lower"); } @@ -1573,9 +2470,13 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { OPCODE(RET_FLAG); OPCODE(CALL); + OPCODE(SIBCALL); OPCODE(PCREL_WRAPPER); - OPCODE(CMP); - OPCODE(UCMP); + OPCODE(PCREL_OFFSET); + OPCODE(IABS); + OPCODE(ICMP); + OPCODE(FCMP); + OPCODE(TM); OPCODE(BR_CCMASK); OPCODE(SELECT_CCMASK); OPCODE(ADJDYNALLOC); @@ -1584,6 +2485,21 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(SDIVREM64); OPCODE(UDIVREM32); OPCODE(UDIVREM64); + OPCODE(MVC); + OPCODE(MVC_LOOP); + OPCODE(NC); + OPCODE(NC_LOOP); + OPCODE(OC); + OPCODE(OC_LOOP); + OPCODE(XC); + OPCODE(XC_LOOP); + OPCODE(CLC); + OPCODE(CLC_LOOP); + OPCODE(STRCMP); + OPCODE(STPCPY); + OPCODE(SEARCH_STRING); + OPCODE(IPM); + OPCODE(SERIALIZE); OPCODE(ATOMIC_SWAPW); OPCODE(ATOMIC_LOADW_ADD); OPCODE(ATOMIC_LOADW_SUB); @@ -1596,6 +2512,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(ATOMIC_LOADW_UMIN); OPCODE(ATOMIC_LOADW_UMAX); OPCODE(ATOMIC_CMP_SWAPW); + OPCODE(PREFETCH); } return NULL; #undef OPCODE @@ -1625,32 +2542,29 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, return NewMBB; } -bool SystemZTargetLowering:: -convertPrevCompareToBranch(MachineBasicBlock *MBB, - MachineBasicBlock::iterator MBBI, - unsigned CCMask, MachineBasicBlock *Target) const { - MachineBasicBlock::iterator Compare = MBBI; - MachineBasicBlock::iterator Begin = MBB->begin(); - do - { - if (Compare == Begin) - return false; - --Compare; - } - while (Compare->isDebugValue()); +// Split MBB before MI and return the new block (the one that contains MI). +static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, + MachineBasicBlock *MBB) { + MachineBasicBlock *NewMBB = emitBlockAfter(MBB); + NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); + NewMBB->transferSuccessorsAndUpdatePHIs(MBB); + return NewMBB; +} - const SystemZInstrInfo *TII = TM.getInstrInfo(); - unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(), - Compare); - if (!FusedOpcode) - return false; +// Force base value Base into a register before MI. Return the register. +static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, + const SystemZInstrInfo *TII) { + if (Base.isReg()) + return Base.getReg(); - DebugLoc DL = Compare->getDebugLoc(); - BuildMI(*MBB, MBBI, DL, TII->get(FusedOpcode)) - .addOperand(Compare->getOperand(0)).addOperand(Compare->getOperand(1)) - .addImm(CCMask).addMBB(Target); - Compare->removeFromParent(); - return true; + MachineBasicBlock *MBB = MI->getParent(); + MachineFunction &MF = *MBB->getParent(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) + .addOperand(Base).addImm(0).addReg(0); + return Reg; } // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
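The BRC form used below carries two immediates: CCValid, the set of condition-code values the preceding comparison can produce, and CCMask, the subset for which the branch is taken. XOR-ing CCMask with CCValid therefore inverts the condition over exactly the reachable CC values. A small model of that encoding, assuming the usual convention that CC value n corresponds to mask bit 8 >> n (the SystemZ::CCMASK_* values):

#include <cassert>

// CC value n (0..3) corresponds to mask bit 8 >> n.
bool branchTaken(unsigned CCMask, unsigned CC) {
  return (CCMask & (8u >> CC)) != 0;
}

int main() {
  // Integer compare: CC 0 = equal, 1 = less, 2 = greater; CC 3 unreachable.
  const unsigned CCValid = 8 | 4 | 2;           // CCMASK_ICMP
  const unsigned BranchIfLess = 4;              // "branch if less"
  const unsigned Inverted = BranchIfLess ^ CCValid;
  for (unsigned CC = 0; CC <= 2; ++CC)
    assert(branchTaken(Inverted, CC) == !branchTaken(BranchIfLess, CC));
  return 0;
}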
@@ -1662,25 +2576,20 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, unsigned DestReg = MI->getOperand(0).getReg(); unsigned TrueReg = MI->getOperand(1).getReg(); unsigned FalseReg = MI->getOperand(2).getReg(); - unsigned CCMask = MI->getOperand(3).getImm(); + unsigned CCValid = MI->getOperand(3).getImm(); + unsigned CCMask = MI->getOperand(4).getImm(); DebugLoc DL = MI->getDebugLoc(); MachineBasicBlock *StartMBB = MBB; - MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); // StartMBB: // BRC CCMask, JoinMBB // # fallthrough to FalseMBB - // - // The original DAG glues comparisons to their uses, both to ensure - // that no CC-clobbering instructions are inserted between them, and - // to ensure that comparison results are not reused. This means that - // this Select is the sole user of any preceding comparison instruction - // and that we can try to use a fused compare and branch instead. MBB = StartMBB; - if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB)) - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); MBB->addSuccessor(JoinMBB); MBB->addSuccessor(FalseMBB); @@ -1693,7 +2602,7 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] // ... MBB = JoinMBB; - BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg) + BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) .addReg(TrueReg).addMBB(StartMBB) .addReg(FalseReg).addMBB(FalseMBB); @@ -1703,42 +2612,52 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI, // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. // StoreOpcode is the store to use and Invert says whether the store should -// happen when the condition is false rather than true. +// happen when the condition is false rather than true. If a STORE ON +// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. MachineBasicBlock * SystemZTargetLowering::emitCondStore(MachineInstr *MI, MachineBasicBlock *MBB, - unsigned StoreOpcode, bool Invert) const { + unsigned StoreOpcode, unsigned STOCOpcode, + bool Invert) const { const SystemZInstrInfo *TII = TM.getInstrInfo(); - MachineOperand Base = MI->getOperand(0); - int64_t Disp = MI->getOperand(1).getImm(); - unsigned IndexReg = MI->getOperand(2).getReg(); - unsigned SrcReg = MI->getOperand(3).getReg(); - unsigned CCMask = MI->getOperand(4).getImm(); + unsigned SrcReg = MI->getOperand(0).getReg(); + MachineOperand Base = MI->getOperand(1); + int64_t Disp = MI->getOperand(2).getImm(); + unsigned IndexReg = MI->getOperand(3).getReg(); + unsigned CCValid = MI->getOperand(4).getImm(); + unsigned CCMask = MI->getOperand(5).getImm(); DebugLoc DL = MI->getDebugLoc(); StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); + // Use STOCOpcode if possible. We could use different store patterns in + // order to avoid matching the index register, but the performance trade-offs + // might be more complicated in that case. + if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { + if (Invert) + CCMask ^= CCValid; + BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) + .addReg(SrcReg).addOperand(Base).addImm(Disp) + .addImm(CCValid).addImm(CCMask); + MI->eraseFromParent(); + return MBB; + } + // Get the condition needed to branch around the store. 
if (!Invert) - CCMask = CCMask ^ SystemZ::CCMASK_ANY; + CCMask ^= CCValid; MachineBasicBlock *StartMBB = MBB; - MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); // StartMBB: // BRC CCMask, JoinMBB // # fallthrough to FalseMBB - // - // The original DAG glues comparisons to their uses, both to ensure - // that no CC-clobbering instructions are inserted between them, and - // to ensure that comparison results are not reused. This means that - // this CondStore is the sole user of any preceding comparison instruction - // and that we can try to use a fused compare and branch instead. MBB = StartMBB; - if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB)) - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); MBB->addSuccessor(JoinMBB); MBB->addSuccessor(FalseMBB); @@ -1770,7 +2689,6 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); bool IsSubWord = (BitSize < 32); // Extract the operands. Base can be a register or a frame index. @@ -1807,7 +2725,7 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, // Insert a basic block for the main loop. MachineBasicBlock *StartMBB = MBB; - MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); // StartMBB: @@ -1841,11 +2759,11 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, .addReg(RotatedOldVal).addOperand(Src2); if (BitSize < 32) // XILF with the upper BitSize bits set. - BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) + BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); else if (BitSize == 32) // XILF with every bit set. - BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) + BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) .addReg(Tmp).addImm(~uint32_t(0)); else { // Use LCGR and add -1 to the result, which is more compact than @@ -1870,7 +2788,8 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -1893,7 +2812,6 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); bool IsSubWord = (BitSize < 32); // Extract the operands. Base can be a register or a frame index. @@ -1929,7 +2847,7 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, // Insert 3 basic blocks for the loop. 
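emitAtomicLoadBinary above performs an 8- or 16-bit atomic operation on the containing aligned 32-bit word: rotate the field into position, apply the operation while preserving the other bits, rotate back, and retry with CS until the store succeeds. A portable sketch of the same loop shape for an 8-bit fetch-AND, with compare_exchange standing in for CS (the helper and its layout are illustrative):

#include <atomic>
#include <cassert>
#include <cstdint>

// Atomically AND an 8-bit field inside an aligned 32-bit word and return the
// old field value.  Shift selects the byte (0, 8, 16 or 24).
uint8_t atomicAnd8(std::atomic<uint32_t> &Word, unsigned Shift, uint8_t Src2) {
  const uint32_t FieldMask = 0xffu << Shift;
  uint32_t Old = Word.load();
  for (;;) {
    // Apply the operation to the selected byte, keep the other bytes intact.
    uint32_t NewField = ((Old >> Shift) & 0xff) & Src2;
    uint32_t New = (Old & ~FieldMask) | (NewField << Shift);
    if (Word.compare_exchange_weak(Old, New))   // CS-style retry loop
      return uint8_t(Old >> Shift);
  }
}

int main() {
  std::atomic<uint32_t> W(0x11223344);
  uint8_t Old = atomicAnd8(W, 8, 0x0f);         // operate on the 0x33 byte
  assert(Old == 0x33 && W.load() == 0x11220344);
  return 0;
}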
MachineBasicBlock *StartMBB = MBB; - MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); @@ -1955,17 +2873,10 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, if (IsSubWord) BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) .addReg(OldVal).addReg(BitShift).addImm(0); - unsigned FusedOpcode = TII->getCompareAndBranch(CompareOpcode); - if (FusedOpcode) - BuildMI(MBB, DL, TII->get(FusedOpcode)) - .addReg(RotatedOldVal).addReg(Src2) - .addImm(KeepOldMask).addMBB(UpdateMBB); - else { - BuildMI(MBB, DL, TII->get(CompareOpcode)) - .addReg(RotatedOldVal).addReg(Src2); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)) - .addImm(KeepOldMask).addMBB(UpdateMBB); - } + BuildMI(MBB, DL, TII->get(CompareOpcode)) + .addReg(RotatedOldVal).addReg(Src2); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); MBB->addSuccessor(UpdateMBB); MBB->addSuccessor(UseAltMBB); @@ -1995,7 +2906,8 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -2011,7 +2923,6 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, const SystemZInstrInfo *TII = TM.getInstrInfo(); MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); // Extract the operands. Base can be a register or a frame index. unsigned Dest = MI->getOperand(0).getReg(); @@ -2043,7 +2954,7 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, // Insert 2 basic blocks for the loop. MachineBasicBlock *StartMBB = MBB; - MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); + MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); @@ -2067,7 +2978,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, // ^^ Replace the upper 32-BitSize bits of the // comparison value with those that we loaded, // so that we can use a full word comparison. 
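The comment above is the key step of emitAtomicCmpSwapW: the caller supplies only the sub-word compare value, so the loop merges in the bytes just loaded from memory and can then use a full-word compare and CS. The same shape in portable C++ for a byte within a 32-bit word (illustrative, not the MI sequence):

#include <atomic>
#include <cassert>
#include <cstdint>

// Compare-and-swap an 8-bit field inside an aligned 32-bit word; returns the
// old field value.  Shift selects the byte (0, 8, 16 or 24).
uint8_t cmpSwap8(std::atomic<uint32_t> &Word, unsigned Shift,
                 uint8_t Cmp, uint8_t Swap) {
  const uint32_t FieldMask = 0xffu << Shift;
  uint32_t Old = Word.load();
  for (;;) {
    // Widen the expected and new values with the bytes we just loaded.
    uint32_t Expected = (Old & ~FieldMask) | (uint32_t(Cmp) << Shift);
    uint32_t Desired  = (Old & ~FieldMask) | (uint32_t(Swap) << Shift);
    if (Word.compare_exchange_weak(Expected, Desired))
      return Cmp;                               // field matched and was swapped
    Old = Expected;                             // word as reloaded by the CAS
    if (uint8_t(Old >> Shift) != Cmp)
      return uint8_t(Old >> Shift);             // field really differs: done
  }
}

int main() {
  std::atomic<uint32_t> W(0x11223344);
  assert(cmpSwap8(W, 8, 0x33, 0x77) == 0x33 && W.load() == 0x11227744);
  assert(cmpSwap8(W, 8, 0x33, 0x00) == 0x77 && W.load() == 0x11227744);
  return 0;
}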
- // CRJNE %Dest, %RetryCmpVal, DoneMBB + // CR %Dest, %RetryCmpVal + // JNE DoneMBB // # Fall through to SetMBB MBB = LoopMBB; BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) @@ -2083,9 +2995,11 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, .addReg(OldVal).addReg(BitShift).addImm(BitSize); BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); - BuildMI(MBB, DL, TII->get(SystemZ::CRJ)) - .addReg(Dest).addReg(RetryCmpVal) - .addImm(MaskNE).addMBB(DoneMBB); + BuildMI(MBB, DL, TII->get(SystemZ::CR)) + .addReg(Dest).addReg(RetryCmpVal); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP) + .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); MBB->addSuccessor(DoneMBB); MBB->addSuccessor(SetMBB); @@ -2105,7 +3019,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); - BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); MBB->addSuccessor(DoneMBB); @@ -2115,8 +3030,8 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true // if the high register of the GR128 value must be cleared or false if -// it's "don't care". SubReg is subreg_odd32 when extending a GR32 -// and subreg_odd when extending a GR64. +// it's "don't care". SubReg is subreg_l32 when extending a GR32 +// and subreg_l64 when extending a GR64. MachineBasicBlock * SystemZTargetLowering::emitExt128(MachineInstr *MI, MachineBasicBlock *MBB, @@ -2138,7 +3053,7 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI, BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) .addImm(0); BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) - .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high); + .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); In128 = NewIn128; } BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) @@ -2148,9 +3063,238 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI, return MBB; } +MachineBasicBlock * +SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI, + MachineBasicBlock *MBB, + unsigned Opcode) const { + const SystemZInstrInfo *TII = TM.getInstrInfo(); + MachineFunction &MF = *MBB->getParent(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + DebugLoc DL = MI->getDebugLoc(); + + MachineOperand DestBase = earlyUseOperand(MI->getOperand(0)); + uint64_t DestDisp = MI->getOperand(1).getImm(); + MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2)); + uint64_t SrcDisp = MI->getOperand(3).getImm(); + uint64_t Length = MI->getOperand(4).getImm(); + + // When generating more than one CLC, all but the last will need to + // branch to the end when a difference is found. + MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? + splitBlockAfter(MI, MBB) : 0); + + // Check for the loop form, in which operand 5 is the trip count. + if (MI->getNumExplicitOperands() > 5) { + bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); + + uint64_t StartCountReg = MI->getOperand(5).getReg(); + uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); + uint64_t StartDestReg = (HaveSingleBase ? 
StartSrcReg : + forceReg(MI, DestBase, TII)); + + const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; + uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); + uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : + MRI.createVirtualRegister(RC)); + uint64_t NextSrcReg = MRI.createVirtualRegister(RC); + uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : + MRI.createVirtualRegister(RC)); + + RC = &SystemZ::GR64BitRegClass; + uint64_t ThisCountReg = MRI.createVirtualRegister(RC); + uint64_t NextCountReg = MRI.createVirtualRegister(RC); + + MachineBasicBlock *StartMBB = MBB; + MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); + MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); + MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB); + + // StartMBB: + // # fall through to LoopMMB + MBB->addSuccessor(LoopMBB); + + // LoopMBB: + // %ThisDestReg = phi [ %StartDestReg, StartMBB ], + // [ %NextDestReg, NextMBB ] + // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ], + // [ %NextSrcReg, NextMBB ] + // %ThisCountReg = phi [ %StartCountReg, StartMBB ], + // [ %NextCountReg, NextMBB ] + // ( PFD 2, 768+DestDisp(%ThisDestReg) ) + // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg) + // ( JLH EndMBB ) + // + // The prefetch is used only for MVC. The JLH is used only for CLC. + MBB = LoopMBB; + + BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) + .addReg(StartDestReg).addMBB(StartMBB) + .addReg(NextDestReg).addMBB(NextMBB); + if (!HaveSingleBase) + BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) + .addReg(StartSrcReg).addMBB(StartMBB) + .addReg(NextSrcReg).addMBB(NextMBB); + BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) + .addReg(StartCountReg).addMBB(StartMBB) + .addReg(NextCountReg).addMBB(NextMBB); + if (Opcode == SystemZ::MVC) + BuildMI(MBB, DL, TII->get(SystemZ::PFD)) + .addImm(SystemZ::PFD_WRITE) + .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0); + BuildMI(MBB, DL, TII->get(Opcode)) + .addReg(ThisDestReg).addImm(DestDisp).addImm(256) + .addReg(ThisSrcReg).addImm(SrcDisp); + if (EndMBB) { + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) + .addMBB(EndMBB); + MBB->addSuccessor(EndMBB); + MBB->addSuccessor(NextMBB); + } + + // NextMBB: + // %NextDestReg = LA 256(%ThisDestReg) + // %NextSrcReg = LA 256(%ThisSrcReg) + // %NextCountReg = AGHI %ThisCountReg, -1 + // CGHI %NextCountReg, 0 + // JLH LoopMBB + // # fall through to DoneMMB + // + // The AGHI, CGHI and JLH should be converted to BRCTG by later passes. + MBB = NextMBB; + + BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) + .addReg(ThisDestReg).addImm(256).addReg(0); + if (!HaveSingleBase) + BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) + .addReg(ThisSrcReg).addImm(256).addReg(0); + BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) + .addReg(ThisCountReg).addImm(-1); + BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) + .addReg(NextCountReg).addImm(0); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) + .addMBB(LoopMBB); + MBB->addSuccessor(LoopMBB); + MBB->addSuccessor(DoneMBB); + + DestBase = MachineOperand::CreateReg(NextDestReg, false); + SrcBase = MachineOperand::CreateReg(NextSrcReg, false); + Length &= 255; + MBB = DoneMBB; + } + // Handle any remaining bytes with straight-line code. + while (Length > 0) { + uint64_t ThisLength = std::min(Length, uint64_t(256)); + // The previous iteration might have created out-of-range displacements. 
+ // Apply them using LAY if so. + if (!isUInt<12>(DestDisp)) { + unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) + .addOperand(DestBase).addImm(DestDisp).addReg(0); + DestBase = MachineOperand::CreateReg(Reg, false); + DestDisp = 0; + } + if (!isUInt<12>(SrcDisp)) { + unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) + .addOperand(SrcBase).addImm(SrcDisp).addReg(0); + SrcBase = MachineOperand::CreateReg(Reg, false); + SrcDisp = 0; + } + BuildMI(*MBB, MI, DL, TII->get(Opcode)) + .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength) + .addOperand(SrcBase).addImm(SrcDisp); + DestDisp += ThisLength; + SrcDisp += ThisLength; + Length -= ThisLength; + // If there's another CLC to go, branch to the end if a difference + // was found. + if (EndMBB && Length > 0) { + MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) + .addMBB(EndMBB); + MBB->addSuccessor(EndMBB); + MBB->addSuccessor(NextMBB); + MBB = NextMBB; + } + } + if (EndMBB) { + MBB->addSuccessor(EndMBB); + MBB = EndMBB; + MBB->addLiveIn(SystemZ::CC); + } + + MI->eraseFromParent(); + return MBB; +} + +// Decompose string pseudo-instruction MI into a loop that continually performs +// Opcode until CC != 3. +MachineBasicBlock * +SystemZTargetLowering::emitStringWrapper(MachineInstr *MI, + MachineBasicBlock *MBB, + unsigned Opcode) const { + const SystemZInstrInfo *TII = TM.getInstrInfo(); + MachineFunction &MF = *MBB->getParent(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + DebugLoc DL = MI->getDebugLoc(); + + uint64_t End1Reg = MI->getOperand(0).getReg(); + uint64_t Start1Reg = MI->getOperand(1).getReg(); + uint64_t Start2Reg = MI->getOperand(2).getReg(); + uint64_t CharReg = MI->getOperand(3).getReg(); + + const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass; + uint64_t This1Reg = MRI.createVirtualRegister(RC); + uint64_t This2Reg = MRI.createVirtualRegister(RC); + uint64_t End2Reg = MRI.createVirtualRegister(RC); + + MachineBasicBlock *StartMBB = MBB; + MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); + MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); + + // StartMBB: + // # fall through to LoopMMB + MBB->addSuccessor(LoopMBB); + + // LoopMBB: + // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ] + // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ] + // R0L = %CharReg + // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L + // JO LoopMBB + // # fall through to DoneMMB + // + // The load of R0L can be hoisted by post-RA LICM. 
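emitMemMemWrapper above decomposes a long block operation into MVC/CLC-sized pieces: a loop handling 256 bytes per iteration with a precomputed trip count, then straight-line operations for the remainder, bumping the displacements as it goes. The byte bookkeeping can be modelled in ordinary C++ (a host-side sketch of the chunking only, not the MI emission; 256 is the longest block a single MVC can move):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

// Copy Length bytes in MVC-sized pieces: a loop of 256-byte blocks followed
// by straight-line copies for whatever is left.
void chunkedCopy(char *Dest, const char *Src, size_t Length) {
  size_t Count = Length / 256;          // trip count of the 256-byte loop
  for (size_t I = 0; I < Count; ++I) {
    std::memcpy(Dest, Src, 256);        // stands in for one MVC
    Dest += 256;
    Src += 256;
  }
  Length &= 255;                        // remaining bytes, handled inline
  if (Length > 0)
    std::memcpy(Dest, Src, Length);
}

int main() {
  std::vector<char> In(1000), Out(1000);
  for (size_t I = 0; I < In.size(); ++I)
    In[I] = char(I);
  chunkedCopy(Out.data(), In.data(), In.size());
  assert(std::memcmp(In.data(), Out.data(), In.size()) == 0);
  return 0;
}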
+ MBB = LoopMBB; + + BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) + .addReg(Start1Reg).addMBB(StartMBB) + .addReg(End1Reg).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) + .addReg(Start2Reg).addMBB(StartMBB) + .addReg(End2Reg).addMBB(LoopMBB); + BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); + BuildMI(MBB, DL, TII->get(Opcode)) + .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) + .addReg(This1Reg).addReg(This2Reg); + BuildMI(MBB, DL, TII->get(SystemZ::BRC)) + .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); + MBB->addSuccessor(LoopMBB); + MBB->addSuccessor(DoneMBB); + + DoneMBB->addLiveIn(SystemZ::CC); + + MI->eraseFromParent(); + return DoneMBB; +} + MachineBasicBlock *SystemZTargetLowering:: EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { switch (MI->getOpcode()) { + case SystemZ::Select32Mux: case SystemZ::Select32: case SystemZ::SelectF32: case SystemZ::Select64: @@ -2158,49 +3302,45 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { case SystemZ::SelectF128: return emitSelect(MI, MBB); - case SystemZ::CondStore8_32: - return emitCondStore(MI, MBB, SystemZ::STC32, false); - case SystemZ::CondStore8_32Inv: - return emitCondStore(MI, MBB, SystemZ::STC32, true); - case SystemZ::CondStore16_32: - return emitCondStore(MI, MBB, SystemZ::STH32, false); - case SystemZ::CondStore16_32Inv: - return emitCondStore(MI, MBB, SystemZ::STH32, true); - case SystemZ::CondStore32_32: - return emitCondStore(MI, MBB, SystemZ::ST32, false); - case SystemZ::CondStore32_32Inv: - return emitCondStore(MI, MBB, SystemZ::ST32, true); + case SystemZ::CondStore8Mux: + return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); + case SystemZ::CondStore8MuxInv: + return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); + case SystemZ::CondStore16Mux: + return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); + case SystemZ::CondStore16MuxInv: + return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); case SystemZ::CondStore8: - return emitCondStore(MI, MBB, SystemZ::STC, false); + return emitCondStore(MI, MBB, SystemZ::STC, 0, false); case SystemZ::CondStore8Inv: - return emitCondStore(MI, MBB, SystemZ::STC, true); + return emitCondStore(MI, MBB, SystemZ::STC, 0, true); case SystemZ::CondStore16: - return emitCondStore(MI, MBB, SystemZ::STH, false); + return emitCondStore(MI, MBB, SystemZ::STH, 0, false); case SystemZ::CondStore16Inv: - return emitCondStore(MI, MBB, SystemZ::STH, true); + return emitCondStore(MI, MBB, SystemZ::STH, 0, true); case SystemZ::CondStore32: - return emitCondStore(MI, MBB, SystemZ::ST, false); + return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); case SystemZ::CondStore32Inv: - return emitCondStore(MI, MBB, SystemZ::ST, true); + return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); case SystemZ::CondStore64: - return emitCondStore(MI, MBB, SystemZ::STG, false); + return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); case SystemZ::CondStore64Inv: - return emitCondStore(MI, MBB, SystemZ::STG, true); + return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); case SystemZ::CondStoreF32: - return emitCondStore(MI, MBB, SystemZ::STE, false); + return emitCondStore(MI, MBB, SystemZ::STE, 0, false); case SystemZ::CondStoreF32Inv: - return emitCondStore(MI, MBB, SystemZ::STE, true); + return emitCondStore(MI, MBB, SystemZ::STE, 0, true); case SystemZ::CondStoreF64: - return emitCondStore(MI, 
MBB, SystemZ::STD, false); + return emitCondStore(MI, MBB, SystemZ::STD, 0, false); case SystemZ::CondStoreF64Inv: - return emitCondStore(MI, MBB, SystemZ::STD, true); + return emitCondStore(MI, MBB, SystemZ::STD, 0, true); case SystemZ::AEXT128_64: - return emitExt128(MI, MBB, false, SystemZ::subreg_low); + return emitExt128(MI, MBB, false, SystemZ::subreg_l64); case SystemZ::ZEXT128_32: - return emitExt128(MI, MBB, true, SystemZ::subreg_low32); + return emitExt128(MI, MBB, true, SystemZ::subreg_l32); case SystemZ::ZEXT128_64: - return emitExt128(MI, MBB, true, SystemZ::subreg_low); + return emitExt128(MI, MBB, true, SystemZ::subreg_l64); case SystemZ::ATOMIC_SWAPW: return emitAtomicLoadBinary(MI, MBB, 0, 0); @@ -2236,98 +3376,98 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { case SystemZ::ATOMIC_LOADW_NR: return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); case SystemZ::ATOMIC_LOADW_NILH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); case SystemZ::ATOMIC_LOAD_NR: return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); - case SystemZ::ATOMIC_LOAD_NILL32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32); - case SystemZ::ATOMIC_LOAD_NILH32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32); - case SystemZ::ATOMIC_LOAD_NILF32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32); - case SystemZ::ATOMIC_LOAD_NGR: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); case SystemZ::ATOMIC_LOAD_NILL: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); case SystemZ::ATOMIC_LOAD_NILH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64); - case SystemZ::ATOMIC_LOAD_NIHL: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64); - case SystemZ::ATOMIC_LOAD_NIHH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); case SystemZ::ATOMIC_LOAD_NILF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64); - case SystemZ::ATOMIC_LOAD_NIHF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); + case SystemZ::ATOMIC_LOAD_NGR: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); + case SystemZ::ATOMIC_LOAD_NILL64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); + case SystemZ::ATOMIC_LOAD_NILH64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); + case SystemZ::ATOMIC_LOAD_NIHL64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); + case SystemZ::ATOMIC_LOAD_NIHH64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); + case SystemZ::ATOMIC_LOAD_NILF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); + case SystemZ::ATOMIC_LOAD_NIHF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64); case SystemZ::ATOMIC_LOADW_OR: return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); case SystemZ::ATOMIC_LOADW_OILH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0); + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0); case SystemZ::ATOMIC_LOAD_OR: return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); - case SystemZ::ATOMIC_LOAD_OILL32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32); - case SystemZ::ATOMIC_LOAD_OILH32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32); - case SystemZ::ATOMIC_LOAD_OILF32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32); - case 
SystemZ::ATOMIC_LOAD_OGR: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); case SystemZ::ATOMIC_LOAD_OILL: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32); case SystemZ::ATOMIC_LOAD_OILH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64); - case SystemZ::ATOMIC_LOAD_OIHL: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64); - case SystemZ::ATOMIC_LOAD_OIHH: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32); case SystemZ::ATOMIC_LOAD_OILF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64); - case SystemZ::ATOMIC_LOAD_OIHF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64); + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32); + case SystemZ::ATOMIC_LOAD_OGR: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); + case SystemZ::ATOMIC_LOAD_OILL64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64); + case SystemZ::ATOMIC_LOAD_OILH64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64); + case SystemZ::ATOMIC_LOAD_OIHL64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64); + case SystemZ::ATOMIC_LOAD_OIHH64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64); + case SystemZ::ATOMIC_LOAD_OILF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64); + case SystemZ::ATOMIC_LOAD_OIHF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64); case SystemZ::ATOMIC_LOADW_XR: return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); case SystemZ::ATOMIC_LOADW_XILF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0); + return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0); case SystemZ::ATOMIC_LOAD_XR: return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); - case SystemZ::ATOMIC_LOAD_XILF32: - return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32); + case SystemZ::ATOMIC_LOAD_XILF: + return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32); case SystemZ::ATOMIC_LOAD_XGR: return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); - case SystemZ::ATOMIC_LOAD_XILF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64); - case SystemZ::ATOMIC_LOAD_XIHF: - return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64); + case SystemZ::ATOMIC_LOAD_XILF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64); + case SystemZ::ATOMIC_LOAD_XIHF64: + return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64); case SystemZ::ATOMIC_LOADW_NRi: return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); case SystemZ::ATOMIC_LOADW_NILHi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true); case SystemZ::ATOMIC_LOAD_NRi: return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); - case SystemZ::ATOMIC_LOAD_NILL32i: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true); - case SystemZ::ATOMIC_LOAD_NILH32i: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true); - case SystemZ::ATOMIC_LOAD_NILF32i: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true); - case SystemZ::ATOMIC_LOAD_NGRi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); case SystemZ::ATOMIC_LOAD_NILLi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true); case SystemZ::ATOMIC_LOAD_NILHi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true); - case SystemZ::ATOMIC_LOAD_NIHLi: - return emitAtomicLoadBinary(MI, 
MBB, SystemZ::NIHL, 64, true); - case SystemZ::ATOMIC_LOAD_NIHHi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true); case SystemZ::ATOMIC_LOAD_NILFi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true); - case SystemZ::ATOMIC_LOAD_NIHFi: - return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true); + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true); + case SystemZ::ATOMIC_LOAD_NGRi: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); + case SystemZ::ATOMIC_LOAD_NILL64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true); + case SystemZ::ATOMIC_LOAD_NILH64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true); + case SystemZ::ATOMIC_LOAD_NIHL64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true); + case SystemZ::ATOMIC_LOAD_NIHH64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true); + case SystemZ::ATOMIC_LOAD_NILF64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true); + case SystemZ::ATOMIC_LOAD_NIHF64i: + return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true); case SystemZ::ATOMIC_LOADW_MIN: return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, @@ -2371,16 +3511,27 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { case SystemZ::ATOMIC_CMP_SWAPW: return emitAtomicCmpSwapW(MI, MBB); - case SystemZ::BRC: - // The original DAG glues comparisons to their uses, both to ensure - // that no CC-clobbering instructions are inserted between them, and - // to ensure that comparison results are not reused. This means that - // a BRC is the sole user of a preceding comparison and that we can - // try to use a fused compare and branch instead. - if (convertPrevCompareToBranch(MBB, MI, MI->getOperand(0).getImm(), - MI->getOperand(1).getMBB())) - MI->eraseFromParent(); - return MBB; + case SystemZ::MVCSequence: + case SystemZ::MVCLoop: + return emitMemMemWrapper(MI, MBB, SystemZ::MVC); + case SystemZ::NCSequence: + case SystemZ::NCLoop: + return emitMemMemWrapper(MI, MBB, SystemZ::NC); + case SystemZ::OCSequence: + case SystemZ::OCLoop: + return emitMemMemWrapper(MI, MBB, SystemZ::OC); + case SystemZ::XCSequence: + case SystemZ::XCLoop: + return emitMemMemWrapper(MI, MBB, SystemZ::XC); + case SystemZ::CLCSequence: + case SystemZ::CLCLoop: + return emitMemMemWrapper(MI, MBB, SystemZ::CLC); + case SystemZ::CLSTLoop: + return emitStringWrapper(MI, MBB, SystemZ::CLST); + case SystemZ::MVSTLoop: + return emitStringWrapper(MI, MBB, SystemZ::MVST); + case SystemZ::SRSTLoop: + return emitStringWrapper(MI, MBB, SystemZ::SRST); default: llvm_unreachable("Unexpected instr type to insert"); }
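The CLSTLoop, MVSTLoop and SRSTLoop cases above expand through emitStringWrapper into a loop that re-executes the string instruction while CC is 3, that is, while it stopped after processing a block without reaching a result. The control structure is simply "retry until the instruction reports an answer"; a portable sketch of that shape for a chunked string comparison (block size and helper names are illustrative):

#include <cassert>

// compareBlock plays the role of CLST: it either produces a final answer or
// reports "undecided" (the analogue of CC 3), in which case the caller retries.
bool compareBlock(const char *&A, const char *&B, int &Result) {
  for (unsigned I = 0; I < 256; ++I, ++A, ++B) {
    if (*A != *B) { Result = (*A < *B) ? -1 : 1; return true; }
    if (*A == '\0') { Result = 0; return true; }
  }
  return false;                         // block exhausted, keep going
}

int chunkedStrcmp(const char *A, const char *B) {
  int Result;
  while (!compareBlock(A, B, Result))
    ;                                   // the JO back to LoopMBB
  return Result;
}

int main() {
  assert(chunkedStrcmp("same", "same") == 0);
  assert(chunkedStrcmp("abc", "abd") < 0);
  assert(chunkedStrcmp("abd", "abc") > 0);
  return 0;
}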