X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FARM%2FARMISelDAGToDAG.cpp;h=0626240333dabd297d4aa97c4f743d2eafe84243;hb=d78ebe1e12a9aae01cb11d4d10a3d0600407e5ca;hp=2c9481b86c557830fd6f195a8f81fdd16cad0857;hpb=e837dead3c8dc3445ef6a0e2322179c57e264a13;p=oota-llvm.git

diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 2c9481b86c5..0626240333d 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -14,8 +14,8 @@
 #define DEBUG_TYPE "arm-isel"
 #include "ARM.h"
 #include "ARMBaseInstrInfo.h"
-#include "ARMAddressingModes.h"
 #include "ARMTargetMachine.h"
+#include "MCTargetDesc/ARMAddressingModes.h"
 #include "llvm/CallingConv.h"
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
@@ -90,13 +90,20 @@ public:
   bool hasNoVMLxHazardUse(SDNode *N) const;
   bool isShifterOpProfitable(const SDValue &Shift,
                              ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
-  bool SelectShifterOperandReg(SDValue N, SDValue &A,
+  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                                SDValue &B, SDValue &C,
                                bool CheckProfitability = true);
-  bool SelectShiftShifterOperandReg(SDValue N, SDValue &A,
+  bool SelectImmShifterOperand(SDValue N, SDValue &A,
+                               SDValue &B, bool CheckProfitability = true);
+  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                     SDValue &B, SDValue &C) {
     // Don't apply the profitability check
-    return SelectShifterOperandReg(N, A, B, C, false);
+    return SelectRegShifterOperand(N, A, B, C, false);
+  }
+  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
+                                    SDValue &B) {
+    // Don't apply the profitability check
+    return SelectImmShifterOperand(N, A, B, false);
   }
 
   bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
@@ -122,8 +129,13 @@ public:
     return true;
   }
 
-  bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
+  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
+                             SDValue &Offset, SDValue &Opc);
+  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
+                             SDValue &Offset, SDValue &Opc);
+  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                              SDValue &Offset, SDValue &Opc);
+  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
   bool SelectAddrMode3(SDValue N, SDValue &Base,
                        SDValue &Offset, SDValue &Opc);
   bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
@@ -242,6 +254,8 @@ private:
 
   SDNode *SelectConcatVector(SDNode *N);
 
+  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
+
   /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
   /// inline asm expressions.
   virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
@@ -291,10 +305,10 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
 /// (N * Scale) where (N in [\arg RangeMin, \arg RangeMax).
 ///
 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
-static bool isScaledConstantInRange(SDValue Node, unsigned Scale,
+static bool isScaledConstantInRange(SDValue Node, int Scale,
                                     int RangeMin, int RangeMax,
                                     int &ScaledConstant) {
-  assert(Scale && "Invalid scale!");
+  assert(Scale > 0 && "Invalid scale!");
 
   // Check that this is a constant.
   const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
@@ -365,7 +379,30 @@ bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
   return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
 }
 
-bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
+bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
+                                              SDValue &BaseReg,
+                                              SDValue &Opc,
+                                              bool CheckProfitability) {
+  if (DisableShifterOp)
+    return false;
+
+  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
+
+  // Don't match base register only case. That is matched to a separate
+  // lower complexity pattern with explicit register operand.
+  if (ShOpcVal == ARM_AM::no_shift) return false;
+
+  BaseReg = N.getOperand(0);
+  unsigned ShImmVal = 0;
+  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
+  if (!RHS) return false;
+  ShImmVal = RHS->getZExtValue() & 31;
+  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
+                                  MVT::i32);
+  return true;
+}
+
+bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                               SDValue &BaseReg,
                                               SDValue &ShReg,
                                               SDValue &Opc,
@@ -373,7 +410,7 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
   if (DisableShifterOp)
     return false;
 
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
+  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
 
   // Don't match base register only case. That is matched to a separate
   // lower complexity pattern with explicit register operand.
@@ -381,19 +418,18 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
 
   BaseReg = N.getOperand(0);
   unsigned ShImmVal = 0;
-  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
-    ShReg = CurDAG->getRegister(0, MVT::i32);
-    ShImmVal = RHS->getZExtValue() & 31;
-  } else {
-    ShReg = N.getOperand(1);
-    if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
-      return false;
-  }
+  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
+  if (RHS) return false;
+
+  ShReg = N.getOperand(1);
+  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
+    return false;
   Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                   MVT::i32);
   return true;
 }
+
 
 bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                           SDValue &Base,
                                           SDValue &OffImm) {
@@ -483,13 +519,10 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
     return false;
   }
 
-  if (Subtarget->isCortexA9() && !N.hasOneUse())
-    // Compute R +/- (R << N) and reuse it.
-    return false;
-
   // Otherwise this is R +/- [possibly shifted] R.
   ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
+  ARM_AM::ShiftOpc ShOpcVal =
+    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
   unsigned ShAmt = 0;
 
   Base   = N.getOperand(0);
@@ -515,7 +548,7 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
   // Try matching (R shl C) + (R).
   if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
       !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
-    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
+    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
     if (ShOpcVal != ARM_AM::no_shift) {
       // Check to see if the RHS of the shift is a constant, if not, we can't
       // fold it.
@@ -630,7 +663,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
   // Otherwise this is R +/- [possibly shifted] R.
   ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ?
     ARM_AM::add:ARM_AM::sub;
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
+  ARM_AM::ShiftOpc ShOpcVal =
+    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
   unsigned ShAmt = 0;
 
   Base   = N.getOperand(0);
@@ -656,7 +690,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
   // Try matching (R shl C) + (R).
   if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
       !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
-    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
+    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
     if (ShOpcVal != ARM_AM::no_shift) {
       // Check to see if the RHS of the shift is a constant, if not, we can't
       // fold it.
@@ -683,7 +717,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
   return AM2_SHOP;
 }
 
-bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
+bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                             SDValue &Offset, SDValue &Opc) {
   unsigned Opcode = Op->getOpcode();
   ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
@@ -692,16 +726,11 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
     ? cast<LoadSDNode>(Op)->getAddressingMode()
     : cast<StoreSDNode>(Op)->getAddressingMode();
   ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
   int Val;
-  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
-    Offset = CurDAG->getRegister(0, MVT::i32);
-    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
-                                                      ARM_AM::no_shift),
-                                    MVT::i32);
-    return true;
-  }
+  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
+    return false;
 
   Offset = N;
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
+  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
   unsigned ShAmt = 0;
   if (ShOpcVal != ARM_AM::no_shift) {
     // Check to see if the RHS of the shift is a constant, if not, we can't fold
@@ -724,6 +753,50 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
   return true;
 }
 
+bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
+                                            SDValue &Offset, SDValue &Opc) {
+  unsigned Opcode = Op->getOpcode();
+  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
+    ? cast<LoadSDNode>(Op)->getAddressingMode()
+    : cast<StoreSDNode>(Op)->getAddressingMode();
+  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
+    ? ARM_AM::add : ARM_AM::sub;
+  int Val;
+  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
+    if (AddSub == ARM_AM::sub) Val *= -1;
+    Offset = CurDAG->getRegister(0, MVT::i32);
+    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
+    return true;
+  }
+
+  return false;
+}
+
+
+bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
+                                            SDValue &Offset, SDValue &Opc) {
+  unsigned Opcode = Op->getOpcode();
+  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
+    ? cast<LoadSDNode>(Op)->getAddressingMode()
+    : cast<StoreSDNode>(Op)->getAddressingMode();
+  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
+    ? ARM_AM::add : ARM_AM::sub;
+  int Val;
+  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
+    Offset = CurDAG->getRegister(0, MVT::i32);
+    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
+                                                      ARM_AM::no_shift),
+                                    MVT::i32);
+    return true;
+  }
+
+  return false;
+}
+
+bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
+  Base = N;
+  return true;
+}
 
 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                       SDValue &Base, SDValue &Offset,
@@ -1079,7 +1152,7 @@ bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
   if (DisableShifterOp)
     return false;
 
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
+  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
 
   // Don't match base register only case. That is matched to a separate
   // lower complexity pattern with explicit register operand.
@@ -1208,21 +1281,15 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
     return false;
   }
 
-  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
-    // Compute R + (R << [1,2,3]) and reuse it.
-    Base = N;
-    return false;
-  }
-
   // Look for (R + R) or (R + (R << [1,2,3])).
   unsigned ShAmt = 0;
   Base   = N.getOperand(0);
   OffReg = N.getOperand(1);
 
   // Swap if it is ((R << c) + R).
-  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
+  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
   if (ShOpcVal != ARM_AM::lsl) {
-    ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
+    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
     if (ShOpcVal == ARM_AM::lsl)
       std::swap(Base, OffReg);
   }
@@ -1266,10 +1333,19 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
     bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
     unsigned Opcode = 0;
     bool Match = false;
-    if (LoadedVT == MVT::i32 &&
-        SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
-      Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
+    if (LoadedVT == MVT::i32 && isPre &&
+        SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
+      Opcode = ARM::LDR_PRE_IMM;
+      Match = true;
+    } else if (LoadedVT == MVT::i32 && !isPre &&
+        SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
+      Opcode = ARM::LDR_POST_IMM;
+      Match = true;
+    } else if (LoadedVT == MVT::i32 &&
+        SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
+      Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
       Match = true;
+
     } else if (LoadedVT == MVT::i16 &&
                SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
       Match = true;
@@ -1283,20 +1359,37 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
          Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
       }
     } else {
-      if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
+      if (isPre &&
+          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
+        Match = true;
+        Opcode = ARM::LDRB_PRE_IMM;
+      } else if (!isPre &&
+          SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
+        Match = true;
+        Opcode = ARM::LDRB_POST_IMM;
+      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
         Match = true;
-        Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
+        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
       }
     }
   }
 
   if (Match) {
-    SDValue Chain = LD->getChain();
-    SDValue Base = LD->getBasePtr();
-    SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
-                     CurDAG->getRegister(0, MVT::i32), Chain };
-    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
-                                  MVT::Other, Ops, 6);
+    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
+      SDValue Chain = LD->getChain();
+      SDValue Base = LD->getBasePtr();
+      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
+                       CurDAG->getRegister(0, MVT::i32), Chain };
+      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
+                                    MVT::i32, MVT::Other, Ops, 5);
+    } else {
+      SDValue Chain = LD->getChain();
+      SDValue Base = LD->getBasePtr();
+      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
+                       CurDAG->getRegister(0, MVT::i32), Chain };
+      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
+                                    MVT::i32, MVT::Other, Ops, 6);
+    }
   }
 
   return NULL;
@@ -1966,7 +2059,8 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                   Srl_imm)) {
         assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
 
-        unsigned Width = CountTrailingOnes_32(And_imm);
+        // Note: The width operand is encoded as width-1.
+        unsigned Width = CountTrailingOnes_32(And_imm) - 1;
         unsigned LSB = Srl_imm;
         SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
         SDValue Ops[] = { N->getOperand(0).getOperand(0),
@@ -1986,7 +2080,8 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
       unsigned Srl_imm = 0;
       if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
         assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
-        unsigned Width = 32 - Srl_imm;
+        // Note: The width operand is encoded as width-1.
+        unsigned Width = 32 - Srl_imm - 1;
         int LSB = Srl_imm - Shl_imm;
         if (LSB < 0)
           return NULL;
@@ -2034,10 +2129,16 @@ SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
   SDValue CPTmp0;
   SDValue CPTmp1;
   SDValue CPTmp2;
-  if (SelectShifterOperandReg(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
+  if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
+    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
+    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
+    return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
+  }
+
+  if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
     SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
     SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
-    return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
+    return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
   }
   return 0;
 }
@@ -2207,6 +2308,25 @@ SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
   return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
 }
 
+SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
+  SmallVector<SDValue, 6> Ops;
+  Ops.push_back(Node->getOperand(1)); // Ptr
+  Ops.push_back(Node->getOperand(2)); // Low part of Val1
+  Ops.push_back(Node->getOperand(3)); // High part of Val1
+  if (Opc == ARM::ATOMCMPXCHG6432) {
+    Ops.push_back(Node->getOperand(4)); // Low part of Val2
+    Ops.push_back(Node->getOperand(5)); // High part of Val2
+  }
+  Ops.push_back(Node->getOperand(0)); // Chain
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
+  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
+                                           MVT::i32, MVT::i32, MVT::Other,
+                                           Ops.data() ,Ops.size());
+  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
+  return ResNode;
+}
+
 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
   DebugLoc dl = N->getDebugLoc();
 
@@ -2269,8 +2389,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
     if (Subtarget->isThumb1Only()) {
-      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
-                                  CurDAG->getTargetConstant(0, MVT::i32));
+      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
+                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
+      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
     } else {
       unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                       ARM::t2ADDri : ARM::ADDri);
@@ -2307,7 +2428,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
           return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
         } else {
           SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
+          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
         }
       }
       if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
@@ -2323,7 +2444,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
+          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
        }
      }
    }
@@ -2986,6 +3107,23 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
 
   case ISD::CONCAT_VECTORS:
     return SelectConcatVector(N);
+
+  case ARMISD::ATOMOR64_DAG:
+    return SelectAtomic64(N, ARM::ATOMOR6432);
+  case ARMISD::ATOMXOR64_DAG:
+    return SelectAtomic64(N, ARM::ATOMXOR6432);
+  case ARMISD::ATOMADD64_DAG:
+    return SelectAtomic64(N, ARM::ATOMADD6432);
+  case ARMISD::ATOMSUB64_DAG:
+    return SelectAtomic64(N, ARM::ATOMSUB6432);
+  case ARMISD::ATOMNAND64_DAG:
+    return SelectAtomic64(N, ARM::ATOMNAND6432);
+  case ARMISD::ATOMAND64_DAG:
+    return SelectAtomic64(N, ARM::ATOMAND6432);
+  case ARMISD::ATOMSWAP64_DAG:
+    return SelectAtomic64(N, ARM::ATOMSWAP6432);
+  case ARMISD::ATOMCMPXCHG64_DAG:
+    return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
   }
 
   return SelectCode(N);
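
Note for readers skimming the patch: the recurring theme above is that the single SelectShifterOperandReg matcher is split into an immediate form (SelectImmShifterOperand, which only matches when the shift amount is a constant) and a register form (SelectRegShifterOperand, which now rejects constant shift amounts), and the addrmode2 offset selection is split the same way (SelectAddrMode2OffsetImm/ImmPre vs. SelectAddrMode2OffsetReg). The standalone C++ sketch below models only that dispatch decision; ShifterOp, matchImmShifter, and matchRegShifter are hypothetical stand-ins, not LLVM SelectionDAG or ARM_AM APIs, and the packed return value is a toy encoding rather than the real so_reg operand.

// Hypothetical, self-contained model of the imm/reg shifter split.
// Build with: g++ -std=c++17 shifter_split.cpp
#include <iostream>
#include <optional>

enum class ShiftOpc { None, Lsl, Lsr, Asr, Ror };

// A DAG operand reduced to the two facts the selectors care about:
// what kind of shift it is, and whether the shift amount is a constant.
struct ShifterOp {
  ShiftOpc Opc = ShiftOpc::None;
  bool AmountIsConst = false;
  unsigned ConstAmount = 0;  // meaningful only when AmountIsConst
};

// Mirrors SelectImmShifterOperand: matches only a constant shift amount and
// packs (shift kind, amount & 31) into one integer (toy stand-in for the
// value the real code builds with ARM_AM::getSORegOpc).
std::optional<unsigned> matchImmShifter(const ShifterOp &Op) {
  if (Op.Opc == ShiftOpc::None || !Op.AmountIsConst)
    return std::nullopt;
  unsigned Imm = Op.ConstAmount & 31;
  return (Imm << 3) | static_cast<unsigned>(Op.Opc);
}

// Mirrors SelectRegShifterOperand after the patch: a constant shift amount is
// now rejected here, so the immediate form above is the only one that takes it.
bool matchRegShifter(const ShifterOp &Op) {
  return Op.Opc != ShiftOpc::None && !Op.AmountIsConst;
}

int main() {
  ShifterOp ImmShift{ShiftOpc::Lsl, /*AmountIsConst=*/true, 2};   // r << 2
  ShifterOp RegShift{ShiftOpc::Lsr, /*AmountIsConst=*/false, 0};  // r >> rs

  std::cout << "imm form matches constant shift:  "
            << matchImmShifter(ImmShift).has_value() << '\n';  // 1
  std::cout << "reg form rejects constant shift:  "
            << !matchRegShifter(ImmShift) << '\n';             // 1
  std::cout << "reg form matches register shift:  "
            << matchRegShifter(RegShift) << '\n';              // 1
}

The same two-way split is what drives the new load/store selection in the patch: the *_IMM pre/post-indexed opcodes carry only the base plus an immediate operand, while the *_REG forms keep the register offset, which is why SelectARMIndexedLoad now builds a 5-operand machine node for LDR_PRE_IMM/LDRB_PRE_IMM and a 6-operand node for the register variants.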