setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
// Expand certain atomics
- setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_8 , MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
if (!Subtarget->is64Bit()) {
- setOperationAction(ISD::ATOMIC_LOAD_ADD_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_AND_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_OR_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_XOR_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_NAND_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_SWAP_64, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
}
// Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
}
switch (SetCCOpcode) {
- default: break;
+ default: assert(0 && "Invalid integer condition!");
case ISD::SETEQ: X86CC = X86::COND_E; break;
case ISD::SETGT: X86CC = X86::COND_G; break;
case ISD::SETGE: X86CC = X86::COND_GE; break;
case ISD::SETULE: X86CC = X86::COND_BE; break;
case ISD::SETUGE: X86CC = X86::COND_AE; break;
}
- } else {
- // First determine if it is required or is profitable to flip the operands.
-
- // If LHS is a foldable load, but RHS is not, flip the condition.
- if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
- !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
- SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
- std::swap(LHS, RHS);
- }
+ return true;
+ }
+
+ // First determine if it is required or is profitable to flip the operands.
- switch (SetCCOpcode) {
- default: break;
- case ISD::SETOLT:
- case ISD::SETOLE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- std::swap(LHS, RHS);
- break;
- }
+ // If LHS is a foldable load, but RHS is not, flip the condition.
+ if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
+ !(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
+ SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
+ std::swap(LHS, RHS);
+ }
- // On a floating point condition, the flags are set as follows:
- // ZF PF CF op
- // 0 | 0 | 0 | X > Y
- // 0 | 0 | 1 | X < Y
- // 1 | 0 | 0 | X == Y
- // 1 | 1 | 1 | unordered
- switch (SetCCOpcode) {
- default: break;
- case ISD::SETUEQ:
- case ISD::SETEQ:
- X86CC = X86::COND_E;
- break;
- case ISD::SETOLT: // flipped
- case ISD::SETOGT:
- case ISD::SETGT:
- X86CC = X86::COND_A;
- break;
- case ISD::SETOLE: // flipped
- case ISD::SETOGE:
- case ISD::SETGE:
- X86CC = X86::COND_AE;
- break;
- case ISD::SETUGT: // flipped
- case ISD::SETULT:
- case ISD::SETLT:
- X86CC = X86::COND_B;
- break;
- case ISD::SETUGE: // flipped
- case ISD::SETULE:
- case ISD::SETLE:
- X86CC = X86::COND_BE;
- break;
- case ISD::SETONE:
- case ISD::SETNE:
- X86CC = X86::COND_NE;
- break;
- case ISD::SETUO:
- X86CC = X86::COND_P;
- break;
- case ISD::SETO:
- X86CC = X86::COND_NP;
- break;
- }
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETOLT:
+ case ISD::SETOLE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ std::swap(LHS, RHS);
+ break;
}
- return X86CC != X86::COND_INVALID;
+ // On a floating point condition, the flags are set as follows:
+ // ZF PF CF op
+ // 0 | 0 | 0 | X > Y
+ // 0 | 0 | 1 | X < Y
+ // 1 | 0 | 0 | X == Y
+ // 1 | 1 | 1 | unordered
+ switch (SetCCOpcode) {
+ default: return false;
+ case ISD::SETUEQ:
+ case ISD::SETEQ: X86CC = X86::COND_E; return true;
+ case ISD::SETOLT: // flipped
+ case ISD::SETOGT:
+ case ISD::SETGT: X86CC = X86::COND_A; return true;
+ case ISD::SETOLE: // flipped
+ case ISD::SETOGE:
+ case ISD::SETGE: X86CC = X86::COND_AE; return true;
+ case ISD::SETUGT: // flipped
+ case ISD::SETULT:
+ case ISD::SETLT: X86CC = X86::COND_B; return true;
+ case ISD::SETUGE: // flipped
+ case ISD::SETULE:
+ case ISD::SETLE: X86CC = X86::COND_BE; return true;
+ case ISD::SETONE:
+ case ISD::SETNE: X86CC = X86::COND_NE; return true;
+ case ISD::SETUO: X86CC = X86::COND_P; return true;
+ case ISD::SETO: X86CC = X86::COND_NP; return true;
+ }
}
/// hasFPCMov - is there a floating point cmov for the specific X86 condition
return cast<ConstantSDNode>(ElementBase)->getZExtValue() < NumElems;
}
+/// getSplatMaskEltNo - Given a splat mask, return the constant node that
+/// holds the index of the element we want to splat (the first defined,
+/// i.e. non-undef, operand of the mask).
+static SDValue getSplatMaskEltNo(SDNode *N) {
+  assert(isSplatMask(N) && "Not a splat mask");
+  unsigned NumElems = N->getNumOperands();
+  // In a splat mask every defined operand is the same constant index;
+  // undef operands are skipped, so the first constant found is the answer.
+  for (unsigned i = 0; i != NumElems; ++i) {
+    SDValue Elt = N->getOperand(i);
+    if (isa<ConstantSDNode>(Elt))
+      return Elt;
+  }
+  assert(0 && "No splat value found!");
+  return SDValue();
+}
+
+
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
return Op;
SDValue V1 = Op.getOperand(0);
SDValue Mask = Op.getOperand(2);
- unsigned NumElems = Mask.getNumOperands();
+ unsigned MaskNumElems = Mask.getNumOperands();
+ unsigned NumElems = MaskNumElems;
// Special handling of v4f32 -> v4i32.
if (VT != MVT::v4f32) {
- Mask = getUnpacklMask(NumElems, DAG);
+ // Find which element we want to splat.
+ SDNode* EltNoNode = getSplatMaskEltNo(Mask.getNode()).getNode();
+ unsigned EltNo = cast<ConstantSDNode>(EltNoNode)->getZExtValue();
+ // unpack elements to the correct location
while (NumElems > 4) {
+ if (EltNo < NumElems/2) {
+ Mask = getUnpacklMask(MaskNumElems, DAG);
+ } else {
+ Mask = getUnpackhMask(MaskNumElems, DAG);
+ EltNo -= NumElems/2;
+ }
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
NumElems >>= 1;
}
- Mask = getZeroVector(MVT::v4i32, true, DAG);
+ SDValue Cst = DAG.getConstant(EltNo, MVT::i32);
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
}
V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
++V2InOrder;
} else if (EltIdx < 8) {
V1Elts.push_back(Elt);
+ V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
++V1FromV1;
} else {
+ V1Elts.push_back(Elt);
V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
++V2FromV2;
}
bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
unsigned X86CC;
- if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
- Op0, Op1, DAG)) {
- Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
- return DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond);
- }
-
- assert(0 && "Illegal SetCC!");
- return SDValue();
+ if (!translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
+ Op0, Op1, DAG))
+ assert(0 && "Illegal SetCC!");
+
+
+ Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
+ return DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
}
SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
MVT T = Node->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Node->getOperand(2));
- return DAG.getAtomic((Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_8 ?
- ISD::ATOMIC_LOAD_ADD_8 :
- Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_16 ?
- ISD::ATOMIC_LOAD_ADD_16 :
- Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_32 ?
- ISD::ATOMIC_LOAD_ADD_32 :
- ISD::ATOMIC_LOAD_ADD_64),
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD,
+ cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), negOp,
cast<AtomicSDNode>(Node)->getSrcValue(),
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
- case ISD::ATOMIC_CMP_SWAP_8:
- case ISD::ATOMIC_CMP_SWAP_16:
- case ISD::ATOMIC_CMP_SWAP_32:
- case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
- case ISD::ATOMIC_LOAD_SUB_8:
- case ISD::ATOMIC_LOAD_SUB_16:
- case ISD::ATOMIC_LOAD_SUB_32: return LowerLOAD_SUB(Op,DAG);
- case ISD::ATOMIC_LOAD_SUB_64: return LowerLOAD_SUB(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
Results.push_back(edx.getValue(1));
return;
}
- case ISD::ATOMIC_CMP_SWAP_64: {
+ case ISD::ATOMIC_CMP_SWAP: {
MVT T = N->getValueType(0);
assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
SDValue cpInL, cpInH;
Results.push_back(cpOutH.getValue(1));
return;
}
- case ISD::ATOMIC_LOAD_ADD_64:
+ case ISD::ATOMIC_LOAD_ADD:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
return;
- case ISD::ATOMIC_LOAD_AND_64:
+ case ISD::ATOMIC_LOAD_AND:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
return;
- case ISD::ATOMIC_LOAD_NAND_64:
+ case ISD::ATOMIC_LOAD_NAND:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
return;
- case ISD::ATOMIC_LOAD_OR_64:
+ case ISD::ATOMIC_LOAD_OR:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
return;
- case ISD::ATOMIC_LOAD_SUB_64:
+ case ISD::ATOMIC_LOAD_SUB:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
return;
- case ISD::ATOMIC_LOAD_XOR_64:
+ case ISD::ATOMIC_LOAD_XOR:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
return;
- case ISD::ATOMIC_SWAP_64:
+ case ISD::ATOMIC_SWAP:
ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
return;
}
case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
+ case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
case X86ISD::UCOMI: return "X86ISD::UCOMI";