setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
}
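// (Annotation, not in the original patch.) Marking these FP operations Expand
// for every packed value type tells the legalizer to fall back to a generic
// lowering -- typically unrolling the vector op into scalar ops (and, for
// FSIN/FCOS/FREM/FPOWI, scalar libcalls) -- unless a later setOperationAction
// for a specific SSE type overrides it below.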
if (Subtarget->hasMMX()) {
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
+ setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
+ setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
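// (Annotation, not in the original patch.) Marking FNEG and FABS as Custom for
// v4f32/v2f64 routes those nodes through LowerOperation to the LowerFABS and
// LowerFNEG helpers below, which now fill the constant-pool entry with the
// mask in every element instead of <mask, 0, 0, 0>.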
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
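// (Annotation, not in the original patch.) The constant assembled above is the
// usual IEEE-754 sign-mask trick: FABS is lowered to an AND with a mask that
// has every bit set except the sign bit of each element, clearing the sign and
// leaving the value otherwise untouched. A scalar C++ sketch of the same idea
// (illustration only; the name is made up, and it needs <cstdint>/<cstring>):
//
//   double fabs_via_mask(double X) {
//     uint64_t Bits;
//     std::memcpy(&Bits, &X, sizeof(Bits));
//     Bits &= ~(1ULL << 63);               // clear the sign bit: 0x7FFF...F
//     std::memcpy(&X, &Bits, sizeof(Bits));
//     return X;
//   }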
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(1U << 31));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
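// (Annotation, not in the original patch.) LowerFNEG mirrors LowerFABS, except
// the mask keeps only the sign bit (1ULL << 63 for f64, 1U << 31 for f32) and
// the node is lowered to an XOR, flipping the sign of every element in place:
//
//   Bits ^= (1ULL << 63);   // negate an f64 without touching mantissa/exponent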
// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets.
// The call to _alloca is needed to probe the stack when allocating more than 4K
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
-SDOperand X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
- SelectionDAG &DAG) {
+SDOperand
+X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+ SelectionDAG &DAG) {
assert(Subtarget->isTargetCygMing() &&
"This should be used only on Cygwin/Mingw targets");
SDOperand Chain = Op.getOperand(0);
SDOperand Size = Op.getOperand(1);
// FIXME: Ensure alignment here
- TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
+ SDOperand Flag;
+
MVT::ValueType IntPtr = getPointerTy();
MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
- const Type *IntPtrTy = getTargetData()->getIntPtrType();
-
- Entry.Node = Size;
- Entry.Ty = IntPtrTy;
- Entry.isInReg = true; // Should pass in EAX
- Args.push_back(Entry);
- std::pair<SDOperand, SDOperand> CallResult =
- LowerCallTo(Chain, IntPtrTy, false, false, CallingConv::C, false,
- DAG.getExternalSymbol("_alloca", IntPtr), Args, DAG);
-
- SDOperand SP = DAG.getCopyFromReg(CallResult.second, X86StackPtr, SPTy);
+
+ Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
+ Flag = Chain.getValue(1);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops[] = { Chain,
+ DAG.getTargetExternalSymbol("_alloca", IntPtr),
+ DAG.getRegister(X86::EAX, IntPtr),
+ Flag };
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
+ Flag = Chain.getValue(1);
+
+ Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
std::vector<MVT::ValueType> Tys;
Tys.push_back(SPTy);
Tys.push_back(MVT::Other);
- SDOperand Ops[2] = { SP, CallResult.second };
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2);
+ SDOperand Ops1[2] = { Chain.getValue(0), Chain };
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}
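// (Annotation, not in the original patch.) The new lowering skips LowerCallTo
// and emits the call directly: the requested size is copied into EAX, _alloca
// is called (it probes and adjusts the stack in steps of at most 4K), and the
// adjusted stack pointer is read back as the result. Roughly, for the 32-bit
// case (exact symbol decoration depends on the target):
//
//   movl  <size>, %eax
//   call  __alloca          ; probes the stack page by page, adjusts %esp
//   movl  %esp, <result>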
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
+ case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
+ case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
}
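// (Annotation, not in the original patch.) FRSQRT and FRCP are new target
// nodes for the SSE reciprocal-approximation instructions (rsqrtss/rsqrtps and
// rcpss/rcpps); these give only about 12 bits of precision, which is
// presumably why they are kept separate from the exact FSQRT/FDIV lowerings.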