//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
Names[RTLIB::NEG_I64] = "__negdi2";
Names[RTLIB::ADD_F32] = "__addsf3";
Names[RTLIB::ADD_F64] = "__adddf3";
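+ // The F80 entries map to libgcc's 80-bit extended precision ("xf") routines.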
+ Names[RTLIB::ADD_F80] = "__addxf3";
Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
Names[RTLIB::SUB_F32] = "__subsf3";
Names[RTLIB::SUB_F64] = "__subdf3";
+ Names[RTLIB::SUB_F80] = "__subxf3";
Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
Names[RTLIB::MUL_F32] = "__mulsf3";
Names[RTLIB::MUL_F64] = "__muldf3";
+ Names[RTLIB::MUL_F80] = "__mulxf3";
Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
Names[RTLIB::DIV_F32] = "__divsf3";
Names[RTLIB::DIV_F64] = "__divdf3";
+ Names[RTLIB::DIV_F80] = "__divxf3";
Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
Names[RTLIB::REM_F32] = "fmodf";
Names[RTLIB::REM_F64] = "fmod";
+ Names[RTLIB::REM_F80] = "fmodl";
Names[RTLIB::REM_PPCF128] = "fmodl";
- Names[RTLIB::NEG_F32] = "__negsf2";
- Names[RTLIB::NEG_F64] = "__negdf2";
Names[RTLIB::POWI_F32] = "__powisf2";
Names[RTLIB::POWI_F64] = "__powidf2";
Names[RTLIB::POWI_F80] = "__powixf2";
Names[RTLIB::SQRT_PPCF128] = "sqrtl";
Names[RTLIB::SIN_F32] = "sinf";
Names[RTLIB::SIN_F64] = "sin";
+ Names[RTLIB::SIN_F80] = "sinl";
+ Names[RTLIB::SIN_PPCF128] = "sinl";
Names[RTLIB::COS_F32] = "cosf";
Names[RTLIB::COS_F64] = "cos";
+ Names[RTLIB::COS_F80] = "cosl";
+ Names[RTLIB::COS_PPCF128] = "cosl";
Names[RTLIB::POW_F32] = "powf";
Names[RTLIB::POW_F64] = "pow";
Names[RTLIB::POW_F80] = "powl";
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadXActions, 0, sizeof(LoadXActions));
- memset(&StoreXActions, 0, sizeof(StoreXActions));
+ memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
memset(&ConvertActions, 0, sizeof(ConvertActions));
- // Set all indexed load / store to expand.
+ // Set default actions for various operations.
for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
+ // Default all indexed load / store to expand.
for (unsigned IM = (unsigned)ISD::PRE_INC;
IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
}
+
+ // These operations default to expand.
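+ // FGETSIGN returns the sign bit of an FP value as an integer 0/1 result.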
+ setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand);
}
+ // Default ISD::TRAP to expand (which turns it into abort).
+ setOperationAction(ISD::TRAP, MVT::Other, Expand);
+
IsLittleEndian = TD->isLittleEndian();
UsesGlobalOffsetTable = false;
ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
unsigned NumVectorRegs = 1;
+ // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
+ // could break down into LHS/RHS like LegalizeDAG does.
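+ // e.g. an illegal <3 x float> is treated as 3 separate f32 registers below.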
+ if (!isPowerOf2_32(NumElts)) {
+ NumVectorRegs = NumElts;
+ NumElts = 1;
+ }
+
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
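+ // e.g. if only v4i32 is legal, a v8i32 breaks down into 2 registers of v4i32.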
while (NumElts > 1 &&
KnownZero |= ~InMask & DemandedMask;
break;
}
+ case ISD::FGETSIGN:
+ // All bits are zero except the low bit.
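+ // e.g. for an i32 result, KnownZero = 0xFFFFFFFE here.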
+ KnownZero = MVT::getIntVTBitMask(Op.getValueType()) ^ 1;
+ break;
+ case ISD::BIT_CONVERT:
+#if 0
+ // If this is an FP->Int bitcast and if the sign bit is the only thing that
+ // is demanded, turn this into an FGETSIGN.
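+ // e.g. (i32 bit_convert (f32 X)) with only bit 31 demanded would become
+ // (shl (fgetsign X), 31).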
+ if (DemandedMask == MVT::getIntVTSignBit(Op.getValueType()) &&
+ MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
+ !MVT::isVector(Op.getOperand(0).getValueType())) {
+ // Only do this xform if FGETSIGN is legal for this type, or if we are still
+ // before legalize.
+ if (!TLO.AfterLegalize ||
+ isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
+ // Make an FGETSIGN + SHL to move the sign bit into the appropriate
+ // place. We expect the SHL to be eliminated by other optimizations.
+ SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
+ Op.getOperand(0));
+ unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1;
+ SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
+ Sign, ShAmt));
+ }
+ }
+#endif
+ break;
case ISD::ADD:
case ISD::SUB:
case ISD::INTRINSIC_WO_CHAIN:
// Constant fold or commute setcc.
SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
if (O.Val) return O;
+ } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) {
+ // If the RHS of an FP comparison is a constant, simplify it away in
+ // some cases.
+ if (CFP->getValueAPF().isNaN()) {
+ // If an operand is known to be a NaN, we can fold the comparison.
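+ // e.g. (setolt X, NaN) is known false, while (setult X, NaN) is known true.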
+ switch (ISD::getUnorderedFlavor(Cond)) {
+ default: assert(0 && "Unknown flavor!");
+ case 0: // Known false.
+ return DAG.getConstant(0, VT);
+ case 1: // Known true.
+ return DAG.getConstant(1, VT);
+ case 2: // Undefined.
+ return DAG.getNode(ISD::UNDEF, VT);
+ }
+ }
+
+ // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
+ // constant if knowing that the operand is non-NaN is enough. We prefer to
+ // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
+ // materialize 0.0.
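+ // e.g. (seto X, 1.0) becomes (seto X, X), which is true iff X is not a NaN.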
+ if (Cond == ISD::SETO || Cond == ISD::SETUO)
+ return DAG.getSetCC(VT, N0, N0, Cond);
}
if (N0 == N1) {