static void InitLibcallNames(const char **Names) {
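+ // libgcc naming: the si/di/ti suffixes are the 32/64/128-bit integer modes,
+ // and sf/df/xf/tf are float/double/x87 extended/128-bit float; the libm
+ // entries use the standard "f" (float) and "l" (long double) suffixes.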
Names[RTLIB::SHL_I32] = "__ashlsi3";
Names[RTLIB::SHL_I64] = "__ashldi3";
+ Names[RTLIB::SHL_I128] = "__ashlti3";
Names[RTLIB::SRL_I32] = "__lshrsi3";
Names[RTLIB::SRL_I64] = "__lshrdi3";
+ Names[RTLIB::SRL_I128] = "__lshrti3";
Names[RTLIB::SRA_I32] = "__ashrsi3";
Names[RTLIB::SRA_I64] = "__ashrdi3";
+ Names[RTLIB::SRA_I128] = "__ashrti3";
Names[RTLIB::MUL_I32] = "__mulsi3";
Names[RTLIB::MUL_I64] = "__muldi3";
+ Names[RTLIB::MUL_I128] = "__multi3";
Names[RTLIB::SDIV_I32] = "__divsi3";
Names[RTLIB::SDIV_I64] = "__divdi3";
+ Names[RTLIB::SDIV_I128] = "__divti3";
Names[RTLIB::UDIV_I32] = "__udivsi3";
Names[RTLIB::UDIV_I64] = "__udivdi3";
+ Names[RTLIB::UDIV_I128] = "__udivti3";
Names[RTLIB::SREM_I32] = "__modsi3";
Names[RTLIB::SREM_I64] = "__moddi3";
+ Names[RTLIB::SREM_I128] = "__modti3";
Names[RTLIB::UREM_I32] = "__umodsi3";
Names[RTLIB::UREM_I64] = "__umoddi3";
+ Names[RTLIB::UREM_I128] = "__umodti3";
Names[RTLIB::NEG_I32] = "__negsi2";
Names[RTLIB::NEG_I64] = "__negdi2";
Names[RTLIB::ADD_F32] = "__addsf3";
Names[RTLIB::SQRT_F64] = "sqrt";
Names[RTLIB::SQRT_F80] = "sqrtl";
Names[RTLIB::SQRT_PPCF128] = "sqrtl";
+ Names[RTLIB::LOG_F32] = "logf";
+ Names[RTLIB::LOG_F64] = "log";
+ Names[RTLIB::LOG_F80] = "logl";
+ Names[RTLIB::LOG_PPCF128] = "logl";
+ Names[RTLIB::LOG2_F32] = "log2f";
+ Names[RTLIB::LOG2_F64] = "log2";
+ Names[RTLIB::LOG2_F80] = "log2l";
+ Names[RTLIB::LOG2_PPCF128] = "log2l";
+ Names[RTLIB::LOG10_F32] = "log10f";
+ Names[RTLIB::LOG10_F64] = "log10";
+ Names[RTLIB::LOG10_F80] = "log10l";
+ Names[RTLIB::LOG10_PPCF128] = "log10l";
+ Names[RTLIB::EXP_F32] = "expf";
+ Names[RTLIB::EXP_F64] = "exp";
+ Names[RTLIB::EXP_F80] = "expl";
+ Names[RTLIB::EXP_PPCF128] = "expl";
+ Names[RTLIB::EXP2_F32] = "exp2f";
+ Names[RTLIB::EXP2_F64] = "exp2";
+ Names[RTLIB::EXP2_F80] = "exp2l";
+ Names[RTLIB::EXP2_PPCF128] = "exp2l";
Names[RTLIB::SIN_F32] = "sinf";
Names[RTLIB::SIN_F64] = "sin";
Names[RTLIB::SIN_F80] = "sinl";
Names[RTLIB::POW_F64] = "pow";
Names[RTLIB::POW_F80] = "powl";
Names[RTLIB::POW_PPCF128] = "powl";
+ Names[RTLIB::CEIL_F32] = "ceilf";
+ Names[RTLIB::CEIL_F64] = "ceil";
+ Names[RTLIB::CEIL_F80] = "ceill";
+ Names[RTLIB::CEIL_PPCF128] = "ceill";
+ Names[RTLIB::TRUNC_F32] = "truncf";
+ Names[RTLIB::TRUNC_F64] = "trunc";
+ Names[RTLIB::TRUNC_F80] = "truncl";
+ Names[RTLIB::TRUNC_PPCF128] = "truncl";
+ Names[RTLIB::RINT_F32] = "rintf";
+ Names[RTLIB::RINT_F64] = "rint";
+ Names[RTLIB::RINT_F80] = "rintl";
+ Names[RTLIB::RINT_PPCF128] = "rintl";
+ Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
+ Names[RTLIB::NEARBYINT_F64] = "nearbyint";
+ Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
+ Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
+ Names[RTLIB::FLOOR_F32] = "floorf";
+ Names[RTLIB::FLOOR_F64] = "floor";
+ Names[RTLIB::FLOOR_F80] = "floorl";
+ Names[RTLIB::FLOOR_PPCF128] = "floorl";
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
+ Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
+ Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
+ Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
+ Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
+ Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
+ Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
+ Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
+ Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
+ Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
+ Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
+ Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
+ Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
+ Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
+ Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
+ Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
+ Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
+ Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
Names[RTLIB::OEQ_F32] = "__eqsf2";
Names[RTLIB::OEQ_F64] = "__eqdf2";
Names[RTLIB::UNE_F32] = "__nesf2";
Names[RTLIB::O_F64] = "__unorddf2";
}
+/// getFPEXT - Return the FPEXT_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) {
+ if (OpVT == MVT::f32) {
+ if (RetVT == MVT::f64)
+ return FPEXT_F32_F64;
+ }
+ return UNKNOWN_LIBCALL;
+}
+
+/// getFPROUND - Return the FPROUND_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
+ if (RetVT == MVT::f32) {
+ if (OpVT == MVT::f64)
+ return FPROUND_F64_F32;
+ if (OpVT == MVT::f80)
+ return FPROUND_F80_F32;
+ if (OpVT == MVT::ppcf128)
+ return FPROUND_PPCF128_F32;
+ } else if (RetVT == MVT::f64) {
+ if (OpVT == MVT::f80)
+ return FPROUND_F80_F64;
+ if (OpVT == MVT::ppcf128)
+ return FPROUND_PPCF128_F64;
+ }
+ return UNKNOWN_LIBCALL;
+}
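+
+// A minimal usage sketch (illustration only, not part of this patch): the
+// legalizer maps an (operand type, result type) pair to a libcall enum and
+// then to the name registered in InitLibcallNames above, e.g.
+//   RTLIB::Libcall LC = RTLIB::getFPROUND(MVT::f80, MVT::f32); // FPROUND_F80_F32
+//   const char *Fn = TLI.getLibcallName(LC);                   // "__truncxfsf2"
+// where TLI is some TargetLowering instance.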
+
+/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
+ if (OpVT == MVT::f32) {
+ if (RetVT == MVT::i32)
+ return FPTOSINT_F32_I32;
+ if (RetVT == MVT::i64)
+ return FPTOSINT_F32_I64;
+ if (RetVT == MVT::i128)
+ return FPTOSINT_F32_I128;
+ } else if (OpVT == MVT::f64) {
+ if (RetVT == MVT::i32)
+ return FPTOSINT_F64_I32;
+ if (RetVT == MVT::i64)
+ return FPTOSINT_F64_I64;
+ if (RetVT == MVT::i128)
+ return FPTOSINT_F64_I128;
+ } else if (OpVT == MVT::f80) {
+ if (RetVT == MVT::i32)
+ return FPTOSINT_F80_I32;
+ if (RetVT == MVT::i64)
+ return FPTOSINT_F80_I64;
+ if (RetVT == MVT::i128)
+ return FPTOSINT_F80_I128;
+ } else if (OpVT == MVT::ppcf128) {
+ if (RetVT == MVT::i32)
+ return FPTOSINT_PPCF128_I32;
+ if (RetVT == MVT::i64)
+ return FPTOSINT_PPCF128_I64;
+ if (RetVT == MVT::i128)
+ return FPTOSINT_PPCF128_I128;
+ }
+ return UNKNOWN_LIBCALL;
+}
+
+/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
+ if (OpVT == MVT::f32) {
+ if (RetVT == MVT::i32)
+ return FPTOUINT_F32_I32;
+ if (RetVT == MVT::i64)
+ return FPTOUINT_F32_I64;
+ if (RetVT == MVT::i128)
+ return FPTOUINT_F32_I128;
+ } else if (OpVT == MVT::f64) {
+ if (RetVT == MVT::i32)
+ return FPTOUINT_F64_I32;
+ if (RetVT == MVT::i64)
+ return FPTOUINT_F64_I64;
+ if (RetVT == MVT::i128)
+ return FPTOUINT_F64_I128;
+ } else if (OpVT == MVT::f80) {
+ if (RetVT == MVT::i32)
+ return FPTOUINT_F80_I32;
+ if (RetVT == MVT::i64)
+ return FPTOUINT_F80_I64;
+ if (RetVT == MVT::i128)
+ return FPTOUINT_F80_I128;
+ } else if (OpVT == MVT::ppcf128) {
+ if (RetVT == MVT::i32)
+ return FPTOUINT_PPCF128_I32;
+ if (RetVT == MVT::i64)
+ return FPTOUINT_PPCF128_I64;
+ if (RetVT == MVT::i128)
+ return FPTOUINT_PPCF128_I128;
+ }
+ return UNKNOWN_LIBCALL;
+}
+
+/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
+ if (OpVT == MVT::i32) {
+ if (RetVT == MVT::f32)
+ return SINTTOFP_I32_F32;
+ else if (RetVT == MVT::f64)
+ return SINTTOFP_I32_F64;
+ else if (RetVT == MVT::f80)
+ return SINTTOFP_I32_F80;
+ else if (RetVT == MVT::ppcf128)
+ return SINTTOFP_I32_PPCF128;
+ } else if (OpVT == MVT::i64) {
+ if (RetVT == MVT::f32)
+ return SINTTOFP_I64_F32;
+ else if (RetVT == MVT::f64)
+ return SINTTOFP_I64_F64;
+ else if (RetVT == MVT::f80)
+ return SINTTOFP_I64_F80;
+ else if (RetVT == MVT::ppcf128)
+ return SINTTOFP_I64_PPCF128;
+ } else if (OpVT == MVT::i128) {
+ if (RetVT == MVT::f32)
+ return SINTTOFP_I128_F32;
+ else if (RetVT == MVT::f64)
+ return SINTTOFP_I128_F64;
+ else if (RetVT == MVT::f80)
+ return SINTTOFP_I128_F80;
+ else if (RetVT == MVT::ppcf128)
+ return SINTTOFP_I128_PPCF128;
+ }
+ return UNKNOWN_LIBCALL;
+}
+
+/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
+/// UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) {
+ if (OpVT == MVT::i32) {
+ if (RetVT == MVT::f32)
+ return UINTTOFP_I32_F32;
+ else if (RetVT == MVT::f64)
+ return UINTTOFP_I32_F64;
+ else if (RetVT == MVT::f80)
+ return UINTTOFP_I32_F80;
+ else if (RetVT == MVT::ppcf128)
+ return UINTTOFP_I32_PPCF128;
+ } else if (OpVT == MVT::i64) {
+ if (RetVT == MVT::f32)
+ return UINTTOFP_I64_F32;
+ else if (RetVT == MVT::f64)
+ return UINTTOFP_I64_F64;
+ else if (RetVT == MVT::f80)
+ return UINTTOFP_I64_F80;
+ else if (RetVT == MVT::ppcf128)
+ return UINTTOFP_I64_PPCF128;
+ } else if (OpVT == MVT::i128) {
+ if (RetVT == MVT::f32)
+ return UINTTOFP_I128_F32;
+ else if (RetVT == MVT::f64)
+ return UINTTOFP_I128_F64;
+ else if (RetVT == MVT::f80)
+ return UINTTOFP_I128_F80;
+ else if (RetVT == MVT::ppcf128)
+ return UINTTOFP_I128_PPCF128;
+ }
+ return UNKNOWN_LIBCALL;
+}
+
/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
"Fixed size array in TargetLowering is not large enough!");
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
- memset(LoadXActions, 0, sizeof(LoadXActions));
+ memset(LoadExtActions, 0, sizeof(LoadExtActions));
memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
memset(ConvertActions, 0, sizeof(ConvertActions));
+ memset(CondCodeActions, 0, sizeof(CondCodeActions));
// Set default actions for various operations.
for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
// Default all indexed load / store to expand.
for (unsigned IM = (unsigned)ISD::PRE_INC;
IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
- setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
- setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
+ setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
+ setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
}
// These operations default to expand.
- setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
}
// Most targets ignore the @llvm.prefetch intrinsic.
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
+ // These library functions default to expand.
+ setOperationAction(ISD::FLOG , MVT::f64, Expand);
+ setOperationAction(ISD::FLOG2, MVT::f64, Expand);
+ setOperationAction(ISD::FLOG10,MVT::f64, Expand);
+ setOperationAction(ISD::FEXP , MVT::f64, Expand);
+ setOperationAction(ISD::FEXP2, MVT::f64, Expand);
+ setOperationAction(ISD::FLOG , MVT::f32, Expand);
+ setOperationAction(ISD::FLOG2, MVT::f32, Expand);
+ setOperationAction(ISD::FLOG10,MVT::f32, Expand);
+ setOperationAction(ISD::FEXP , MVT::f32, Expand);
+ setOperationAction(ISD::FEXP2, MVT::f32, Expand);
+
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
InitCmpLibcallCCs(CmpLibcallCCs);
// Tell Legalize whether the assembler supports DEBUG_LOC.
- if (!TM.getTargetAsmInfo()->hasDotLocAndDotFile())
+ const TargetAsmInfo *TASM = TM.getTargetAsmInfo();
+ if (!TASM || !TASM->hasDotLocAndDotFile())
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}
// Everything defaults to needing one register.
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
NumRegistersForVT[i] = 1;
- RegisterTypeForVT[i] = TransformToType[i] = i;
+ RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
}
// ...except isVoid, which doesn't need any registers.
NumRegistersForVT[MVT::isVoid] = 0;
// Find the largest integer register class.
- unsigned LargestIntReg = MVT::i128;
+ unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
// Every integer value type larger than this largest register takes twice as
// many registers to represent as the previous ValueType.
- for (MVT::ValueType ExpandedReg = LargestIntReg + 1;
- MVT::isInteger(ExpandedReg); ++ExpandedReg) {
+ for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
+ MVT EVT = (MVT::SimpleValueType)ExpandedReg;
+ if (!EVT.isInteger())
+ break;
NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
- RegisterTypeForVT[ExpandedReg] = LargestIntReg;
- TransformToType[ExpandedReg] = ExpandedReg - 1;
- ValueTypeActions.setTypeAction(ExpandedReg, Expand);
+ RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
+ TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
+ ValueTypeActions.setTypeAction(EVT, Expand);
}
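+ // For example, if the largest legal integer type is i32, the loop above
+ // marks i64 as Expand (2 registers, transformed to i32) and i128 as Expand
+ // (4 registers, transformed to i64).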
// Inspect all of the ValueType's smaller than the largest integer
// register to see which ones need promotion.
- MVT::ValueType LegalIntReg = LargestIntReg;
- for (MVT::ValueType IntReg = LargestIntReg - 1;
- IntReg >= MVT::i1; --IntReg) {
- if (isTypeLegal(IntReg)) {
+ unsigned LegalIntReg = LargestIntReg;
+ for (unsigned IntReg = LargestIntReg - 1;
+ IntReg >= (unsigned)MVT::i1; --IntReg) {
+ MVT IVT = (MVT::SimpleValueType)IntReg;
+ if (isTypeLegal(IVT)) {
LegalIntReg = IntReg;
} else {
- RegisterTypeForVT[IntReg] = TransformToType[IntReg] = LegalIntReg;
- ValueTypeActions.setTypeAction(IntReg, Promote);
+ RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
+ (MVT::SimpleValueType)LegalIntReg;
+ ValueTypeActions.setTypeAction(IVT, Promote);
}
}
}
// Loop over all of the vector value types to see which need transformations.
- for (MVT::ValueType i = MVT::FIRST_VECTOR_VALUETYPE;
- i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
- if (!isTypeLegal(i)) {
- MVT::ValueType IntermediateVT, RegisterVT;
+ for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
+ if (!isTypeLegal(VT)) {
+ MVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
NumRegistersForVT[i] =
- getVectorTypeBreakdown(i,
+ getVectorTypeBreakdown(VT,
IntermediateVT, NumIntermediates,
RegisterVT);
RegisterTypeForVT[i] = RegisterVT;
TransformToType[i] = MVT::Other; // this isn't actually used
- ValueTypeActions.setTypeAction(i, Expand);
+ ValueTypeActions.setTypeAction(VT, Expand);
}
}
}
}
-MVT::ValueType
-TargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT TargetLowering::getSetCCResultType(const SDValue &) const {
return getValueType(TD->getIntPtrType());
}
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
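+/// For example, on a target where v4f32 is the widest legal vector type,
+/// v8f32 breaks down into NumIntermediates = 2 values of IntermediateVT =
+/// RegisterVT = v4f32, and the function returns 2 registers.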
-unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT,
- MVT::ValueType &IntermediateVT,
+unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
+ MVT &IntermediateVT,
unsigned &NumIntermediates,
- MVT::ValueType &RegisterVT) const {
+ MVT &RegisterVT) const {
// Figure out the right, legal destination reg to copy into.
- unsigned NumElts = MVT::getVectorNumElements(VT);
- MVT::ValueType EltTy = MVT::getVectorElementType(VT);
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType();
unsigned NumVectorRegs = 1;
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
- while (NumElts > 1 &&
- !isTypeLegal(MVT::getVectorType(EltTy, NumElts))) {
+ while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
NumElts >>= 1;
NumVectorRegs <<= 1;
}
NumIntermediates = NumVectorRegs;
- MVT::ValueType NewVT = MVT::getVectorType(EltTy, NumElts);
+ MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
if (!isTypeLegal(NewVT))
NewVT = EltTy;
IntermediateVT = NewVT;
- MVT::ValueType DestVT = getTypeToTransformTo(NewVT);
+ MVT DestVT = getTypeToTransformTo(NewVT);
RegisterVT = DestVT;
- if (DestVT < NewVT) {
+ if (DestVT.bitsLT(NewVT)) {
// Value is expanded, e.g. i64 -> i16.
- return NumVectorRegs*(MVT::getSizeInBits(NewVT)/MVT::getSizeInBits(DestVT));
+ return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
} else {
// Otherwise, promotion or legal types use the same number of registers as
// the vector decimated to the appropriate level.
return NumVectorRegs;
}
+/// getWidenVectorType: given a vector type, returns the type to widen to
+/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
+/// If there is no vector type that we want to widen to, returns MVT::Other.
+/// When and where to widen is target dependent, based on the cost of
+/// scalarizing vs. using the wider vector type.
+MVT TargetLowering::getWidenVectorType(MVT VT) {
+ assert(VT.isVector());
+ if (isTypeLegal(VT))
+ return VT;
+
+ // Default is not to widen until moved to LegalizeTypes
+ return MVT::Other;
+}
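+
+// A hypothetical target override (sketch only; "FooTargetLowering" and the
+// widening policy are assumed, not part of this patch) could round the
+// element count up to a power of two and widen when the result is legal:
+//   MVT FooTargetLowering::getWidenVectorType(MVT VT) {
+//     if (isTypeLegal(VT))
+//       return VT;
+//     MVT Wide = MVT::getVectorVT(VT.getVectorElementType(),
+//                                 1u << Log2_32_Ceil(VT.getVectorNumElements()));
+//     return isTypeLegal(Wide) ? Wide : MVT::Other;  // e.g. v7i8 -> v8i8
+//   }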
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
return TD->getCallFrameTypeAlignment(Ty);
}
-SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
- SelectionDAG &DAG) const {
+SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
+ SelectionDAG &DAG) const {
if (usesGlobalOffsetTable())
return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
return Table;
}
+bool
+TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+ // Assume that everything is safe in static mode.
+ if (getTargetMachine().getRelocationModel() == Reloc::Static)
+ return true;
+
+ // In dynamic-no-pic mode, assume that known defined values are safe.
+ if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
+ GA &&
+ !GA->getGlobal()->isDeclaration() &&
+ !GA->getGlobal()->mayBeOverridden())
+ return true;
+
+ // Otherwise assume nothing is safe.
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
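+/// For example, if only the low four bits of (X ^ 0xFF) are demanded, the
+/// constant can be shrunk to 0x0F.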
-bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
+bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
const APInt &Demanded) {
// FIXME: ISD::SELECT, ISD::SELECT_CC
switch(Op.getOpcode()) {
case ISD::XOR:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
if (C->getAPIntValue().intersects(~Demanded)) {
- MVT::ValueType VT = Op.getValueType();
- SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
+ MVT VT = Op.getValueType();
+ SDValue New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
DAG.getConstant(Demanded &
C->getAPIntValue(),
VT));
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
-bool TargetLowering::SimplifyDemandedBits(SDOperand Op,
+bool TargetLowering::SimplifyDemandedBits(SDValue Op,
const APInt &DemandedMask,
APInt &KnownZero,
APInt &KnownOne,
KnownZero = KnownOne = APInt(BitWidth, 0);
// Other users may use these bits.
- if (!Op.Val->hasOneUse()) {
+ if (!Op.getNode()->hasOneUse()) {
if (Depth != 0) {
// If not at the root, Just compute the KnownZero/KnownOne bits to
// simplify things downstream.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
if ((KnownOne & KnownOne2) == KnownOne) {
- MVT::ValueType VT = Op.getValueType();
- SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
+ MVT VT = Op.getValueType();
+ SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
ANDC));
}
// if we can expand it to have all bits set, do it
if (Expanded.isAllOnesValue()) {
if (Expanded != C->getAPIntValue()) {
- MVT::ValueType VT = Op.getValueType();
- SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
+ MVT VT = Op.getValueType();
+ SDValue New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
TLO.DAG.getConstant(Expanded, VT));
return TLO.CombineTo(Op, New);
}
break;
case ISD::SHL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- unsigned ShAmt = SA->getValue();
- SDOperand InOp = Op.getOperand(0);
+ unsigned ShAmt = SA->getZExtValue();
+ SDValue InOp = Op.getOperand(0);
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
if (InOp.getOpcode() == ISD::SRL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
- unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
+ unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
unsigned Opc = ISD::SHL;
int Diff = ShAmt-C1;
if (Diff < 0) {
Opc = ISD::SRL;
}
- SDOperand NewSA =
+ SDValue NewSA =
TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
InOp.getOperand(0), NewSA));
}
if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
- KnownZero <<= SA->getValue();
- KnownOne <<= SA->getValue();
+ KnownZero <<= SA->getZExtValue();
+ KnownOne <<= SA->getZExtValue();
// low bits known zero.
- KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getValue());
+ KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
}
break;
case ISD::SRL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT::ValueType VT = Op.getValueType();
- unsigned ShAmt = SA->getValue();
- unsigned VTSize = MVT::getSizeInBits(VT);
- SDOperand InOp = Op.getOperand(0);
+ MVT VT = Op.getValueType();
+ unsigned ShAmt = SA->getZExtValue();
+ unsigned VTSize = VT.getSizeInBits();
+ SDValue InOp = Op.getOperand(0);
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
if (InOp.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
- unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
+ unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
unsigned Opc = ISD::SRL;
int Diff = ShAmt-C1;
if (Diff < 0) {
Opc = ISD::SHL;
}
- SDOperand NewSA =
+ SDValue NewSA =
TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
InOp.getOperand(0), NewSA));
break;
case ISD::SRA:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT::ValueType VT = Op.getValueType();
- unsigned ShAmt = SA->getValue();
+ MVT VT = Op.getValueType();
+ unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
if (ShAmt >= BitWidth)
// demand the input sign bit.
APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
if (HighBits.intersects(NewMask))
- InDemandedMask |= APInt::getSignBit(MVT::getSizeInBits(VT));
+ InDemandedMask |= APInt::getSignBit(VT.getSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
}
break;
case ISD::SIGN_EXTEND_INREG: {
- MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
APInt NewBits = APInt::getHighBitsSet(BitWidth,
- BitWidth - MVT::getSizeInBits(EVT)) &
+ BitWidth - EVT.getSizeInBits()) &
NewMask;
// If none of the extended bits are demanded, eliminate the sextinreg.
if (NewBits == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
- APInt InSignBit = APInt::getSignBit(MVT::getSizeInBits(EVT));
+ APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
InSignBit.zext(BitWidth);
APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
- MVT::getSizeInBits(EVT)) &
+ EVT.getSizeInBits()) &
NewMask;
// Since the sign extended bits are demanded, we know that the sign
break;
}
case ISD::SIGN_EXTEND: {
- MVT::ValueType InVT = Op.getOperand(0).getValueType();
- unsigned InBits = MVT::getSizeInBits(InVT);
+ MVT InVT = Op.getOperand(0).getValueType();
+ unsigned InBits = InVT.getSizeInBits();
APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
APInt NewBits = ~InMask & NewMask;
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
- if (Op.getOperand(0).Val->hasOneUse()) {
- SDOperand In = Op.getOperand(0);
+ if (Op.getOperand(0).getNode()->hasOneUse()) {
+ SDValue In = Op.getOperand(0);
unsigned InBitWidth = In.getValueSizeInBits();
switch (In.getOpcode()) {
default: break;
if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
APInt HighBits = APInt::getHighBitsSet(InBitWidth,
InBitWidth - BitWidth);
- HighBits = HighBits.lshr(ShAmt->getValue());
+ HighBits = HighBits.lshr(ShAmt->getZExtValue());
HighBits.trunc(BitWidth);
- if (ShAmt->getValue() < BitWidth && !(HighBits & NewMask)) {
+ if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
// None of the shifted in bits are needed. Add a truncate of the
// shift input, then shift it.
- SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
+ SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
Op.getValueType(),
In.getOperand(0));
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,Op.getValueType(),
break;
}
case ISD::AssertZext: {
- MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth,
- MVT::getSizeInBits(VT));
+ VT.getSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
#if 0
// If this is an FP->Int bitcast and if the sign bit is the only thing that
// is demanded, turn this into a FGETSIGN.
- if (NewMask == MVT::getIntVTSignBit(Op.getValueType()) &&
+ if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
!MVT::isVector(Op.getOperand(0).getValueType())) {
// Only do this xform if FGETSIGN is valid or if before legalize.
isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
// Make a FGETSIGN + SHL to move the sign bit into the appropriate
// place. We expect the SHL to be eliminated by other optimizations.
- SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
+ SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
Op.getOperand(0));
- unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1;
- SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
+ unsigned ShVal = Op.getValueType().getSizeInBits()-1;
+ SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
Sign, ShAmt));
}
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
-void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
+void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
-unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
+unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
-/// and cc. If it is unable to simplify it, return a null SDOperand.
-SDOperand
-TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
+/// and cc. If it is unable to simplify it, return a null SDValue.
+SDValue
+TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
case ISD::SETTRUE2: return DAG.getConstant(1, VT);
}
- if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
+ if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
const APInt &C1 = N1C->getAPIntValue();
- if (isa<ConstantSDNode>(N0.Val)) {
+ if (isa<ConstantSDNode>(N0.getNode())) {
return DAG.FoldSetCC(VT, N0, N1, Cond);
} else {
// If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
N0.getOperand(0).getOpcode() == ISD::CTLZ &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
- unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
+ unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
+ ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
if ((C1 == 0) == (Cond == ISD::SETEQ)) {
// (srl (ctlz x), 5) == 0 -> X != 0
// (srl (ctlz x), 5) != 1 -> X != 0
// (srl (ctlz x), 5) == 1 -> X == 0
Cond = ISD::SETEQ;
}
- SDOperand Zero = DAG.getConstant(0, N0.getValueType());
+ SDValue Zero = DAG.getConstant(0, N0.getValueType());
return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
Zero, Cond);
}
}
-
+
+ // If the LHS is '(and load, const)', the RHS is 0,
+ // the test is for equality or unsigned, and all 1 bits of the const are
+ // in the same partial word, see if we can shorten the load.
+ if (DCI.isBeforeLegalize() &&
+ N0.getOpcode() == ISD::AND && C1 == 0 &&
+ isa<LoadSDNode>(N0.getOperand(0)) &&
+ N0.getOperand(0).getNode()->hasOneUse() &&
+ isa<ConstantSDNode>(N0.getOperand(1))) {
+ LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
+ uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
+ uint64_t bestMask = 0;
+ unsigned bestWidth = 0, bestOffset = 0;
+ if (!Lod->isVolatile() && Lod->isUnindexed()) {
+ unsigned origWidth = N0.getValueType().getSizeInBits();
+ // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
+ // 8 bits, but have to be careful...
+ if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
+ origWidth = Lod->getMemoryVT().getSizeInBits();
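+ // Scan progressively narrower power-of-two widths (down to 8 bits); for
+ // each width, slide a window across the value and remember the narrowest
+ // window that still covers every set bit of the mask.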
+ for (unsigned width = origWidth / 2; width>=8; width /= 2) {
+ uint64_t newMask = (1ULL << width) - 1;
+ for (unsigned offset=0; offset<origWidth/width; offset++) {
+ if ((newMask & Mask)==Mask) {
+ if (!TD->isLittleEndian())
+ bestOffset = (origWidth/width - offset - 1) * (width/8);
+ else
+ bestOffset = (uint64_t)offset * (width/8);
+ bestMask = Mask >> (offset * 8);
+ bestWidth = width;
+ break;
+ }
+ newMask = newMask << width;
+ }
+ }
+ }
+ if (bestWidth) {
+ MVT newVT = MVT::getIntegerVT(bestWidth);
+ if (newVT.isRound()) {
+ MVT PtrType = Lod->getOperand(1).getValueType();
+ SDValue Ptr = Lod->getBasePtr();
+ if (bestOffset != 0)
+ Ptr = DAG.getNode(ISD::ADD, PtrType, Lod->getBasePtr(),
+ DAG.getConstant(bestOffset, PtrType));
+ unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
+ SDValue NewLoad = DAG.getLoad(newVT, Lod->getChain(), Ptr,
+ Lod->getSrcValue(),
+ Lod->getSrcValueOffset() + bestOffset,
+ false, NewAlign);
+ return DAG.getSetCC(VT, DAG.getNode(ISD::AND, newVT, NewLoad,
+ DAG.getConstant(bestMask, newVT)),
+ DAG.getConstant(0LL, newVT), Cond);
+ }
+ }
+ }
+
// If the LHS is a ZERO_EXTEND, perform the comparison on the input.
if (N0.getOpcode() == ISD::ZERO_EXTEND) {
- unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());
+ unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
// If the comparison constant has bits in the upper part, the
// zero-extended value could never match.
}
} else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
- MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
- unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
- MVT::ValueType ExtDstTy = N0.getValueType();
- unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);
+ MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
+ unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
+ MVT ExtDstTy = N0.getValueType();
+ unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
// If the extended part has any inconsistent bits, it cannot ever
// compare equal. In other words, they have to be all ones or all
if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
return DAG.getConstant(Cond == ISD::SETNE, VT);
- SDOperand ZextOp;
- MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
+ SDValue ZextOp;
+ MVT Op0Ty = N0.getOperand(0).getValueType();
if (Op0Ty == ExtSrcTy) {
ZextOp = N0.getOperand(0);
} else {
DAG.getConstant(Imm, Op0Ty));
}
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(ZextOp.Val);
+ DCI.AddToWorklist(ZextOp.getNode());
// Otherwise, make this a use of a zext.
return DAG.getSetCC(VT, ZextOp,
DAG.getConstant(C1 & APInt::getLowBitsSet(
// SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
if (N0.getOpcode() == ISD::SETCC) {
- bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
+ bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1);
if (TrueWhenTrue)
return N0;
// Invert the condition.
ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
CC = ISD::getSetCCInverse(CC,
- MVT::isInteger(N0.getOperand(0).getValueType()));
+ N0.getOperand(0).getValueType().isInteger());
return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
}
APInt::getHighBitsSet(BitWidth,
BitWidth-1))) {
// Okay, get the un-inverted input value.
- SDOperand Val;
+ SDValue Val;
if (N0.getOpcode() == ISD::XOR)
Val = N0.getOperand(0);
else {
}
APInt MinVal, MaxVal;
- unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
+ unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
if (ISD::isSignedIntSetCC(Cond)) {
MinVal = APInt::getSignedMinValue(OperandBitSize);
MaxVal = APInt::getSignedMaxValue(OperandBitSize);
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
- if (isPowerOf2_64(AndRHS->getValue())) {
+ if (isPowerOf2_64(AndRHS->getZExtValue())) {
return DAG.getNode(ISD::SRL, VT, N0,
- DAG.getConstant(Log2_64(AndRHS->getValue()),
+ DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
getShiftAmountTy()));
}
- } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
+ } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
// (X & 8) == 8 --> (X & 8) >> 3
// Perform the xform if C1 is a single bit.
if (C1.isPowerOf2()) {
}
}
}
- } else if (isa<ConstantSDNode>(N0.Val)) {
+ } else if (isa<ConstantSDNode>(N0.getNode())) {
// Ensure that the constant occurs on the RHS.
return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
}
- if (isa<ConstantFPSDNode>(N0.Val)) {
+ if (isa<ConstantFPSDNode>(N0.getNode())) {
// Constant fold or commute setcc.
- SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
- if (O.Val) return O;
- } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) {
+ SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond);
+ if (O.getNode()) return O;
+ } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
// If the RHS of an FP comparison is a constant, simplify it away in
// some cases.
if (CFP->getValueAPF().isNaN()) {
if (N0 == N1) {
// We can always fold X == X for integer setcc's.
- if (MVT::isInteger(N0.getValueType()))
+ if (N0.getValueType().isInteger())
return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
unsigned UOF = ISD::getUnorderedFlavor(Cond);
if (UOF == 2) // FP operators that are undefined on NaNs.
}
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- MVT::isInteger(N0.getValueType())) {
+ N0.getValueType().isInteger()) {
if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
N0.getOpcode() == ISD::XOR) {
// Simplify (X+Y) == (X+Z) --> Y == Z
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
// Turn (X+C1) == C2 --> X == C2-C1
- if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
+ if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
return DAG.getSetCC(VT, N0.getOperand(0),
- DAG.getConstant(RHSC->getValue()-LHSR->getValue(),
+ DAG.getConstant(RHSC->getAPIntValue()-
+ LHSR->getAPIntValue(),
N0.getValueType()), Cond);
}
// Turn (C1-X) == C2 --> X == C1-C2
if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
- if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
+ if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
return
DAG.getSetCC(VT, N0.getOperand(1),
DAG.getConstant(SUBC->getAPIntValue() -
if (DAG.isCommutativeBinOp(N0.getOpcode()))
return DAG.getSetCC(VT, N0.getOperand(0),
DAG.getConstant(0, N0.getValueType()), Cond);
- else if (N0.Val->hasOneUse()) {
+ else if (N0.getNode()->hasOneUse()) {
assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
// (Z-X) == X --> Z == X<<1
- SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
+ SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(),
N1,
DAG.getConstant(1, getShiftAmountTy()));
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(SH.Val);
+ DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
}
}
if (DAG.isCommutativeBinOp(N1.getOpcode())) {
return DAG.getSetCC(VT, N1.getOperand(0),
DAG.getConstant(0, N1.getValueType()), Cond);
- } else if (N1.Val->hasOneUse()) {
+ } else if (N1.getNode()->hasOneUse()) {
assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
// X == (Z-X) --> X<<1 == Z
- SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
+ SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
DAG.getConstant(1, getShiftAmountTy()));
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(SH.Val);
+ DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
}
}
}
// Fold away ALL boolean setcc's.
- SDOperand Temp;
+ SDValue Temp;
if (N0.getValueType() == MVT::i1 && foldBooleans) {
switch (Cond) {
default: assert(0 && "Unknown integer setcc!");
Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(Temp.Val);
+ DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETNE: // X != Y --> (X^Y)
N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(Temp.Val);
+ DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> Y^1 & X
case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> Y^1 & X
Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(Temp.Val);
+ DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> X^1 | Y
case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> X^1 | Y
Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(Temp.Val);
+ DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> Y^1 | X
case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> Y^1 | X
}
if (VT != MVT::i1) {
if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(N0.Val);
+ DCI.AddToWorklist(N0.getNode());
// FIXME: If running after legalize, we probably can't do this.
N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
}
}
// Could not fold it.
- return SDOperand();
+ return SDValue();
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
int64_t &Offset) const {
if (isa<GlobalAddressSDNode>(N)) {
- GA = cast<GlobalAddressSDNode>(N)->getGlobal();
+ GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
+ GA = GASD->getGlobal();
+ Offset += GASD->getOffset();
return true;
}
if (N->getOpcode() == ISD::ADD) {
- SDOperand N1 = N->getOperand(0);
- SDOperand N2 = N->getOperand(1);
- if (isGAPlusOffset(N1.Val, GA, Offset)) {
+ SDValue N1 = N->getOperand(0);
+ SDValue N2 = N->getOperand(1);
+ if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
if (V) {
- Offset += V->getSignExtended();
+ Offset += V->getSExtValue();
return true;
}
- } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
+ } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
if (V) {
- Offset += V->getSignExtended();
+ Offset += V->getSExtValue();
return true;
}
}
/// location that the 'Base' load is loading from.
bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base,
unsigned Bytes, int Dist,
- MachineFrameInfo *MFI) const {
- if (LD->getOperand(0).Val != Base->getOperand(0).Val)
+ const MachineFrameInfo *MFI) const {
+ if (LD->getOperand(0).getNode() != Base->getOperand(0).getNode())
return false;
- MVT::ValueType VT = LD->getValueType(0);
- if (MVT::getSizeInBits(VT) / 8 != Bytes)
+ MVT VT = LD->getValueType(0);
+ if (VT.getSizeInBits() / 8 != Bytes)
return false;
- SDOperand Loc = LD->getOperand(1);
- SDOperand BaseLoc = Base->getOperand(1);
+ SDValue Loc = LD->getOperand(1);
+ SDValue BaseLoc = Base->getOperand(1);
if (Loc.getOpcode() == ISD::FrameIndex) {
if (BaseLoc.getOpcode() != ISD::FrameIndex)
return false;
GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
- bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
- bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
+ bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1);
+ bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
return Offset1 == (Offset2 + Dist*Bytes);
return false;
}
-SDOperand TargetLowering::
+SDValue TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
// Default implementation: no optimization.
- return SDOperand();
+ return SDValue();
}
//===----------------------------------------------------------------------===//
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
-const char *TargetLowering::LowerXConstraint(MVT::ValueType ConstraintVT) const{
- if (MVT::isInteger(ConstraintVT))
+const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{
+ if (ConstraintVT.isInteger())
return "r";
- if (MVT::isFloatingPoint(ConstraintVT))
+ if (ConstraintVT.isFloatingPoint())
return "f"; // works for many targets
return 0;
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
-void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
+void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- std::vector<SDOperand> &Ops,
+ bool hasMemory,
+ std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
switch (ConstraintLetter) {
default: break;
if (GA) { // Either &GV or &GV+C
if (ConstraintLetter != 'n') {
int64_t Offs = GA->getOffset();
- if (C) Offs += C->getValue();
+ if (C) Offs += C->getZExtValue();
Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
Op.getValueType(), Offs));
return;
if (C) { // just C, no GV.
// Simple constants are not allowed for 's'.
if (ConstraintLetter != 's') {
- Ops.push_back(DAG.getTargetConstant(C->getValue(), Op.getValueType()));
+ Ops.push_back(DAG.getTargetConstant(C->getAPIntValue(),
+ Op.getValueType()));
return;
}
}
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint[0] != '{')
return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
//===----------------------------------------------------------------------===//
// Constraint Selection.
+/// isMatchingInputConstraint - Return true if this is an input operand that is
+/// a matching constraint like "4".
+bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
+ assert(!ConstraintCode.empty() && "No known constraint!");
+ return isdigit(ConstraintCode[0]);
+}
+
+/// getMatchedOperand - If this is an input matching constraint, this method
+/// returns the output operand it matches.
+unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
+ assert(!ConstraintCode.empty() && "No known constraint!");
+ return atoi(ConstraintCode.c_str());
+}
+
+
/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
/// 'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
- const TargetLowering &TLI,
- SDOperand Op, SelectionDAG *DAG) {
+ bool hasMemory, const TargetLowering &TLI,
+ SDValue Op, SelectionDAG *DAG) {
assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
unsigned BestIdx = 0;
TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
// For example, on X86 we might have an 'rI' constraint. If the operand
// is an integer in the range [0..31] we want to use I (saving a load
// of a register), otherwise we must use 'r'.
- if (CType == TargetLowering::C_Other && Op.Val) {
+ if (CType == TargetLowering::C_Other && Op.getNode()) {
assert(OpInfo.Codes[i].size() == 1 &&
"Unhandled multi-letter 'other' constraint");
- std::vector<SDOperand> ResultOps;
- TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
+ std::vector<SDValue> ResultOps;
+ TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
ResultOps, *DAG);
if (!ResultOps.empty()) {
BestType = CType;
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
- SDOperand Op,
+ SDValue Op,
+ bool hasMemory,
SelectionDAG *DAG) const {
assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
OpInfo.ConstraintCode = OpInfo.Codes[0];
OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
} else {
- ChooseConstraint(OpInfo, *this, Op, DAG);
+ ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
}
// 'X' matches anything.
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
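+///
+/// For example (illustration only), a signed i32 divide by 7 uses
+/// magic32(7) = { m = 0x92492493, s = 2 }; since d > 0 and m < 0 the
+/// numerator is added back, giving roughly:
+///   q = mulhs(x, 0x92492493) + x
+///   q = sra(q, 2)
+///   q = q + srl(q, 31)   // add the extracted sign bit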
-SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
- std::vector<SDNode*>* Created) const {
- MVT::ValueType VT = N->getValueType(0);
+SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
+ std::vector<SDNode*>* Created) const {
+ MVT VT = N->getValueType(0);
// Check to see if we can do this.
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
- return SDOperand(); // BuildSDIV only operates on i32 or i64
+ return SDValue(); // BuildSDIV only operates on i32 or i64
- int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
+ int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q;
+ SDValue Q;
if (isOperationLegal(ISD::MULHS, VT))
Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
DAG.getConstant(magics.m, VT));
else if (isOperationLegal(ISD::SMUL_LOHI, VT))
- Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
+ Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
N->getOperand(0),
- DAG.getConstant(magics.m, VT)).Val, 1);
+ DAG.getConstant(magics.m, VT)).getNode(), 1);
else
- return SDOperand(); // No mulhs or equvialent
+ return SDValue(); // No mulhs or equivalent
// If d > 0 and m < 0, add the numerator
if (d > 0 && magics.m < 0) {
Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
if (Created)
- Created->push_back(Q.Val);
+ Created->push_back(Q.getNode());
}
// If d < 0 and m > 0, subtract the numerator.
if (d < 0 && magics.m > 0) {
Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
if (Created)
- Created->push_back(Q.Val);
+ Created->push_back(Q.getNode());
}
// Shift right algebraic if shift value is nonzero
if (magics.s > 0) {
Q = DAG.getNode(ISD::SRA, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
if (Created)
- Created->push_back(Q.Val);
+ Created->push_back(Q.getNode());
}
// Extract the sign bit and add it to the quotient
- SDOperand T =
- DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
+ SDValue T =
+ DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
getShiftAmountTy()));
if (Created)
- Created->push_back(T.Val);
+ Created->push_back(T.getNode());
return DAG.getNode(ISD::ADD, VT, Q, T);
}
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
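+///
+/// For example (illustration only), an unsigned i32 divide by 7 uses
+/// magicu32(7) = { m = 0x24924925, a = 1, s = 3 }; with the "add" flag set
+/// the result is computed roughly as:
+///   q   = mulhu(x, 0x24924925)
+///   npq = srl(x - q, 1)
+///   q   = srl(npq + q, 2)   // i.e. shift by magics.s - 1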
-SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
- std::vector<SDNode*>* Created) const {
- MVT::ValueType VT = N->getValueType(0);
+SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
+ std::vector<SDNode*>* Created) const {
+ MVT VT = N->getValueType(0);
// Check to see if we can do this.
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
- return SDOperand(); // BuildUDIV only operates on i32 or i64
+ return SDValue(); // BuildUDIV only operates on i32 or i64
- uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
+ uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q;
+ SDValue Q;
if (isOperationLegal(ISD::MULHU, VT))
Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
DAG.getConstant(magics.m, VT));
else if (isOperationLegal(ISD::UMUL_LOHI, VT))
- Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
+ Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
N->getOperand(0),
- DAG.getConstant(magics.m, VT)).Val, 1);
+ DAG.getConstant(magics.m, VT)).getNode(), 1);
else
- return SDOperand(); // No mulhu or equvialent
+ return SDValue(); // No mulhu or equivalent
if (Created)
- Created->push_back(Q.Val);
+ Created->push_back(Q.getNode());
if (magics.a == 0) {
return DAG.getNode(ISD::SRL, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
} else {
- SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
+ SDValue NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
if (Created)
- Created->push_back(NPQ.Val);
+ Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
DAG.getConstant(1, getShiftAmountTy()));
if (Created)
- Created->push_back(NPQ.Val);
+ Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
if (Created)
- Created->push_back(NPQ.Val);
+ Created->push_back(NPQ.getNode());
return DAG.getNode(ISD::SRL, VT, NPQ,
DAG.getConstant(magics.s-1, getShiftAmountTy()));
}