getLoadXAction(LType, VT) == Custom;
}
- /// getStoreXAction - Return how this store with truncation should be treated:
- /// either it is legal, needs to be promoted to a larger size, needs to be
- /// expanded to some other code sequence, or the target has a custom expander
- /// for it.
- LegalizeAction getStoreXAction(MVT::ValueType VT) const {
- if (MVT::isExtendedVT(VT)) return getTypeAction(VT);
- return (LegalizeAction)((StoreXActions >> (2*VT)) & 3);
+ /// getTruncStoreAction - Return how this store with truncation should be
+ /// treated: either it is legal, needs to be promoted to a larger size, needs
+ /// to be expanded to some other code sequence, or the target has a custom
+ /// expander for it.
+ /// Actions are packed two bits per MemVT into one 64-bit word per ValVT,
+ /// which is why MemVT must be below 32.
+ LegalizeAction getTruncStoreAction(MVT::ValueType ValVT,
+ MVT::ValueType MemVT) const {
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < 32 &&
+ "Table isn't big enough!");
+ return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
}
- /// isStoreXLegal - Return true if the specified store with truncation is
+ /// isTruncStoreLegal - Return true if the specified store with truncation is
/// legal on this target.
- bool isStoreXLegal(MVT::ValueType VT) const {
- return getStoreXAction(VT) == Legal || getStoreXAction(VT) == Custom;
+ bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
+ // Custom counts as usable here: the target supplies its own lowering
+ // for the node, so callers may emit it.
+ return getTruncStoreAction(ValVT, MemVT) == Legal ||
+ getTruncStoreAction(ValVT, MemVT) == Custom;
}
/// getIndexedLoadAction - Return how the indexed load should be treated:
LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
}
- /// setStoreXAction - Indicate that the specified store with truncation does
+ /// setTruncStoreAction - Indicate that the specified truncating store does
/// not work with the specified type and indicate what to do about it.
- void setStoreXAction(MVT::ValueType VT, LegalizeAction Action) {
- assert(VT < 32 && "Table isn't big enough!");
- StoreXActions &= ~(uint64_t(3UL) << VT*2);
- StoreXActions |= (uint64_t)Action << VT*2;
+ void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
+ LegalizeAction Action) {
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < 32 &&
+ "Table isn't big enough!");
+ // Clear the old two-bit action for this (ValVT, MemVT) pair, then set it.
+ TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
+ TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
}
/// setIndexedLoadAction - Indicate that the specified indexed load does or
/// with the load.
uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
- /// StoreXActions - For each store with truncation of each value type, keep a
- /// LegalizeAction that indicates how instruction selection should deal with
- /// the store.
- uint64_t StoreXActions;
+ /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
+ /// indicates how instruction selection should deal with the store.
+ uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
/// IndexedModeActions - For each indexed mode and each value type, keep a
/// pair of LegalizeAction that indicates how instruction selection should
if (N1C && N0.getOpcode() == ISD::LOAD) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
if (LN0->getExtensionType() != ISD::SEXTLOAD &&
- LN0->getAddressingMode() == ISD::UNINDEXED &&
- N0.hasOneUse()) {
+ LN0->isUnindexed() && N0.hasOneUse()) {
MVT::ValueType EVT, LoadedVT;
if (N1C->getValue() == 255)
EVT = MVT::i8;
SDOperand Ptr;
MVT::ValueType VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- if (LD->getAddressingMode() != ISD::UNINDEXED)
+ if (LD->isIndexed())
return false;
VT = LD->getLoadedVT();
if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- if (ST->getAddressingMode() != ISD::UNINDEXED)
+ if (ST->isIndexed())
return false;
VT = ST->getStoredVT();
if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
SDOperand Ptr;
MVT::ValueType VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- if (LD->getAddressingMode() != ISD::UNINDEXED)
+ if (LD->isIndexed())
return false;
VT = LD->getLoadedVT();
if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
return false;
Ptr = LD->getBasePtr();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- if (ST->getAddressingMode() != ISD::UNINDEXED)
+ if (ST->isIndexed())
return false;
VT = ST->getStoredVT();
if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
// If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original.
if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED) {
+ ST->isUnindexed()) {
unsigned Align = ST->getAlignment();
MVT::ValueType SVT = Value.getOperand(0).getValueType();
unsigned OrigAlign = TLI.getTargetMachine().getTargetData()->
return SDOperand(N, 0);
// FIXME: is there such a thing as a truncating indexed store?
- if (ST->isTruncatingStore() && ST->getAddressingMode() == ISD::UNINDEXED &&
+ if (ST->isTruncatingStore() && ST->isUnindexed() &&
MVT::isInteger(Value.getValueType())) {
// See if we can simplify the input to this truncstore with knowledge that
// only the low bits are being used. For example:
// is dead/noop.
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
if (Ld->getBasePtr() == Ptr && ST->getStoredVT() == Ld->getLoadedVT() &&
- ST->getAddressingMode() == ISD::UNINDEXED &&
- !ST->isVolatile() &&
+ ST->isUnindexed() && !ST->isVolatile() &&
// There can't be any side effects between the load and store, such as
// a call or store.
Chain.reachesChainWithoutSideEffects(SDOperand(Ld, 1))) {
}
}
+ // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
+ // truncating store. We can do this even if this is already a truncstore.
+ if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
+ && TLI.isTypeLegal(Value.getOperand(0).getValueType()) &&
+ Value.Val->hasOneUse() && ST->isUnindexed() &&
+ TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
+ ST->getStoredVT())) {
+ return DAG.getTruncStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(),
+ ST->getSrcValueOffset(), ST->getStoredVT(),
+ ST->isVolatile(), ST->getAlignment());
+ }
+
return SDOperand();
}
break;
}
} else {
- // Truncating store
- assert(isTypeLegal(ST->getValue().getValueType()) &&
- "Cannot handle illegal TRUNCSTORE yet!");
- Tmp3 = LegalizeOp(ST->getValue());
+ switch (getTypeAction(ST->getValue().getValueType())) {
+ case Legal:
+ Tmp3 = LegalizeOp(ST->getValue());
+ break;
+ case Promote:
+ // We can promote the value, the truncstore will still take care of it.
+ Tmp3 = PromoteOp(ST->getValue());
+ break;
+ case Expand:
+ // Just store the low part. This may become a non-trunc store, so make
+ // sure to use getTruncStore, not UpdateNodeOperands below.
+ ExpandOp(ST->getValue(), Tmp3, Tmp4);
+ return DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
+ SVOffset, MVT::i8, isVolatile, Alignment);
+ }
- // The only promote case we handle is TRUNCSTORE:i1 X into
- // -> TRUNCSTORE:i8 (and X, 1)
- if (ST->getStoredVT() == MVT::i1 &&
- TLI.getStoreXAction(MVT::i1) == TargetLowering::Promote) {
+ // Unconditionally promote TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
+ if (ST->getStoredVT() == MVT::i1) {
// Promote the bool to a mask then store.
Tmp3 = DAG.getNode(ISD::AND, Tmp3.getValueType(), Tmp3,
DAG.getConstant(1, Tmp3.getValueType()));
}
MVT::ValueType StVT = cast<StoreSDNode>(Result.Val)->getStoredVT();
- switch (TLI.getStoreXAction(StVT)) {
+ switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
default: assert(0 && "This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
}
break;
case TargetLowering::Custom:
- Tmp1 = TLI.LowerOperation(Result, DAG);
- if (Tmp1.Val) Result = Tmp1;
+ Result = TLI.LowerOperation(Result, DAG);
break;
}
}
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadXActions, 0, sizeof(LoadXActions));
- memset(&StoreXActions, 0, sizeof(StoreXActions));
+ memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
memset(&ConvertActions, 0, sizeof(ConvertActions));
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
+
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
computeRegisterProperties();
def : ARMPat<(extloadi8 addrmode2:$addr), (LDRB addrmode2:$addr)>;
def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
-// truncstore i1 -> truncstore i8
-def : ARMPat<(truncstorei1 GPR:$src, addrmode2:$dst),
- (STRB GPR:$src, addrmode2:$dst)>;
-def : ARMPat<(pre_truncsti1 GPR:$src, GPR:$base, am2offset:$offset),
- (STRB_PRE GPR:$src, GPR:$base, am2offset:$offset)>;
-def : ARMPat<(post_truncsti1 GPR:$src, GPR:$base, am2offset:$offset),
- (STRB_POST GPR:$src, GPR:$base, am2offset:$offset)>;
-
// smul* and smla*
def : ARMV5TEPat<(mul (sra (shl GPR:$a, 16), 16), (sra (shl GPR:$b, 16), 16)),
(SMULBB GPR:$a, GPR:$b)>;
def : ThumbPat<(extloadi8 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
def : ThumbPat<(extloadi16 t_addrmode_s2:$addr), (tLDRH t_addrmode_s2:$addr)>;
-// truncstore i1 -> truncstore i8
-def : ThumbPat<(truncstorei1 GPR:$src, t_addrmode_s1:$dst),
- (tSTRB GPR:$src, t_addrmode_s1:$dst)>;
-
// Large immediate handling.
// Two piece imms.
setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
setLoadXAction(ISD::SEXTLOAD, MVT::i16, Expand);
- setStoreXAction(MVT::i1, Promote);
-
// setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setLoadXAction(ISD::EXTLOAD, MVT::i1, Custom);
setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setStoreXAction(MVT::i1, Custom);
+ setTruncStoreAction(MVT::i8, MVT::i1, Custom);
+ setTruncStoreAction(MVT::i16, MVT::i1, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i1, Custom);
+ setTruncStoreAction(MVT::i64, MVT::i1, Custom);
+ setTruncStoreAction(MVT::i128, MVT::i1, Custom);
setLoadXAction(ISD::EXTLOAD, MVT::i8, Custom);
setLoadXAction(ISD::SEXTLOAD, MVT::i8, Custom);
setLoadXAction(ISD::ZEXTLOAD, MVT::i8, Custom);
- setStoreXAction(MVT::i8, Custom);
-
+ setTruncStoreAction(MVT::i8 , MVT::i8, Custom);
+ setTruncStoreAction(MVT::i16 , MVT::i8, Custom);
+ setTruncStoreAction(MVT::i32 , MVT::i8, Custom);
+ setTruncStoreAction(MVT::i64 , MVT::i8, Custom);
+ setTruncStoreAction(MVT::i128, MVT::i8, Custom);
+
setLoadXAction(ISD::EXTLOAD, MVT::i16, Custom);
setLoadXAction(ISD::SEXTLOAD, MVT::i16, Custom);
setLoadXAction(ISD::ZEXTLOAD, MVT::i16, Custom);
setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
- // Store operations for i1 types must be promoted
- setStoreXAction(MVT::i1, Promote);
-
// Mips does not have these NodeTypes below.
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
def : Pat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
-def : Pat<(truncstorei1 CPURegs:$src, addr:$addr),
- (SB CPURegs:$src, addr:$addr)>;
// some peepholes
def : Pat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
// PowerPC has an i16 but no i8 (or i1) SEXTLOAD
setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
-
- // PowerPC does not have truncstore for i1.
- setStoreXAction(MVT::i1, Promote);
-
+
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
// PowerPC has pre-inc load and store's.
setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
// zextload bool -> zextload byte
def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
-
-// truncstore bool -> truncstore byte.
-def : Pat<(truncstorei1 IntRegs:$src, ADDRrr:$addr),
- (STBrr ADDRrr:$addr, IntRegs:$src)>;
-def : Pat<(truncstorei1 IntRegs:$src, ADDRri:$addr),
- (STBri ADDRri:$addr, IntRegs:$src)>;
}]>;
// truncstore fragments.
-def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getStoredVT() == MVT::i1 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
-}]>;
def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ // We don't accept any truncstore of integer registers.
+ setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
+ setTruncStoreAction(MVT::i32, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
+ setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
}
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(truncstorei1 (i8 imm:$src), addr:$dst),
- (MOV8mi addr:$dst, imm:$src)>;
-def : Pat<(truncstorei1 GR8:$src, addr:$dst),
- (MOV8mr addr:$dst, GR8:$src)>;
-
// Comparisons.
// TEST R,R is smaller than CMP R,0
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | not grep flds
+
+; The fptrunc feeding the store should be folded into a truncating f32 store,
+; so no f32 reload (flds) appears in the output. NOTE(review): presumably
+; exercises the new FP_ROUND + store -> truncstore combine — confirm.
+define void @foo(x86_fp80 %a, x86_fp80 %b, float* %fp) {
+ %c = add x86_fp80 %a, %b
+ %d = fptrunc x86_fp80 %c to float
+ store float %d, float* %fp
+ ret void
+}