setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::STORE);
setSchedulingPreference(Sched::RegPressure);
setJumpIsExpensive(true);
return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
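+// Returns true if all uses of this value are normal (i.e. non-truncating,
+// unindexed) stores.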
+static bool usesAllNormalStores(SDNode *LoadVal) {
+ for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
+ if (!ISD::isNormalStore(*I))
+ return false;
+ }
+
+ return true;
+}
+
+// If we have a copy of an illegal type, replace it with a load / store of an
+// equivalently sized legal type. This avoids intermediate bit pack / unpack
+// instructions emitted when handling extloads and truncstores. Ideally we could
+// recognize the pack / unpack pattern to eliminate it.
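+//
+// For example, a copy of a <4 x i8> value can then be done with a single
+// 32-bit load and store instead of separate byte loads and stores.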
+SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (!DCI.isBeforeLegalize())
+ return SDValue();
+
+ StoreSDNode *SN = cast<StoreSDNode>(N);
+ SDValue Value = SN->getValue();
+ EVT VT = Value.getValueType();
+
+  if (isTypeLegal(VT) || SN->isVolatile() ||
+      !ISD::isNormalLoad(Value.getNode()))
+    return SDValue();
+
+ LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
+ if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
+ return SDValue();
+
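+  // Pick an equivalently sized legal type for the replacement load and store.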
+ EVT MemVT = LoadVal->getMemoryVT();
+
+ SDLoc SL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
+
+ SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
+ LoadVT, SL,
+ LoadVal->getChain(),
+ LoadVal->getBasePtr(),
+ LoadVal->getOffset(),
+ LoadVT,
+ LoadVal->getMemOperand());
+
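+  // Replace other uses of the original narrow load with a bitcast of the new
+  // wide load, and thread through the new load's chain.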
+ SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
+ DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
+
+ return DAG.getStore(SN->getChain(), SL, NewLoad,
+ SN->getBasePtr(), SN->getMemOperand());
+}
+
SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
}
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
+ DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
break;
}
+
+ case ISD::STORE:
+ return performStoreCombine(N, DCI);
}
return SDValue();
}
--- /dev/null
+; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
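+; Check that a copy of an illegal type such as <4 x i8> is done with a single
+; dword load and store when every use of the loaded value is a normal store.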
+
+; FUNC-LABEL: @test_copy_v4i8
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x3
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x4
+; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: BUFFER_STORE_DWORD [[REG]]
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out3, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; Enable these checks once scalarizing of v4i8 loads is fixed:
+; XSI: BUFFER_LOAD_DWORD
+; XSI: V_BFE
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v4i8_x2_extra_use
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: V_ADD
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+; SI-DAG: BUFFER_STORE_BYTE
+
+; XSI: BUFFER_LOAD_DWORD
+; XSI: BFE
+; XSI: BUFFER_STORE_DWORD
+; XSI: V_ADD
+; XSI: BUFFER_STORE_DWORD
+; XSI-NEXT: BUFFER_STORE_DWORD
+
+; SI: S_ENDPGM
+define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ %add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
+ store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
+ ret void
+}
+
+; FUNC-LABEL: @test_copy_v3i8
+; SI-NOT: BFE
+; SI-NOT: BFI
+; SI: S_ENDPGM
+define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
+ %val = load <3 x i8> addrspace(1)* %in, align 4
+ store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
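+; A volatile load must not be replaced by the combine, so the value is still
+; loaded one byte at a time.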
+; FUNC-LABEL: @test_copy_v4i8_volatile_load
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load volatile <4 x i8> addrspace(1)* %in, align 4
+ store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
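+; Likewise, a volatile store blocks the combine, so the copy is still done
+; with per-byte loads and stores.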
+; FUNC-LABEL: @test_copy_v4i8_volatile_store
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_LOAD_UBYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: BUFFER_STORE_BYTE
+; SI: S_ENDPGM
+define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+ %val = load <4 x i8> addrspace(1)* %in, align 4
+ store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
+ ret void
+}
; SI-ALLOCA: V_MOVRELS_B32_e32
; SI-ALLOCA: V_MOVRELS_B32_e32
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_READ_B64
-; SI-PROMOTE: DS_READ_B64
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x double> addrspace(1)* %in, align 16
%array = alloca <2 x double>, i32 16, align 16
; SI-ALLOCA: V_MOVRELS_B32_e32
; SI-ALLOCA: V_MOVRELS_B32_e32
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_WRITE_B64
-; SI-PROMOTE: DS_READ_B64
-; SI-PROMOTE: DS_READ_B64
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_WRITE_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
+; SI-PROMOTE: DS_READ_B32
define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x i64> addrspace(1)* %in, align 16
%array = alloca <2 x i64>, i32 16, align 16