From: Eli Friedman
Date: Thu, 15 Sep 2011 22:18:49 +0000 (+0000)
Subject: Use a more efficient lowering for Unordered/Monotonic atomic load/store on Thumb1.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=7cc156647ff448f03898b3d80ecdc22d46430b57;p=oota-llvm.git

Use a more efficient lowering for Unordered/Monotonic atomic load/store on Thumb1.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139865 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index f6a63f58988..02189870f8a 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -639,8 +639,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
-    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
-    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
+    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
+    // Unordered/Monotonic case.
+    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
     // Since the libcalls include locking, fold in the fences
     setShouldFoldAtomicFences(true);
   }
@@ -4861,6 +4863,18 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
                      Op.getOperand(1), Op.getOperand(2));
 }
 
+static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG,
+                                    const ARMSubtarget *ST) {
+  // Monotonic load/store is legal for all targets
+  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
+    return Op;
+
+  // Acquire/Release load/store is not legal for targets without a
+  // dmb or equivalent available.
+  return SDValue();
+}
+
+
 static void ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
                                 SelectionDAG &DAG, unsigned NewOp) {
@@ -4945,6 +4959,8 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::ADDC:
   case ISD::ADDE:
   case ISD::SUBC:
   case ISD::SUBE:   return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG, Subtarget);
   }
   return SDValue();
 }
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index 43fd4ed8c4f..f8198c86946 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -1363,6 +1363,31 @@ def : T1Pat<(sextloadi16 t_addrmode_rrs2:$addr),
 def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
             (tASRri (tLSLri (tLDRHi t_addrmode_is2:$addr), 16), 16)>;
 
+def : T1Pat<(atomic_load_8 t_addrmode_is1:$src),
+            (tLDRBi t_addrmode_is1:$src)>;
+def : T1Pat<(atomic_load_8 t_addrmode_rrs1:$src),
+            (tLDRBr t_addrmode_rrs1:$src)>;
+def : T1Pat<(atomic_load_16 t_addrmode_is2:$src),
+            (tLDRHi t_addrmode_is2:$src)>;
+def : T1Pat<(atomic_load_16 t_addrmode_rrs2:$src),
+            (tLDRHr t_addrmode_rrs2:$src)>;
+def : T1Pat<(atomic_load_32 t_addrmode_is4:$src),
+            (tLDRi t_addrmode_is4:$src)>;
+def : T1Pat<(atomic_load_32 t_addrmode_rrs4:$src),
+            (tLDRr t_addrmode_rrs4:$src)>;
+def : T1Pat<(atomic_store_8 t_addrmode_is1:$ptr, tGPR:$val),
+            (tSTRBi tGPR:$val, t_addrmode_is1:$ptr)>;
+def : T1Pat<(atomic_store_8 t_addrmode_rrs1:$ptr, tGPR:$val),
+            (tSTRBr tGPR:$val, t_addrmode_rrs1:$ptr)>;
+def : T1Pat<(atomic_store_16 t_addrmode_is2:$ptr, tGPR:$val),
+            (tSTRHi tGPR:$val, t_addrmode_is2:$ptr)>;
+def : T1Pat<(atomic_store_16 t_addrmode_rrs2:$ptr, tGPR:$val),
+            (tSTRHr tGPR:$val, t_addrmode_rrs2:$ptr)>;
+def : T1Pat<(atomic_store_32 t_addrmode_is4:$ptr, tGPR:$val),
+            (tSTRi tGPR:$val, t_addrmode_is4:$ptr)>;
+def : T1Pat<(atomic_store_32 t_addrmode_rrs4:$ptr, tGPR:$val),
+            (tSTRr tGPR:$val, t_addrmode_rrs4:$ptr)>;
+
 // Large immediate handling.
 
 // Two piece imms.
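
For context, a minimal sketch (not part of the commit) of the kind of source this
change affects. C++11 std::memory_order_relaxed maps to LLVM's Monotonic ordering,
so accesses like these reach LowerAtomicLoadStore() as ISD::ATOMIC_LOAD/STORE and,
on Thumb1, are now selected to a plain ldr/str by the new T1Pat patterns instead of
being expanded. The names Counter, LoadRelaxed, and StoreRelaxed are illustrative only.

// Illustrative example, not from the patch: relaxed atomics on Thumb1.
#include <atomic>

std::atomic<int> Counter;

int LoadRelaxed() {
  // Becomes ISD::ATOMIC_LOAD with Monotonic ordering; LowerAtomicLoadStore()
  // returns it unchanged, and the atomic_load_32 pattern selects a plain "ldr".
  return Counter.load(std::memory_order_relaxed);
}

void StoreRelaxed(int V) {
  // Likewise ISD::ATOMIC_STORE with Monotonic ordering selects a plain "str".
  Counter.store(V, std::memory_order_relaxed);
}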