From 7f47ce966219b8dbc37cf8c289660dd83923289f Mon Sep 17 00:00:00 2001 From: Richard Osborne Date: Thu, 16 Jul 2009 10:21:18 +0000 Subject: [PATCH] Custom lower unaligned 32 bit stores and loads into libcalls. This is a big code size win since before they were expanding to upto 16 instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75901 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/XCore/XCoreISelLowering.cpp | 89 +++++++++++++++++++++++++- lib/Target/XCore/XCoreISelLowering.h | 2 + test/CodeGen/XCore/unaligned_load.ll | 9 +++ test/CodeGen/XCore/unaligned_store.ll | 9 +++ 4 files changed, 108 insertions(+), 1 deletion(-) create mode 100644 test/CodeGen/XCore/unaligned_load.ll create mode 100644 test/CodeGen/XCore/unaligned_store.ll diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index d65495076e4..88502549a59 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -131,7 +131,11 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM) setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand); - + + // Custom expand misaligned loads / stores. 
+ setOperationAction(ISD::LOAD, MVT::i32, Custom); + setOperationAction(ISD::STORE, MVT::i32, Custom); + // Varargs setOperationAction(ISD::VAEND, MVT::Other, Expand); setOperationAction(ISD::VACOPY, MVT::Other, Expand); @@ -159,6 +163,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) { case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); + case ISD::LOAD: return LowerLOAD(Op, DAG); + case ISD::STORE: return LowerSTORE(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::VAARG: return LowerVAARG(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); @@ -319,6 +325,87 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG) return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI); } +SDValue XCoreTargetLowering:: +LowerLOAD(SDValue Op, SelectionDAG &DAG) +{ + LoadSDNode *LD = cast<LoadSDNode>(Op); + assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type"); + assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load MVT"); + if (allowsUnalignedMemoryAccesses()) { + return SDValue(); + } + unsigned ABIAlignment = getTargetData()-> + getABITypeAlignment(LD->getMemoryVT().getTypeForMVT(*DAG.getContext())); + // Leave aligned load alone. + if (LD->getAlignment() >= ABIAlignment) { + return SDValue(); + } + SDValue Chain = LD->getChain(); + SDValue BasePtr = LD->getBasePtr(); + DebugLoc dl = Op.getDebugLoc(); + + // Lower to a call to __misaligned_load(BasePtr).
+ const Type *IntPtrTy = getTargetData()->getIntPtrType(); + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + + Entry.Ty = IntPtrTy; + Entry.Node = BasePtr; + Args.push_back(Entry); + + std::pair<SDValue, SDValue> CallResult = + LowerCallTo(Chain, IntPtrTy, false, false, + false, false, 0, CallingConv::C, false, + DAG.getExternalSymbol("__misaligned_load", getPointerTy()), + Args, DAG, dl); + + SDValue Ops[] = + { CallResult.first, CallResult.second }; + + return DAG.getMergeValues(Ops, 2, dl); +} + +SDValue XCoreTargetLowering:: +LowerSTORE(SDValue Op, SelectionDAG &DAG) +{ + StoreSDNode *ST = cast<StoreSDNode>(Op); + assert(!ST->isTruncatingStore() && "Unexpected store type"); + assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store MVT"); + if (allowsUnalignedMemoryAccesses()) { + return SDValue(); + } + unsigned ABIAlignment = getTargetData()-> + getABITypeAlignment(ST->getMemoryVT().getTypeForMVT(*DAG.getContext())); + // Leave aligned store alone. + if (ST->getAlignment() >= ABIAlignment) { + return SDValue(); + } + SDValue Chain = ST->getChain(); + SDValue BasePtr = ST->getBasePtr(); + SDValue Value = ST->getValue(); + DebugLoc dl = Op.getDebugLoc(); + + // Lower to a call to __misaligned_store(BasePtr, Value).
+ const Type *IntPtrTy = getTargetData()->getIntPtrType(); + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + + Entry.Ty = IntPtrTy; + Entry.Node = BasePtr; + Args.push_back(Entry); + + Entry.Node = Value; + Args.push_back(Entry); + + std::pair<SDValue, SDValue> CallResult = + LowerCallTo(Chain, Type::VoidTy, false, false, + false, false, 0, CallingConv::C, false, + DAG.getExternalSymbol("__misaligned_store", getPointerTy()), + Args, DAG, dl); + + return CallResult.second; +} + SDValue XCoreTargetLowering:: ExpandADDSUB(SDNode *N, SelectionDAG &DAG) { diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h index 753ea819c2b..e7a48a97265 100644 --- a/lib/Target/XCore/XCoreISelLowering.h +++ b/lib/Target/XCore/XCoreISelLowering.h @@ -101,6 +101,8 @@ namespace llvm { SelectionDAG &DAG); // Lower Operand specifics + SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG); + SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG); SDValue LowerRET(SDValue Op, SelectionDAG &DAG); SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); diff --git a/test/CodeGen/XCore/unaligned_load.ll b/test/CodeGen/XCore/unaligned_load.ll new file mode 100644 index 00000000000..a6a50893b91 --- /dev/null +++ b/test/CodeGen/XCore/unaligned_load.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc -march=xcore > %t1.s +; RUN: grep "bl __misaligned_load" %t1.s | count 1 + +; Byte aligned load. Expands to call to __misaligned_load. +define i32 @align1(i32* %p) nounwind { +entry: + %0 = load i32* %p, align 1 ; <i32> [#uses=1] + ret i32 %0 +} diff --git a/test/CodeGen/XCore/unaligned_store.ll b/test/CodeGen/XCore/unaligned_store.ll new file mode 100644 index 00000000000..b7a519299fd --- /dev/null +++ b/test/CodeGen/XCore/unaligned_store.ll @@ -0,0 +1,9 @@ +; RUN: llvm-as < %s | llc -march=xcore > %t1.s +; RUN: grep "bl __misaligned_store" %t1.s | count 1 + +; Byte aligned store. Expands to call to __misaligned_store.
+define void @align1(i32* %p, i32 %val) nounwind { +entry: + store i32 %val, i32* %p, align 1 + ret void +} -- 2.34.1