From: Akira Hatanaka
Date: Sat, 8 Oct 2011 02:24:10 +0000 (+0000)
Subject: Add patterns for unaligned load and store instructions and enable the
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=cb518ee5dd41b5c648cff2d4f258a6f26572598c;p=oota-llvm.git

Add patterns for unaligned load and store instructions and enable the
instruction selector to generate them.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@141471 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 96d8643fc35..e6beb56dc56 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -44,6 +44,11 @@ using namespace llvm;
 
+static bool isUnalignedLoadStore(unsigned Opc) {
+  return Opc == Mips::ULW || Opc == Mips::ULH || Opc == Mips::ULHu ||
+         Opc == Mips::USW || Opc == Mips::USH;
+}
+
 void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   SmallString<128> Str;
   raw_svector_ostream OS(Str);
@@ -58,29 +63,15 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   MCInst TmpInst0;
   MCInstLowering.Lower(MI, TmpInst0);
 
-  // Convert aligned loads/stores to their unaligned counterparts.
-  if (!MI->memoperands_empty()) {
-    unsigned NaturalAlignment, UnalignedOpc;
-
-    switch (Opc) {
-    case Mips::LW:  NaturalAlignment = 4; UnalignedOpc = Mips::ULW;  break;
-    case Mips::SW:  NaturalAlignment = 4; UnalignedOpc = Mips::USW;  break;
-    case Mips::LH:  NaturalAlignment = 2; UnalignedOpc = Mips::ULH;  break;
-    case Mips::LHu: NaturalAlignment = 2; UnalignedOpc = Mips::ULHu; break;
-    case Mips::SH:  NaturalAlignment = 2; UnalignedOpc = Mips::USH;  break;
-    default: NaturalAlignment = 0;
-    }
-
-    if ((*MI->memoperands_begin())->getAlignment() < NaturalAlignment) {
-      MCInst Directive;
-      Directive.setOpcode(Mips::MACRO);
-      OutStreamer.EmitInstruction(Directive);
-      TmpInst0.setOpcode(UnalignedOpc);
-      OutStreamer.EmitInstruction(TmpInst0);
-      Directive.setOpcode(Mips::NOMACRO);
-      OutStreamer.EmitInstruction(Directive);
-      return;
-    }
+  // Enclose unaligned load or store with .macro & .nomacro directives.
+  if (isUnalignedLoadStore(Opc)) {
+    MCInst Directive;
+    Directive.setOpcode(Mips::MACRO);
+    OutStreamer.EmitInstruction(Directive);
+    OutStreamer.EmitInstruction(TmpInst0);
+    Directive.setOpcode(Mips::NOMACRO);
+    OutStreamer.EmitInstruction(Directive);
+    return;
   }
 
   OutStreamer.EmitInstruction(TmpInst0);
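
For illustration (not part of the patch): the unaligned pseudos now survive
until assembly printing, and EmitInstruction brackets them with the MACRO and
NOMACRO directive pseudos, which the Mips backend prints as ".set macro" and
".set nomacro". A sketch of the expected output for ULW, assuming a
little-endian target and the usual GNU assembler expansion of the ulw macro
into an lwl/lwr pair (the exact expansion and operand order depend on the
assembler and endianness):

        .set    macro           # Mips::MACRO
        ulw     $2, 0($4)       # assembler macro; GAS expands it to roughly:
                                #   lwr $2, 0($4)
                                #   lwl $2, 3($4)
        .set    nomacro         # Mips::NOMACRO
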
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 75e6b04bed7..e446e6a143d 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -188,6 +188,45 @@ def immZExt5 : PatLeaf<(imm), [{
 // since load and store instructions from stack used it.
 def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex]>;
 
+//===----------------------------------------------------------------------===//
+// Pattern fragment for load/store
+//===----------------------------------------------------------------------===//
+class UnalignedLoad<PatFrag Node> : PatFrag<(ops node:$ptr), (Node node:$ptr), [{
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
+}]>;
+
+class AlignedLoad<PatFrag Node> : PatFrag<(ops node:$ptr), (Node node:$ptr), [{
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
+}]>;
+
+class UnalignedStore<PatFrag Node> : PatFrag<(ops node:$val, node:$ptr),
+                                             (Node node:$val, node:$ptr), [{
+  StoreSDNode *SD = cast<StoreSDNode>(N);
+  return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
+}]>;
+
+class AlignedStore<PatFrag Node> : PatFrag<(ops node:$val, node:$ptr),
+                                           (Node node:$val, node:$ptr), [{
+  StoreSDNode *SD = cast<StoreSDNode>(N);
+  return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
+}]>;
+
+// Load/Store PatFrags.
+def sextloadi16_a   : AlignedLoad<sextloadi16>;
+def zextloadi16_a   : AlignedLoad<zextloadi16>;
+def extloadi16_a    : AlignedLoad<extloadi16>;
+def load_a          : AlignedLoad<load>;
+def truncstorei16_a : AlignedStore<truncstorei16>;
+def store_a         : AlignedStore<store>;
+def sextloadi16_u   : UnalignedLoad<sextloadi16>;
+def zextloadi16_u   : UnalignedLoad<zextloadi16>;
+def extloadi16_u    : UnalignedLoad<extloadi16>;
+def load_u          : UnalignedLoad<load>;
+def truncstorei16_u : UnalignedStore<truncstorei16>;
+def store_u         : UnalignedStore<store>;
+
 //===----------------------------------------------------------------------===//
 // Instructions specific format
 //===----------------------------------------------------------------------===//
@@ -274,15 +313,19 @@ class LoadUpper<bits<6> op, string instr_asm>:
 
 // Memory Load/Store
 let canFoldAsLoad = 1 in
-class LoadM<bits<6> op, string instr_asm, PatFrag OpNode>:
+class LoadM<bits<6> op, string instr_asm, PatFrag OpNode, bit Pseudo = 0>:
   FI<op, (outs CPURegs:$dst), (ins mem:$addr),
      !strconcat(instr_asm, "\t$dst, $addr"),
-     [(set CPURegs:$dst, (OpNode addr:$addr))], IILoad>;
+     [(set CPURegs:$dst, (OpNode addr:$addr))], IILoad> {
+  let isPseudo = Pseudo;
+}
 
-class StoreM<bits<6> op, string instr_asm, PatFrag OpNode>:
+class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, bit Pseudo = 0>:
   FI<op, (outs), (ins CPURegs:$dst, mem:$addr),
      !strconcat(instr_asm, "\t$dst, $addr"),
-     [(OpNode CPURegs:$dst, addr:$addr)], IIStore>;
+     [(OpNode CPURegs:$dst, addr:$addr)], IIStore> {
+  let isPseudo = Pseudo;
+}
 
 // Conditional Branch
 let isBranch = 1, isTerminator=1, hasDelaySlot = 1 in {
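
The PatFrags above split each 16- and 32-bit memory access on the alignment
recorded in the node's memory operand, so the choice between the native
instruction and the unaligned pseudo is now made by the instruction selector
instead of by rewriting opcodes at print time. A sketch of the intended net
effect for a 32-bit load, with illustrative register assignments:

        # a load with alignment >= 4 matches load_a and selects LW:
        lw      $2, 0($4)
        # the same load with alignment 1 or 2 matches load_u and selects ULW:
        ulw     $2, 0($4)       # printed between .set macro / .set nomacro
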
@@ -498,19 +541,6 @@ let usesCustomInserter = 1 in {
   def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
 }
 
-// Unaligned loads and stores.
-// Replaces LW or SW during MCInstLowering if memory access is unaligned.
-def ULW :
-  MipsPseudo<(outs CPURegs:$dst), (ins mem:$addr), "ulw\t$dst, $addr", []>;
-def ULH :
-  MipsPseudo<(outs CPURegs:$dst), (ins mem:$addr), "ulh\t$dst, $addr", []>;
-def ULHu :
-  MipsPseudo<(outs CPURegs:$dst), (ins mem:$addr), "ulhu\t$dst, $addr", []>;
-def USW :
-  MipsPseudo<(outs), (ins CPURegs:$dst, mem:$addr), "usw\t$dst, $addr", []>;
-def USH :
-  MipsPseudo<(outs), (ins CPURegs:$dst, mem:$addr), "ush\t$dst, $addr", []>;
-
 //===----------------------------------------------------------------------===//
 // Instruction definition
 //===----------------------------------------------------------------------===//
@@ -556,14 +586,22 @@ let Predicates = [HasMips32r2] in {
 }
 
 /// Load and Store Instructions
+/// aligned
 def LB  : LoadM<0x20, "lb",  sextloadi8>;
 def LBu : LoadM<0x24, "lbu", zextloadi8>;
-def LH  : LoadM<0x21, "lh",  sextloadi16>;
-def LHu : LoadM<0x25, "lhu", zextloadi16>;
-def LW  : LoadM<0x23, "lw",  load>;
+def LH  : LoadM<0x21, "lh",  sextloadi16_a>;
+def LHu : LoadM<0x25, "lhu", zextloadi16_a>;
+def LW  : LoadM<0x23, "lw",  load_a>;
 def SB  : StoreM<0x28, "sb", truncstorei8>;
-def SH  : StoreM<0x29, "sh", truncstorei16>;
-def SW  : StoreM<0x2b, "sw", store>;
+def SH  : StoreM<0x29, "sh", truncstorei16_a>;
+def SW  : StoreM<0x2b, "sw", store_a>;
+
+/// unaligned
+def ULH  : LoadM<0x21, "ulh",  sextloadi16_u, 1>;
+def ULHu : LoadM<0x25, "ulhu", zextloadi16_u, 1>;
+def ULW  : LoadM<0x23, "ulw",  load_u, 1>;
+def USH  : StoreM<0x29, "ush", truncstorei16_u, 1>;
+def USW  : StoreM<0x2b, "usw", store_u, 1>;
 
 let hasSideEffects = 1 in
 def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
@@ -789,7 +827,8 @@ def : Pat<(not CPURegs:$in),
 // extended load and stores
 def : Pat<(extloadi1  addr:$src), (LBu addr:$src)>;
 def : Pat<(extloadi8  addr:$src), (LBu addr:$src)>;
-def : Pat<(extloadi16 addr:$src), (LHu addr:$src)>;
+def : Pat<(extloadi16_a addr:$src), (LHu addr:$src)>;
+def : Pat<(extloadi16_u addr:$src), (ULHu addr:$src)>;
 
 // peepholes
 def : Pat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
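
Why the .set macro bracketing matters: the u-form mnemonics are assembler
macros whose expansions may use the assembler temporary $at, and LLVM's Mips
asm printer normally runs the function body under .set nomacro. A rough
sketch of the classic GNU assembler expansion of ULH on a little-endian
target (the sequence and register use vary by assembler and endianness):

        # ulh $2, 0($4) expands to roughly:
        lbu     $1, 0($4)       # low byte; $1 is $at, the assembler temporary
        lb      $2, 1($4)       # high byte, sign-extended
        sll     $2, $2, 8       # shift the sign byte into place
        or      $2, $2, $1      # merge the two bytes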