From: Dan Gohman
Date: Wed, 3 Dec 2008 18:15:48 +0000 (+0000)
Subject: Rename isSimpleLoad to canFoldAsLoad, to better reflect its meaning.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=15511cf1660cfd6bb8b8e8fca2db9450f50430ee;p=oota-llvm.git

Rename isSimpleLoad to canFoldAsLoad, to better reflect its meaning.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60487 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/docs/TableGenFundamentals.html b/docs/TableGenFundamentals.html
index 48087274a17..526d6d47917 100644
--- a/docs/TableGenFundamentals.html
+++ b/docs/TableGenFundamentals.html
@@ -138,7 +138,7 @@ file prints this (at the time of this writing):

bit isIndirectBranch = 0; bit isBarrier = 0; bit isCall = 0; - bit isSimpleLoad = 0; + bit canFoldAsLoad = 0; bit mayLoad = 0; bit mayStore = 0; bit isImplicitDef = 0; diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index c3daf09accc..99b314c29b8 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -189,7 +189,7 @@ class Instruction { bit isIndirectBranch = 0; // Is this instruction an indirect branch? bit isBarrier = 0; // Can control flow fall through this instruction? bit isCall = 0; // Is this instruction a call instruction? - bit isSimpleLoad = 0; // Can this be folded as a memory operand? + bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand? bit mayLoad = 0; // Is it possible for this inst to read memory? bit mayStore = 0; // Is it possible for this inst to write memory? bit isTwoAddress = 0; // Is this a two address instruction? diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h index 70893e313b8..02616af5fcc 100644 --- a/include/llvm/Target/TargetInstrDesc.h +++ b/include/llvm/Target/TargetInstrDesc.h @@ -90,7 +90,7 @@ namespace TID { Predicable, NotDuplicable, DelaySlot, - SimpleLoad, + FoldableAsLoad, MayLoad, MayStore, UnmodeledSideEffects, @@ -301,7 +301,7 @@ public: return Flags & (1 << TID::DelaySlot); } - /// isSimpleLoad - Return true for instructions that can be folded as + /// canFoldAsLoad - Return true for instructions that can be folded as /// memory operands in other instructions. The most common use for this /// is instructions that are simple loads from memory that don't modify /// the loaded value in any way, but it can also be used for instructions @@ -309,8 +309,8 @@ public: /// on x86, to allow them to be folded when it is beneficial. /// This should only be set on instructions that return a value in their /// only virtual register definition. - bool isSimpleLoad() const { - return Flags & (1 << TID::SimpleLoad); + bool canFoldAsLoad() const { + return Flags & (1 << TID::FoldableAsLoad); } //===--------------------------------------------------------------------===// diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 1de31b9da2c..eb237efa120 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1841,7 +1841,7 @@ addIntervalsForSpills(const LiveInterval &li, int LdSlot = 0; bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); bool isLoad = isLoadSS || - (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad())); + (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad())); bool IsFirstRange = true; for (LiveInterval::Ranges::const_iterator I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { @@ -1927,7 +1927,7 @@ addIntervalsForSpills(const LiveInterval &li, int LdSlot = 0; bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); bool isLoad = isLoadSS || - (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad()); + (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad()); rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, CanDelete, vrm, rc, ReMatIds, loopInfo, @@ -2056,7 +2056,7 @@ addIntervalsForSpills(const LiveInterval &li, int LdSlot = 0; bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); // If the rematerializable def is a load, also try to fold it. 
- if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad()) + if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad()) Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, Ops, isLoadSS, LdSlot, VReg); unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI); diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index dd39fd6ec69..51368e9d330 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -479,7 +479,7 @@ def PICADD : AXI1<0b0100, (outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p), [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>; let AddedComplexity = 10 in { -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def PICLDR : AXI2ldw<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p), Pseudo, "${addr:label}:\n\tldr$p $dst, $addr", [(set GPR:$dst, (load addrmodepc:$addr))]>; @@ -614,13 +614,13 @@ let isBranch = 1, isTerminator = 1 in { // // Load -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def LDR : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, "ldr", " $dst, $addr", [(set GPR:$dst, (load addrmode2:$addr))]>; // Special LDR for loads from non-pc-relative constpools. -let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1 in +let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in def LDRcp : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, "ldr", " $dst, $addr", []>; diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index cceba8f6b4c..af6eca044a5 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -192,7 +192,7 @@ let isBranch = 1, isTerminator = 1 in // Load Store Instructions. // -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def tLDR : TI4<(outs GPR:$dst), (ins t_addrmode_s4:$addr), "ldr $dst, $addr", [(set GPR:$dst, (load t_addrmode_s4:$addr))]>; @@ -213,25 +213,25 @@ def tLDRSH : TI2<(outs GPR:$dst), (ins t_addrmode_rr:$addr), "ldrsh $dst, $addr", [(set GPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>; -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def tLDRspi : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr), "ldr $dst, $addr", [(set GPR:$dst, (load t_addrmode_sp:$addr))]>; // Special instruction for restore. It cannot clobber condition register // when it's expanded by eliminateCallFramePseudoInstr(). -let isSimpleLoad = 1, mayLoad = 1 in +let canFoldAsLoad = 1, mayLoad = 1 in def tRestore : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr), "ldr $dst, $addr", []>; // Load tconstpool -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def tLDRpci : TIs<(outs GPR:$dst), (ins i32imm:$addr), "ldr $dst, $addr", [(set GPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>; // Special LDR for loads from non-pc-relative constpools. -let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1 in +let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in def tLDRcp : TIs<(outs GPR:$dst), (ins i32imm:$addr), "ldr $dst, $addr", []>; diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index 1391dffceee..222d60a61c2 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -34,7 +34,7 @@ def arm_fmdrr : SDNode<"ARMISD::FMDRR", SDT_FMDRR>; // Load / store Instructions. 
// -let isSimpleLoad = 1 in { +let canFoldAsLoad = 1 in { def FLDD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr), "fldd", " $dst, $addr", [(set DPR:$dst, (load addrmode5:$addr))]>; @@ -42,7 +42,7 @@ def FLDD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr), def FLDS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr), "flds", " $dst, $addr", [(set SPR:$dst, (load addrmode5:$addr))]>; -} // isSimpleLoad +} // canFoldAsLoad def FSTD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr), "fstd", " $src, $addr", diff --git a/lib/Target/Alpha/AlphaInstrFormats.td b/lib/Target/Alpha/AlphaInstrFormats.td index 6eb59e0eeca..6d82875fad2 100644 --- a/lib/Target/Alpha/AlphaInstrFormats.td +++ b/lib/Target/Alpha/AlphaInstrFormats.td @@ -41,7 +41,7 @@ class InstAlpha op, string asmstr, InstrItinClass itin> : Instruction { class MForm opcode, bit load, string asmstr, list pattern, InstrItinClass itin> : InstAlpha { let Pattern = pattern; - let isSimpleLoad = load; + let canFoldAsLoad = load; let Defs = [R28]; //We may use this for frame index calculations, so reserve it here bits<5> Ra; diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td index 9b6df875a42..5d6d8af0cee 100644 --- a/lib/Target/CellSPU/SPUInstrInfo.td +++ b/lib/Target/CellSPU/SPUInstrInfo.td @@ -47,7 +47,7 @@ def DWARF_LOC : Pseudo<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$fi // finally the X-form with the register-register. //===----------------------------------------------------------------------===// -let isSimpleLoad = 1 in { +let canFoldAsLoad = 1 in { class LoadDFormVec : RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src), "lqd\t$rT, $src", diff --git a/lib/Target/IA64/IA64InstrInfo.td b/lib/Target/IA64/IA64InstrInfo.td index eaed25e15e9..2ab9897bdde 100644 --- a/lib/Target/IA64/IA64InstrInfo.td +++ b/lib/Target/IA64/IA64InstrInfo.td @@ -546,7 +546,7 @@ let mayStore = 1 in { "stf.spill [$dstPtr] = $value">, isM; } -let isSimpleLoad = 1 in { +let canFoldAsLoad = 1 in { def LD1 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr), "ld1 $dst = [$srcPtr]">, isM; def LD2 : AForm<0x03, 0x0b, (outs GR:$dst), (ins GR:$srcPtr), diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index e6ac6fbbf31..e30dfbf29e9 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -224,7 +224,7 @@ class LoadUpper op, string instr_asm>: [], IIAlu>; // Memory Load/Store -let isSimpleLoad = 1, hasDelaySlot = 1 in +let canFoldAsLoad = 1, hasDelaySlot = 1 in class LoadM op, string instr_asm, PatFrag OpNode>: FI< op, (outs CPURegs:$dst), diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index a2f1834165b..417c8ed6e90 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -468,7 +468,7 @@ def RLDICR : MDForm_1<30, 1, // Sign extending loads. -let isSimpleLoad = 1, PPC970_Unit = 2 in { +let canFoldAsLoad = 1, PPC970_Unit = 2 in { def LHA8: DForm_1<42, (outs G8RC:$rD), (ins memri:$src), "lha $rD, $src", LdStLHA, [(set G8RC:$rD, (sextloadi16 iaddr:$src))]>, @@ -498,7 +498,7 @@ def LHAU8 : DForm_1<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp, } // Zero extending loads. 
-let isSimpleLoad = 1, PPC970_Unit = 2 in { +let canFoldAsLoad = 1, PPC970_Unit = 2 in { def LBZ8 : DForm_1<34, (outs G8RC:$rD), (ins memri:$src), "lbz $rD, $src", LdStGeneral, [(set G8RC:$rD, (zextloadi8 iaddr:$src))]>; @@ -539,7 +539,7 @@ def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), // Full 8-byte loads. -let isSimpleLoad = 1, PPC970_Unit = 2 in { +let canFoldAsLoad = 1, PPC970_Unit = 2 in { def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src), "ld $rD, $src", LdStLD, [(set G8RC:$rD, (load ixaddr:$src))]>, isPPC64; diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td index 8fdab72ddcb..c90fbc91015 100644 --- a/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/lib/Target/PowerPC/PPCInstrAltivec.td @@ -199,7 +199,7 @@ def MTVSCR : VXForm_5<1604, (outs), (ins VRRC:$vB), "mtvscr $vB", LdStGeneral, [(int_ppc_altivec_mtvscr VRRC:$vB)]>; -let isSimpleLoad = 1, PPC970_Unit = 2 in { // Loads. +let canFoldAsLoad = 1, PPC970_Unit = 2 in { // Loads. def LVEBX: XForm_1<31, 7, (outs VRRC:$vD), (ins memrr:$src), "lvebx $vD, $src", LdStGeneral, [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>; diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 4106c4582a6..772e25ad232 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -659,7 +659,7 @@ def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStGeneral, [(trap)]>; // // Unindexed (r+i) Loads. -let isSimpleLoad = 1, PPC970_Unit = 2 in { +let canFoldAsLoad = 1, PPC970_Unit = 2 in { def LBZ : DForm_1<34, (outs GPRC:$rD), (ins memri:$src), "lbz $rD, $src", LdStGeneral, [(set GPRC:$rD, (zextloadi8 iaddr:$src))]>; @@ -718,7 +718,7 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), // Indexed (r+r) Loads. // -let isSimpleLoad = 1, PPC970_Unit = 2 in { +let canFoldAsLoad = 1, PPC970_Unit = 2 in { def LBZX : XForm_1<31, 87, (outs GPRC:$rD), (ins memrr:$src), "lbzx $rD, $src", LdStGeneral, [(set GPRC:$rD, (zextloadi8 xaddr:$src))]>; diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index 77533188e4a..b7c6381caa5 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -232,7 +232,7 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), [(set GR64:$dst, i64immSExt32:$src)]>; } -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (load addr:$src))]>; diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index b07b6efddc8..2ae904f3243 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -342,7 +342,7 @@ def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins), "fcmovnu\t{$op, %st(0)|%ST(0), $op}">, DB; // Floating point loads & stores. 
-let isSimpleLoad = 1 in { +let canFoldAsLoad = 1 in { def LD_Fp32m : FpIf32<(outs RFP32:$dst), (ins f32mem:$src), ZeroArgFP, [(set RFP32:$dst, (loadf32 addr:$src))]>; let isReMaterializable = 1, mayHaveSideEffects = 1 in diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index a4fe7e88872..5899f211c25 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -677,7 +677,7 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), "mov{l}\t{$src, $dst|$dst, $src}", [(store (i32 imm:$src), addr:$dst)]>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src), "mov{b}\t{$src, $dst|$dst, $src}", [(set GR8:$dst, (load addr:$src))]>; @@ -2666,7 +2666,7 @@ def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src), "mov{l}\t{$src, $dst|$dst, $src}", []>; } // neverHasSideEffects -let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { +let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src), "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize; def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src), diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index f412f843455..3bd9af5a9f4 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -146,7 +146,7 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)] def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (v2i32 (scalar_to_vector GR32:$src)))]>; -let isSimpleLoad = 1, isReMaterializable = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1 in def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; @@ -165,7 +165,7 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMSrcReg, (outs GR64:$dst), (ins VR64:$src let neverHasSideEffects = 1 in def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), "movq\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src), "movq\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (load_mmx addr:$src))]>; diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 58e923d6df5..576c7e691e7 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -315,7 +315,7 @@ let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in { let neverHasSideEffects = 1 in def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src), "movss\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), "movss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (loadf32 addr:$src))]>; @@ -474,7 +474,7 @@ def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src), // Alias instruction to load FR32 from f128mem using movaps. Upper bits are // disregarded. 
-let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src), "movaps\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>; @@ -667,7 +667,7 @@ defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin, let neverHasSideEffects = 1 in def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movaps\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "movaps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>; @@ -679,7 +679,7 @@ def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), let neverHasSideEffects = 1 in def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movups\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "movups\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (loadv4f32 addr:$src))]>; @@ -688,7 +688,7 @@ def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), [(store (v4f32 VR128:$src), addr:$dst)]>; // Intrinsic forms of MOVUPS load and store -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "movups\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>; @@ -987,9 +987,9 @@ def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst), "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>; // Alias instructions that map zero vector to pxor / xorp* for sse. -// We set isSimpleLoad because this can be converted to a constant-pool +// We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-zeros value if folding it would be beneficial. -let isReMaterializable = 1, isAsCheapAsAMove = 1, isSimpleLoad = 1 in +let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "xorps\t$dst, $dst", [(set VR128:$dst, (v4i32 immAllZerosV))]>; @@ -1063,7 +1063,7 @@ def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))), let neverHasSideEffects = 1 in def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src), "movsd\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src), "movsd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (loadf64 addr:$src))]>; @@ -1215,7 +1215,7 @@ def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src), // Alias instruction to load FR64 from f128mem using movapd. Upper bits are // disregarded. 
-let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src), "movapd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>; @@ -1410,7 +1410,7 @@ defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin, let neverHasSideEffects = 1 in def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movapd\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in +let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "movapd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>; @@ -1422,7 +1422,7 @@ def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), let neverHasSideEffects = 1 in def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movupd\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "movupd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (loadv2f64 addr:$src))]>; @@ -1790,7 +1790,7 @@ let Constraints = "$src1 = $dst" in { let neverHasSideEffects = 1 in def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movdqa\t{$src, $dst|$dst, $src}", []>; -let isSimpleLoad = 1, mayLoad = 1 in +let canFoldAsLoad = 1, mayLoad = 1 in def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "movdqa\t{$src, $dst|$dst, $src}", [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>; @@ -1798,7 +1798,7 @@ let mayStore = 1 in def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), "movdqa\t{$src, $dst|$dst, $src}", [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>; -let isSimpleLoad = 1, mayLoad = 1 in +let canFoldAsLoad = 1, mayLoad = 1 in def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "movdqu\t{$src, $dst|$dst, $src}", [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>, @@ -1810,7 +1810,7 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), XS, Requires<[HasSSE2]>; // Intrinsic forms of MOVDQU load and store -let isSimpleLoad = 1 in +let canFoldAsLoad = 1 in def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "movdqu\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>, @@ -2255,9 +2255,9 @@ def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss), (i8 1)), (MFENCE)>; // Alias instructions that map zero vector to pxor / xorp* for sse. -// We set isSimpleLoad because this can be converted to a constant-pool +// We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-ones value if folding it would be beneficial. 
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isSimpleLoad = 1 in +let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1 in def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "pcmpeqd\t$dst, $dst", [(set VR128:$dst, (v4i32 immAllOnesV))]>; diff --git a/utils/TableGen/CodeGenInstruction.cpp b/utils/TableGen/CodeGenInstruction.cpp index a8f747647dc..185db01418b 100644 --- a/utils/TableGen/CodeGenInstruction.cpp +++ b/utils/TableGen/CodeGenInstruction.cpp @@ -84,7 +84,7 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr) isIndirectBranch = R->getValueAsBit("isIndirectBranch"); isBarrier = R->getValueAsBit("isBarrier"); isCall = R->getValueAsBit("isCall"); - isSimpleLoad = R->getValueAsBit("isSimpleLoad"); + canFoldAsLoad = R->getValueAsBit("canFoldAsLoad"); mayLoad = R->getValueAsBit("mayLoad"); mayStore = R->getValueAsBit("mayStore"); bool isTwoAddress = R->getValueAsBit("isTwoAddress"); diff --git a/utils/TableGen/CodeGenInstruction.h b/utils/TableGen/CodeGenInstruction.h index 0727e38c538..f4afd5e45ba 100644 --- a/utils/TableGen/CodeGenInstruction.h +++ b/utils/TableGen/CodeGenInstruction.h @@ -89,7 +89,7 @@ namespace llvm { bool isIndirectBranch; bool isBarrier; bool isCall; - bool isSimpleLoad; + bool canFoldAsLoad; bool mayLoad, mayStore; bool isPredicable; bool isConvertibleToThreeAddress; diff --git a/utils/TableGen/InstrInfoEmitter.cpp b/utils/TableGen/InstrInfoEmitter.cpp index 705901f91df..6201690b10e 100644 --- a/utils/TableGen/InstrInfoEmitter.cpp +++ b/utils/TableGen/InstrInfoEmitter.cpp @@ -262,7 +262,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num, if (Inst.isBarrier) OS << "|(1<
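
For reference, the renamed query is used from target-independent code exactly as in the
LiveIntervalAnalysis.cpp hunks above (ReMatDefMI->getDesc().canFoldAsLoad()). The short
C++ sketch below is illustrative only and is not part of this patch; the wrapper name
mayFoldDefAsLoad is hypothetical, while MachineInstr::getDesc() and
TargetInstrDesc::canFoldAsLoad() are the interfaces this commit renames.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrDesc.h"

    // Return true if a pass may try to fold the value defined by DefMI into one
    // of its users as a memory operand.  canFoldAsLoad (formerly isSimpleLoad)
    // is only set on instructions whose single virtual-register definition is
    // the loaded value, so folding the load cannot drop any other effect of
    // the defining instruction.
    static bool mayFoldDefAsLoad(const llvm::MachineInstr &DefMI) {
      const llvm::TargetInstrDesc &TID = DefMI.getDesc();
      return TID.canFoldAsLoad();
    }

On the TableGen side, a target opts an instruction in with "let canFoldAsLoad = 1 in",
as the ARM, PowerPC, and X86 hunks above show.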