From 344d9db97062736cd66da6c07baa9108b6cfa419 Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Thu, 7 Oct 2010 23:12:15 +0000 Subject: [PATCH] Code refactoring. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@116002 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/ARM/ARMBaseInstrInfo.cpp | 248 ++++++++++++++++------------ lib/Target/ARM/ARMBaseInstrInfo.h | 16 ++ 2 files changed, 160 insertions(+), 104 deletions(-) diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 823126be30c..a2baf3b9e2c 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1646,6 +1646,145 @@ ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI, } } +int +ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &DefTID, + unsigned DefClass, + unsigned DefIdx, unsigned DefAlign) const { + int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + if (RegNo <= 0) + // Def is the address writeback. + return ItinData->getOperandCycle(DefClass, DefIdx); + + int DefCycle; + if (Subtarget.isCortexA8()) { + // (regno / 2) + (regno % 2) + 1 + DefCycle = RegNo / 2 + 1; + if (RegNo % 2) + ++DefCycle; + } else if (Subtarget.isCortexA9()) { + DefCycle = RegNo; + bool isSLoad = false; + switch (DefTID.getOpcode()) { + default: break; + case ARM::VLDMS: + case ARM::VLDMS_UPD: + isSLoad = true; + break; + } + // If there are odd number of 'S' registers or if it's not 64-bit aligned, + // then it takes an extra cycle. + if ((isSLoad && (RegNo % 2)) || DefAlign < 8) + ++DefCycle; + } else { + // Assume the worst. + DefCycle = RegNo + 2; + } + + return DefCycle; +} + +int +ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &DefTID, + unsigned DefClass, + unsigned DefIdx, unsigned DefAlign) const { + int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + if (RegNo <= 0) + // Def is the address writeback. + return ItinData->getOperandCycle(DefClass, DefIdx); + + int DefCycle; + if (Subtarget.isCortexA8()) { + // 4 registers would be issued: 1, 2, 1. + // 5 registers would be issued: 1, 2, 2. + DefCycle = RegNo / 2; + if (DefCycle < 1) + DefCycle = 1; + // Result latency is issue cycle + 2: E2. + DefCycle += 2; + } else if (Subtarget.isCortexA9()) { + DefCycle = (RegNo / 2); + // If there are odd number of registers or if it's not 64-bit aligned, + // then it takes an extra AGU (Address Generation Unit) cycle. + if ((RegNo % 2) || DefAlign < 8) + ++DefCycle; + // Result latency is AGU cycles + 2. + DefCycle += 2; + } else { + // Assume the worst. + DefCycle = RegNo + 2; + } + + return DefCycle; +} + +int +ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &UseTID, + unsigned UseClass, + unsigned UseIdx, unsigned UseAlign) const { + int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + if (RegNo <= 0) + return ItinData->getOperandCycle(UseClass, UseIdx); + + int UseCycle; + if (Subtarget.isCortexA8()) { + // (regno / 2) + (regno % 2) + 1 + UseCycle = RegNo / 2 + 1; + if (RegNo % 2) + ++UseCycle; + } else if (Subtarget.isCortexA9()) { + UseCycle = RegNo; + bool isSStore = false; + switch (UseTID.getOpcode()) { + default: break; + case ARM::VSTMS: + case ARM::VSTMS_UPD: + isSStore = true; + break; + } + // If there are odd number of 'S' registers or if it's not 64-bit aligned, + // then it takes an extra cycle. + if ((isSStore && (RegNo % 2)) || UseAlign < 8) + ++UseCycle; + } else { + // Assume the worst. 
+ UseCycle = RegNo + 2; + } + + return UseCycle; +} + +int +ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &UseTID, + unsigned UseClass, + unsigned UseIdx, unsigned UseAlign) const { + int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + if (RegNo <= 0) + return ItinData->getOperandCycle(UseClass, UseIdx); + + int UseCycle; + if (Subtarget.isCortexA8()) { + UseCycle = RegNo / 2; + if (UseCycle < 2) + UseCycle = 2; + // Read in E3. + UseCycle += 2; + } else if (Subtarget.isCortexA9()) { + UseCycle = (RegNo / 2); + // If there are odd number of registers or if it's not 64-bit aligned, + // then it takes an extra AGU (Address Generation Unit) cycle. + if ((RegNo % 2) || UseAlign < 8) + ++UseCycle; + } else { + // Assume the worst. + UseCycle = 1; + } + return UseCycle; +} + int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, const TargetInstrDesc &DefTID, @@ -1671,35 +1810,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::VLDMS: case ARM::VLDMD_UPD: case ARM::VLDMS_UPD: { - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; - if (RegNo <= 0) { - // Def is the address writeback. - DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); - break; - } - if (Subtarget.isCortexA8()) { - // (regno / 2) + (regno % 2) + 1 - DefCycle = RegNo / 2 + 1; - if (RegNo % 2) - ++DefCycle; - } else if (Subtarget.isCortexA9()) { - DefCycle = RegNo; - bool isSLoad = false; - switch (UseTID.getOpcode()) { - default: break; - case ARM::VLDMS: - case ARM::VLDMS_UPD: - isSLoad = true; - break; - } - // If there are odd number of 'S' registers or if it's not 64-bit aligned, - // then it takes an extra cycle. - if ((isSLoad && (RegNo % 2)) || DefAlign < 8) - ++DefCycle; - } else { - // Assume the worst. - DefCycle = RegNo + 2; - } + DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); break; } case ARM::LDM_RET: @@ -1712,32 +1823,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::t2LDM: case ARM::t2LDM_UPD: { LdmBypass = 1; - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; - if (RegNo <= 0) { - // Def is the address writeback. - DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); - break; - } - if (Subtarget.isCortexA8()) { - // 4 registers would be issued: 1, 2, 1. - // 5 registers would be issued: 1, 2, 2. - DefCycle = RegNo / 2; - if (DefCycle < 1) - DefCycle = 1; - // Result latency is issue cycle + 2: E2. - DefCycle += 2; - } else if (Subtarget.isCortexA9()) { - DefCycle = (RegNo / 2); - // If there are odd number of registers or if it's not 64-bit aligned, - // then it takes an extra AGU (Address Generation Unit) cycle. - if ((RegNo % 2) || DefAlign < 8) - ++DefCycle; - // Result latency is AGU cycles + 2. - DefCycle += 2; - } else { - // Assume the worst. 
- DefCycle = RegNo + 2; - } + DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); + break; } } @@ -1754,34 +1841,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::VSTMS: case ARM::VSTMD_UPD: case ARM::VSTMS_UPD: { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; - if (RegNo <= 0) { - UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); - break; - } - if (Subtarget.isCortexA8()) { - // (regno / 2) + (regno % 2) + 1 - UseCycle = RegNo / 2 + 1; - if (RegNo % 2) - ++UseCycle; - } else if (Subtarget.isCortexA9()) { - UseCycle = RegNo; - bool isSStore = false; - switch (UseTID.getOpcode()) { - default: break; - case ARM::VSTMS: - case ARM::VSTMS_UPD: - isSStore = true; - break; - } - // If there are odd number of 'S' registers or if it's not 64-bit aligned, - // then it takes an extra cycle. - if ((isSStore && (RegNo % 2)) || UseAlign < 8) - ++UseCycle; - } else { - // Assume the worst. - UseCycle = RegNo + 2; - } + UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); break; } case ARM::STM: @@ -1791,27 +1851,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, case ARM::tPOP: case ARM::t2STM: case ARM::t2STM_UPD: { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; - if (RegNo <= 0) { - UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); - break; - } - if (Subtarget.isCortexA8()) { - UseCycle = RegNo / 2; - if (UseCycle < 2) - UseCycle = 2; - // Read in E3. - UseCycle += 2; - } else if (Subtarget.isCortexA9()) { - UseCycle = (RegNo / 2); - // If there are odd number of registers or if it's not 64-bit aligned, - // then it takes an extra AGU (Address Generation Unit) cycle. - if ((RegNo % 2) || UseAlign < 8) - ++UseCycle; - } else { - // Assume the worst. - UseCycle = 1; - } + UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); break; } } diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 04fee641f1e..1c6aef1bd66 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -350,6 +350,22 @@ public: SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const; private: + int getVLDMDefCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &DefTID, + unsigned DefClass, + unsigned DefIdx, unsigned DefAlign) const; + int getLDMDefCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &DefTID, + unsigned DefClass, + unsigned DefIdx, unsigned DefAlign) const; + int getVSTMUseCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &UseTID, + unsigned UseClass, + unsigned UseIdx, unsigned UseAlign) const; + int getSTMUseCycle(const InstrItineraryData *ItinData, + const TargetInstrDesc &UseTID, + unsigned UseClass, + unsigned UseIdx, unsigned UseAlign) const; int getOperandLatency(const InstrItineraryData *ItinData, const TargetInstrDesc &DefTID, unsigned DefIdx, unsigned DefAlign, -- 2.34.1
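
Beyond removing the duplication, the extraction also corrects which instruction's opcode is inspected on the VLDM def path: the inlined code removed in the first getOperandLatency hunk switched on UseTID.getOpcode() to detect single-precision loads, whereas the new getVLDMDefCycle switches on DefTID.getOpcode().

To make the encapsulated latency model concrete, here is a minimal standalone sketch of the arithmetic that getLDMDefCycle now owns. It is not part of the patch: the Core enum and the free function ldmDefCycle are hypothetical stand-ins for the ARMSubtarget queries, and RegNo is taken directly as a parameter rather than derived from the operand index as (int)(DefIdx+1) - DefTID.getNumOperands() + 1.

#include <cstdio>

// Hypothetical stand-in for the ARMSubtarget queries used in the patch.
enum class Core { CortexA8, CortexA9, Unknown };

// RegNo: 1-based position of the def within the LDM register list.
// DefAlign: alignment of the access in bytes.
static int ldmDefCycle(Core ST, int RegNo, unsigned DefAlign) {
  int DefCycle;
  if (ST == Core::CortexA8) {
    // A8 issues two registers per cycle (per the patch's comment, 4
    // registers issue as 1, 2, 1 and 5 as 1, 2, 2); the result is
    // ready two stages after the issue cycle, in E2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    DefCycle += 2;
  } else if (ST == Core::CortexA9) {
    // A9 charges an extra AGU (Address Generation Unit) cycle for an
    // odd register count or an access that is not 64-bit aligned;
    // result latency is the AGU cycles plus 2.
    DefCycle = RegNo / 2;
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    DefCycle += 2;
  } else {
    // Unrecognized core: assume the worst, as the patch does.
    DefCycle = RegNo + 2;
  }
  return DefCycle;
}

int main() {
  // Worked example, 64-bit aligned: the 5th register of an LDM is
  // ready at cycle 5/2 + 2 = 4 on A8, 5/2 + 1 + 2 = 5 on A9, and
  // 5 + 2 = 7 under the worst-case assumption.
  for (int RegNo = 1; RegNo <= 5; ++RegNo)
    std::printf("RegNo=%d  A8=%d  A9=%d  worst=%d\n", RegNo,
                ldmDefCycle(Core::CortexA8, RegNo, 8),
                ldmDefCycle(Core::CortexA9, RegNo, 8),
                ldmDefCycle(Core::Unknown, RegNo, 8));
  return 0;
}

The shape of the model on both cores is "half the register count plus two pipeline stages": Cortex-A8 retires two registers per cycle with results available in E2, Cortex-A9 pays one extra AGU cycle for an odd register count or a non-64-bit-aligned access, and an unknown core pessimistically pays RegNo + 2. The VLDM/VSTM and STM helpers in the patch follow the same pattern with per-instruction tweaks.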