void MyGCPrinter::finishAssembly(AsmPrinter &AP) {
MCStreamer &OS = AP.OutStreamer;
- unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
+ unsigned IntPtrSize = AP.TM.getSubtargetImpl()->getDataLayout()->getPointerSize();
// Put this in the data section.
OS.SwitchSection(AP.getObjFileLowering().getDataSection());
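// A minimal sketch, assuming the usual shape of a GCMetadataPrinter body:
// IntPtrSize would drive pointer-sized emission once the data section is
// selected (illustrative values only, not part of this patch):
//
//   AP.EmitAlignment(IntPtrSize == 4 ? 2 : 3);   // log2 alignment
//   OS.EmitIntValue(0, IntPtrSize);              // one pointer-sized entry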
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
/// @param vrm Map of virtual registers to physical registers for this
/// function. If NULL, no virtual register map updates will
/// be done. This could be the case if called before Regalloc.
- LiveRangeEdit(LiveInterval *parent,
- SmallVectorImpl<unsigned> &newRegs,
- MachineFunction &MF,
- LiveIntervals &lis,
- VirtRegMap *vrm,
+ LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
+ MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
Delegate *delegate = nullptr)
- : Parent(parent), NewRegs(newRegs),
- MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
- TII(*MF.getTarget().getInstrInfo()),
- TheDelegate(delegate),
- FirstNew(newRegs.size()),
- ScannedRemattable(false) { MRI.setDelegate(this); }
+ : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
+ VRM(vrm), TII(*MF.getTarget().getSubtargetImpl()->getInstrInfo()),
+ TheDelegate(delegate), FirstNew(newRegs.size()),
+ ScannedRemattable(false) {
+ MRI.setDelegate(this);
+ }
~LiveRangeEdit() { MRI.resetDelegate(this); }
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <vector>
namespace llvm {
~MachineRegisterInfo();
const TargetRegisterInfo *getTargetRegisterInfo() const {
- return TM.getRegisterInfo();
+ return TM.getSubtargetImpl()->getRegisterInfo();
}
void resetDelegate(Delegate *delegate) {
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Pass.h"
namespace llvm {
virtual ~SelectionDAGISel();
const TargetLowering *getTargetLowering() const {
- return TM.getTargetLowering();
+ return TM.getSubtargetImpl()->getTargetLowering();
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
class DataLayout;
class TargetLibraryInfo;
class TargetFrameLowering;
-class TargetInstrInfo;
class TargetIntrinsicInfo;
class TargetJITInfo;
class TargetLowering;
/// \brief Reset the target options based on the function's attributes.
void resetTargetOptions(const MachineFunction *MF) const;
- // Interfaces to the major aspects of target machine information:
- //
- // -- Instruction opcode and operand information
- // -- Pipelines and scheduling information
- // -- Stack frame information
- // -- Selection DAG lowering information
- //
- // N.B. These objects may change during compilation. It's not safe to cache
- // them between functions.
- virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
- virtual const TargetFrameLowering *getFrameLowering() const {
- return nullptr;
- }
- virtual const TargetLowering *getTargetLowering() const { return nullptr; }
- virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
- return nullptr;
- }
- virtual const DataLayout *getDataLayout() const { return nullptr; }
-
/// getMCAsmInfo - Return target specific asm information.
///
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
return *static_cast<const STC*>(getSubtargetImpl());
}
- /// getRegisterInfo - If register information is available, return it. If
- /// not, return null. This is kept separate from RegInfo until RegInfo has
- /// details of graph coloring register allocation removed from it.
- ///
- virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
-
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
///
- virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return nullptr;}
-
- /// getJITInfo - If this target supports a JIT, return information for it,
- /// otherwise return null.
- ///
- virtual TargetJITInfo *getJITInfo() { return nullptr; }
-
- /// getInstrItineraryData - Returns instruction itinerary data for the target
- /// or specific subtarget.
- ///
- virtual const InstrItineraryData *getInstrItineraryData() const {
+ virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
return nullptr;
}
namespace llvm {
+class DataLayout;
class MachineFunction;
class MachineInstr;
class SDep;
class SUnit;
+class TargetFrameLowering;
+class TargetInstrInfo;
+class TargetJITInfo;
+class TargetLowering;
class TargetRegisterClass;
+class TargetRegisterInfo;
class TargetSchedModel;
+class TargetSelectionDAGInfo;
struct MachineSchedPolicy;
template <typename T> class SmallVectorImpl;
virtual ~TargetSubtargetInfo();
+ // Interfaces to the major aspects of target machine information:
+ //
+ // -- Instruction opcode and operand information
+ // -- Pipelines and scheduling information
+ // -- Stack frame information
+ // -- Selection DAG lowering information
+ //
+ // N.B. These objects may change during compilation. It's not safe to cache
+ // them between functions.
+ virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return nullptr;
+ }
+ virtual const TargetLowering *getTargetLowering() const { return nullptr; }
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
+ return nullptr;
+ }
+ virtual const DataLayout *getDataLayout() const { return nullptr; }
+
+ /// getRegisterInfo - If register information is available, return it. If
+ /// not, return null. This is kept separate from RegInfo until RegInfo has
+ /// details of graph coloring register allocation removed from it.
+ ///
+ virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
+
+ /// getJITInfo - If this target supports a JIT, return information for it,
+ /// otherwise return null.
+ ///
+ virtual TargetJITInfo *getJITInfo() { return nullptr; }
+
+ /// getInstrItineraryData - Returns instruction itinerary data for the target
+ /// or specific subtarget.
+ ///
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return nullptr;
+ }
+
/// Resolve a SchedClass at runtime, where SchedClass identifies an
/// MCSchedClassDesc with the isVariant property. This may return the ID of
/// another variant SchedClass, but repeated invocation must quickly terminate
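// A minimal sketch of the caller-side pattern implied by this move, assuming a
// MachineFunction MF in scope; the variable names are illustrative only. Per
// the comment moved above, these objects may change between functions and
// should not be cached across them.
//
//   const TargetSubtargetInfo *STI = MF.getTarget().getSubtargetImpl();
//   const TargetInstrInfo *TII = STI->getInstrInfo();
//   const TargetRegisterInfo *TRI = STI->getRegisterInfo();
//   const DataLayout *DL = STI->getDataLayout();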
return((KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u));
}
-
-
-AggressiveAntiDepBreaker::
-AggressiveAntiDepBreaker(MachineFunction& MFi,
- const RegisterClassInfo &RCI,
- TargetSubtargetInfo::RegClassVector& CriticalPathRCs) :
- AntiDepBreaker(), MF(MFi),
- MRI(MF.getRegInfo()),
- TII(MF.getTarget().getInstrInfo()),
- TRI(MF.getTarget().getRegisterInfo()),
- RegClassInfo(RCI),
- State(nullptr) {
+AggressiveAntiDepBreaker::AggressiveAntiDepBreaker(
+ MachineFunction &MFi, const RegisterClassInfo &RCI,
+ TargetSubtargetInfo::RegClassVector &CriticalPathRCs)
+ : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getSubtargetImpl()->getInstrInfo()),
+ TRI(MF.getTarget().getSubtargetImpl()->getRegisterInfo()),
+ RegClassInfo(RCI), State(nullptr) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
+
using namespace llvm;
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
return false;
}
- return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret,
- *TM.getTargetLowering());
+ return returnTypeIsEligibleForTailCall(
+ ExitBB->getParent(), I, Ret, *TM.getSubtargetImpl()->getTargetLowering());
}
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
}
AsmPrinter::AsmPrinter(TargetMachine &tm, MCStreamer &Streamer)
- : MachineFunctionPass(ID),
- TM(tm), MAI(tm.getMCAsmInfo()), MII(tm.getInstrInfo()),
- OutContext(Streamer.getContext()),
- OutStreamer(Streamer),
- LastMI(nullptr), LastFn(0), Counter(~0U), SetCounter(0) {
+ : MachineFunctionPass(ID), TM(tm), MAI(tm.getMCAsmInfo()),
+ MII(tm.getSubtargetImpl()->getInstrInfo()),
+ OutContext(Streamer.getContext()), OutStreamer(Streamer), LastMI(nullptr),
+ LastFn(0), Counter(~0U), SetCounter(0) {
DD = nullptr; MMI = nullptr; LI = nullptr; MF = nullptr;
CurrentFnSym = CurrentFnSymForSize = nullptr;
GCMetadataPrinters = nullptr;
}
const TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const {
- return TM.getTargetLowering()->getObjFileLowering();
+ return TM.getSubtargetImpl()->getTargetLowering()->getObjFileLowering();
}
/// getDataLayout - Return information about data layout.
const DataLayout &AsmPrinter::getDataLayout() const {
- return *TM.getDataLayout();
+ return *TM.getSubtargetImpl()->getDataLayout();
}
const MCSubtargetInfo &AsmPrinter::getSubtargetInfo() const {
OutStreamer.InitSections();
- Mang = new Mangler(TM.getDataLayout());
+ Mang = new Mangler(TM.getSubtargetImpl()->getDataLayout());
// Emit the version-min deployment target directive if needed.
//
SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
uint64_t Size = DL->getTypeAllocSize(GV->getType()->getElementType());
// If the alignment is specified, we *must* obey it. Overaligning a global
// We assume a single instruction only has a spill or reload, not
// both.
const MachineMemOperand *MMO;
- if (TM.getInstrInfo()->isLoadFromStackSlotPostFE(&MI, FI)) {
+ if (TM.getSubtargetImpl()->getInstrInfo()->isLoadFromStackSlotPostFE(&MI,
+ FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
CommentOS << MMO->getSize() << "-byte Reload\n";
}
- } else if (TM.getInstrInfo()->hasLoadFromStackSlot(&MI, MMO, FI)) {
+ } else if (TM.getSubtargetImpl()->getInstrInfo()->hasLoadFromStackSlot(
+ &MI, MMO, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI))
CommentOS << MMO->getSize() << "-byte Folded Reload\n";
- } else if (TM.getInstrInfo()->isStoreToStackSlotPostFE(&MI, FI)) {
+ } else if (TM.getSubtargetImpl()->getInstrInfo()->isStoreToStackSlotPostFE(
+ &MI, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
CommentOS << MMO->getSize() << "-byte Spill\n";
}
- } else if (TM.getInstrInfo()->hasStoreToStackSlot(&MI, MMO, FI)) {
+ } else if (TM.getSubtargetImpl()->getInstrInfo()->hasStoreToStackSlot(
+ &MI, MMO, FI)) {
if (FrameInfo->isSpillSlotObjectIndex(FI))
CommentOS << MMO->getSize() << "-byte Folded Spill\n";
}
/// that is an implicit def.
void AsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
unsigned RegNo = MI->getOperand(0).getReg();
- OutStreamer.AddComment(Twine("implicit-def: ") +
- TM.getRegisterInfo()->getName(RegNo));
+ OutStreamer.AddComment(
+ Twine("implicit-def: ") +
+ TM.getSubtargetImpl()->getRegisterInfo()->getName(RegNo));
OutStreamer.AddBlankLine();
}
const MachineOperand &Op = MI->getOperand(i);
assert(Op.isReg() && "KILL instruction must have only register operands");
Str += ' ';
- Str += AP.TM.getRegisterInfo()->getName(Op.getReg());
+ Str += AP.TM.getSubtargetImpl()->getRegisterInfo()->getName(Op.getReg());
Str += (Op.isDef() ? "<def>" : "<kill>");
}
AP.OutStreamer.AddComment(Str);
Reg = MI->getOperand(0).getReg();
} else {
assert(MI->getOperand(0).isFI() && "Unknown operand type");
- const TargetFrameLowering *TFI = AP.TM.getFrameLowering();
+ const TargetFrameLowering *TFI =
+ AP.TM.getSubtargetImpl()->getFrameLowering();
Offset += TFI->getFrameIndexReference(*AP.MF,
MI->getOperand(0).getIndex(), Reg);
Deref = true;
}
if (Deref)
OS << '[';
- OS << AP.TM.getRegisterInfo()->getName(Reg);
+ OS << AP.TM.getSubtargetImpl()->getRegisterInfo()->getName(Reg);
}
if (Deref)
// labels from collapsing together. Just emit a noop.
if ((MAI->hasSubsectionsViaSymbols() && !HasAnyRealCode) || RequiresNoop) {
MCInst Noop;
- TM.getInstrInfo()->getNoopForMachoTarget(Noop);
+ TM.getSubtargetImpl()->getInstrInfo()->getNoopForMachoTarget(Noop);
if (Noop.getOpcode()) {
OutStreamer.AddComment("avoids zero-length function");
OutStreamer.EmitInstruction(Noop, getSubtargetInfo());
unsigned Arch = Triple(getTargetTriple()).getArch();
bool IsThumb = (Arch == Triple::thumb || Arch == Triple::thumbeb);
MCInst TrapInst;
- TM.getInstrInfo()->getTrap(TrapInst);
+ TM.getSubtargetImpl()->getInstrInfo()->getTrap(TrapInst);
for (const auto &KV : JITI->getTables()) {
uint64_t Count = 0;
for (const auto &FunPair : KV.second) {
const MCSymbolRefExpr *TargetSymRef =
MCSymbolRefExpr::Create(TargetSymbol, MCSymbolRefExpr::VK_PLT,
OutContext);
- TM.getInstrInfo()->getUnconditionalBranch(JumpToFun, TargetSymRef);
+ TM.getSubtargetImpl()->getInstrInfo()->getUnconditionalBranch(
+ JumpToFun, TargetSymRef);
OutStreamer.EmitInstruction(JumpToFun, getSubtargetInfo());
++Count;
}
const MachineConstantPoolEntry &CPE = CP[i];
unsigned Align = CPE.getAlignment();
- SectionKind Kind = CPE.getSectionKind(TM.getDataLayout());
+ SectionKind Kind =
+ CPE.getSectionKind(TM.getSubtargetImpl()->getDataLayout());
const Constant *C = nullptr;
if (!CPE.isMachineConstantPoolEntry())
OutStreamer.EmitZeros(NewOffset - Offset);
Type *Ty = CPE.getType();
- Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
+ Offset = NewOffset +
+ TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(Sym);
if (CPE.isMachineConstantPoolEntry())
/// by the current function to the current output stream.
///
void AsmPrinter::EmitJumpTableInfo() {
- const DataLayout *DL = MF->getTarget().getDataLayout();
+ const DataLayout *DL = MF->getTarget().getSubtargetImpl()->getDataLayout();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
if (!MJTI) return;
if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline) return;
JTInDiffSection = true;
}
- EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout())));
+ EmitAlignment(Log2_32(
+ MJTI->getEntryAlignment(*TM.getSubtargetImpl()->getDataLayout())));
// Jump tables in code sections are marked with a data_region directive
// where that's supported.
if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 &&
MAI->hasSetDirective()) {
SmallPtrSet<const MachineBasicBlock*, 16> EmittedSets;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF,JTI,OutContext);
for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) {
const MachineBasicBlock *MBB = JTBBs[ii];
case MachineJumpTableInfo::EK_Inline:
llvm_unreachable("Cannot emit EK_Inline jump table entry");
case MachineJumpTableInfo::EK_Custom32:
- Value = TM.getTargetLowering()->LowerCustomJumpTableEntry(MJTI, MBB, UID,
- OutContext);
+ Value =
+ TM.getSubtargetImpl()->getTargetLowering()->LowerCustomJumpTableEntry(
+ MJTI, MBB, UID, OutContext);
break;
case MachineJumpTableInfo::EK_BlockAddress:
// EK_BlockAddress - Each entry is a plain address of block, e.g.:
assert(Value && "Unknown entry kind!");
- unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout());
+ unsigned EntrySize =
+ MJTI->getEntrySize(*TM.getSubtargetImpl()->getDataLayout());
OutStreamer.EmitValue(Value, EntrySize);
}
}
// Emit the function pointers in the target-specific order
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
unsigned Align = Log2_32(DL->getPointerPrefAlignment());
std::stable_sort(Structors.begin(), Structors.end(),
[](const Structor &L,
// if required for correctness.
//
void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalObject *GV) const {
- if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
+ if (GV)
+ NumBits = getGVAlignmentLog2(GV, *TM.getSubtargetImpl()->getDataLayout(),
+ NumBits);
if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
// If the code isn't optimized, there may be outstanding folding
// opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up.
- if (Constant *C =
- ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
+ if (Constant *C = ConstantFoldConstantExpression(
+ CE, AP.TM.getSubtargetImpl()->getDataLayout()))
if (C != CE)
return lowerConstant(C, AP);
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
// Generate a symbolic expression for the byte address
APInt OffsetAI(DL.getPointerTypeSizeInBits(CE->getType()), 0);
cast<GEPOperator>(CE)->accumulateConstantOffset(DL, OffsetAI);
return lowerConstant(CE->getOperand(0), AP);
case Instruction::IntToPtr: {
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
}
case Instruction::PtrToInt: {
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64) return -1;
- uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType());
+ uint64_t Size =
+ TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(V->getType());
uint64_t Value = CI->getZExtValue();
// Make sure the constant is at least 8 bits long and has a power
// See if we can aggregate this into a .fill, if so, emit it as such.
int Value = isRepeatedByteSequence(CDS, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType());
+ uint64_t Bytes =
+ AP.TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(
+ CDS->getType());
// Don't emit a 1-byte object as a .fill.
if (Bytes > 1)
return AP.OutStreamer.EmitFill(Bytes, Value);
}
}
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
unsigned Size = DL.getTypeAllocSize(CDS->getType());
unsigned EmittedSize = DL.getTypeAllocSize(CDS->getType()->getElementType()) *
CDS->getNumElements();
int Value = isRepeatedByteSequence(CA, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType());
+ uint64_t Bytes =
+ AP.TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(
+ CA->getType());
AP.OutStreamer.EmitFill(Bytes, Value);
}
else {
for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
emitGlobalConstantImpl(CV->getOperand(i), AP);
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
unsigned Size = DL.getTypeAllocSize(CV->getType());
unsigned EmittedSize = DL.getTypeAllocSize(CV->getType()->getElementType()) *
CV->getType()->getNumElements();
static void emitGlobalConstantStruct(const ConstantStruct *CS, AsmPrinter &AP) {
// Print the fields in successive locations. Pad to align if needed!
- const DataLayout *DL = AP.TM.getDataLayout();
+ const DataLayout *DL = AP.TM.getSubtargetImpl()->getDataLayout();
unsigned Size = DL->getTypeAllocSize(CS->getType());
const StructLayout *Layout = DL->getStructLayout(CS->getType());
uint64_t SizeSoFar = 0;
// PPC's long double has odd notions of endianness compared to how LLVM
// handles it: p[0] goes first for *big* endian on PPC.
- if (AP.TM.getDataLayout()->isBigEndian() &&
+ if (AP.TM.getSubtargetImpl()->getDataLayout()->isBigEndian() &&
!CFP->getType()->isPPC_FP128Ty()) {
int Chunk = API.getNumWords() - 1;
}
// Emit the tail padding for the long double.
- const DataLayout &DL = *AP.TM.getDataLayout();
+ const DataLayout &DL = *AP.TM.getSubtargetImpl()->getDataLayout();
AP.OutStreamer.EmitZeros(DL.getTypeAllocSize(CFP->getType()) -
DL.getTypeStoreSize(CFP->getType()));
}
static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) {
- const DataLayout *DL = AP.TM.getDataLayout();
+ const DataLayout *DL = AP.TM.getSubtargetImpl()->getDataLayout();
unsigned BitWidth = CI->getBitWidth();
// Copy the value as we may massage the layout for constants whose bit width
// Emit the extra bits after the 64-bits chunks.
// Emit a directive that fills the expected size.
- uint64_t Size = AP.TM.getDataLayout()->getTypeAllocSize(CI->getType());
+ uint64_t Size = AP.TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(
+ CI->getType());
Size -= (BitWidth / 64) * 8;
assert(Size && Size * 8 >= ExtraBitsSize &&
(ExtraBits & (((uint64_t)-1) >> (64 - ExtraBitsSize)))
}
static void emitGlobalConstantImpl(const Constant *CV, AsmPrinter &AP) {
- const DataLayout *DL = AP.TM.getDataLayout();
+ const DataLayout *DL = AP.TM.getSubtargetImpl()->getDataLayout();
uint64_t Size = DL->getTypeAllocSize(CV->getType());
if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
return AP.OutStreamer.EmitZeros(Size);
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
- uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
+ uint64_t Size =
+ TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(CV->getType());
if (Size)
emitGlobalConstantImpl(CV, *this);
else if (MAI->hasSubsectionsViaSymbols()) {
/// GetTempSymbol - Return the MCSymbol corresponding to the assembler
/// temporary label with the specified stem and unique ID.
MCSymbol *AsmPrinter::GetTempSymbol(Twine Name, unsigned ID) const {
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
return OutContext.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix()) +
Name + Twine(ID));
}
/// GetTempSymbol - Return an assembler temporary label with the specified
/// stem.
MCSymbol *AsmPrinter::GetTempSymbol(Twine Name) const {
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
return OutContext.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
Name);
}
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
return OutContext.GetOrCreateSymbol
(Twine(DL->getPrivateGlobalPrefix()) + "CPI" + Twine(getFunctionNumber())
+ "_" + Twine(CPID));
/// GetJTSetSymbol - Return the symbol for the specified jump table .set
/// FIXME: privatize to AsmPrinter.
MCSymbol *AsmPrinter::GetJTSetSymbol(unsigned UID, unsigned MBBID) const {
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
return OutContext.GetOrCreateSymbol
(Twine(DL->getPrivateGlobalPrefix()) + Twine(getFunctionNumber()) + "_" +
Twine(UID) + "_set_" + Twine(MBBID));
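// A minimal sketch: inside AsmPrinter the same data layout is also reachable
// through the member helper rewired earlier in this file, which keeps call
// sites shorter:
//
//   const DataLayout &DL = getDataLayout();   // AsmPrinter::getDataLayout()
//   unsigned PtrSize = DL.getPointerSize();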
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
default:
llvm_unreachable("Invalid encoded value.");
case dwarf::DW_EH_PE_absptr:
- return TM.getDataLayout()->getPointerSize();
+ return TM.getSubtargetImpl()->getDataLayout()->getPointerSize();
case dwarf::DW_EH_PE_udata2:
return 2;
case dwarf::DW_EH_PE_udata4:
unsigned PieceSizeInBits,
unsigned PieceOffsetInBits) const {
assert(MLoc.isReg() && "MLoc must be a register");
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
// If this is a valid register number, emit it.
void AsmPrinter::EmitDwarfRegOp(ByteStreamer &Streamer,
const MachineLocation &MLoc,
bool Indirect) const {
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
if (Reg < 0) {
// We assume that pointers are always in an addressable register.
/// for their own strange codes.
void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const {
- const DataLayout *DL = TM.getDataLayout();
+ const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
if (!strcmp(Code, "private")) {
OS << DL->getPrivateGlobalPrefix();
} else if (!strcmp(Code, "comment")) {
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "dwarfdebug"
attachLowHighPC(SPCU, *SPDie, FunctionBeginSym, FunctionEndSym);
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+ const TargetRegisterInfo *RI = Asm->TM.getSubtargetImpl()->getRegisterInfo();
MachineLocation Location(RI->getFrameRegister(*Asm->MF));
SPCU.addAddress(*SPDie, dwarf::DW_AT_frame_base, Location);
Asm->OutStreamer.EmitLabel(FunctionBeginSym);
// Calculate history for local variables.
- calculateDbgValueHistory(MF, Asm->TM.getRegisterInfo(), DbgValues);
+ calculateDbgValueHistory(MF, Asm->TM.getSubtargetImpl()->getRegisterInfo(),
+ DbgValues);
// Request labels for the full history.
for (const auto &I : DbgValues) {
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
// AsmPrinter::EmitDwarfRegOpPiece.
void DwarfUnit::addRegisterOpPiece(DIELoc &TheDie, unsigned Reg,
unsigned SizeInBits, unsigned OffsetInBits) {
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+ const TargetRegisterInfo *RI = Asm->TM.getSubtargetImpl()->getRegisterInfo();
int DWReg = RI->getDwarfRegNum(Reg, false);
bool isSubRegister = DWReg < 0;
/// addRegisterOffset - Add register offset.
void DwarfUnit::addRegisterOffset(DIELoc &TheDie, unsigned Reg,
int64_t Offset) {
- const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+ const TargetRegisterInfo *RI = Asm->TM.getSubtargetImpl()->getRegisterInfo();
unsigned DWReg = RI->getDwarfRegNum(Reg, false);
- const TargetRegisterInfo *TRI = Asm->TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = Asm->TM.getSubtargetImpl()->getRegisterInfo();
if (Reg == TRI->getFrameRegister(*Asm->MF))
// If variable offset is based in frame register then use fbreg.
addUInt(TheDie, dwarf::DW_FORM_data1, dwarf::DW_OP_fbreg);
int FI = DV.getFrameIndex();
if (FI != ~0) {
unsigned FrameReg = 0;
- const TargetFrameLowering *TFI = Asm->TM.getFrameLowering();
+ const TargetFrameLowering *TFI =
+ Asm->TM.getSubtargetImpl()->getFrameLowering();
int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
MachineLocation Location(FrameReg, Offset);
addVariableAddress(DV, *VariableDie, Location);
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
void ErlangGCPrinter::finishAssembly(AsmPrinter &AP) {
MCStreamer &OS = AP.OutStreamer;
- unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
+ unsigned IntPtrSize =
+ AP.TM.getSubtargetImpl()->getDataLayout()->getPointerSize();
// Put this in a custom .note section.
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getContext()
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
/// either condition is detected in a function which uses the GC.
///
void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
- unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
+ unsigned IntPtrSize =
+ AP.TM.getSubtargetImpl()->getDataLayout()->getPointerSize();
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(getModule(), AP, "code_end");
bool MadeChange = false;
for (Instruction *Inst : AtomicInsts) {
- if (!TM->getTargetLowering()->shouldExpandAtomicInIR(Inst))
+ if (!TM->getSubtargetImpl()->getTargetLowering()->shouldExpandAtomicInIR(
+ Inst))
continue;
if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
// Load instructions don't actually need a leading fence, even in the
// SequentiallyConsistent case.
AtomicOrdering MemOpOrder =
- TM->getTargetLowering()->getInsertFencesForAtomic() ? Monotonic
- : LI->getOrdering();
+ TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic()
+ ? Monotonic
+ : LI->getOrdering();
// The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
// an ldrexd (A3.5.3).
IRBuilder<> Builder(LI);
- Value *Val = TM->getTargetLowering()->emitLoadLinked(
+ Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
Builder, LI->getPointerOperand(), MemOpOrder);
insertTrailingFence(Builder, LI->getOrdering());
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
- Value *Loaded =
- TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
+ Value *Loaded = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
+ Builder, Addr, MemOpOrder);
Value *NewVal;
switch (AI->getOperation()) {
llvm_unreachable("Unknown atomic op");
}
- Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
- Builder, NewVal, Addr, MemOpOrder);
+ Value *StoreSuccess =
+ TM->getSubtargetImpl()->getTargetLowering()->emitStoreConditional(
+ Builder, NewVal, Addr, MemOpOrder);
Value *TryAgain = Builder.CreateICmpNE(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
- Value *Loaded =
- TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
+ Value *Loaded = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
+ Builder, Addr, MemOpOrder);
Value *ShouldStore =
Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
Builder.SetInsertPoint(TryStoreBB);
- Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
- Builder, CI->getNewValOperand(), Addr, MemOpOrder);
+ Value *StoreSuccess =
+ TM->getSubtargetImpl()->getTargetLowering()->emitStoreConditional(
+ Builder, CI->getNewValOperand(), Addr, MemOpOrder);
StoreSuccess = Builder.CreateICmpEQ(
StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
Builder.CreateCondBr(StoreSuccess, SuccessBB,
AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
- if (!TM->getTargetLowering()->getInsertFencesForAtomic())
+ if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return Ord;
if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
- if (!TM->getTargetLowering()->getInsertFencesForAtomic())
+ if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return;
if (Ord == Acquire || Ord == AcquireRelease)
/// Estimate the cost overhead of SK_Alternate shuffle.
unsigned getAltShuffleOverhead(Type *Ty) const;
- const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }
+ const TargetLoweringBase *getTLI() const {
+ return TM->getSubtargetImpl()->getTargetLowering();
+ }
public:
BasicTTI() : ImmutablePass(ID), TM(nullptr) {
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
bool EnableTailMerge = !MF.getTarget().requiresStructuredCFG() &&
PassConfig->getEnableTailMerge();
BranchFolder Folder(EnableTailMerge, /*CommonHoist=*/true);
- return Folder.OptimizeFunction(MF,
- MF.getTarget().getInstrInfo(),
- MF.getTarget().getRegisterInfo(),
- getAnalysisIfAvailable<MachineModuleInfo>());
+ return Folder.OptimizeFunction(
+ MF, MF.getTarget().getSubtargetImpl()->getInstrInfo(),
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo(),
+ getAnalysisIfAvailable<MachineModuleInfo>());
}
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "calcspillweights"
void
VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
MachineRegisterInfo &mri = MF.getRegInfo();
- const TargetRegisterInfo &tri = *MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo &tri =
+ *MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MachineBasicBlock *mbb = nullptr;
MachineLoop *loop = nullptr;
bool isExiting = false;
// it is a preferred candidate for spilling.
// FIXME: this gets much more complicated once we support non-trivial
// re-materialization.
- if (isRematerializable(li, LIS, *MF.getTarget().getInstrInfo()))
+ if (isRematerializable(li, LIS,
+ *MF.getTarget().getSubtargetImpl()->getInstrInfo()))
totalWeight *= 0.5F;
li.weight = normalize(totalWeight, li.getSize());
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
const TargetMachine &tm, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C)
- : CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
- TRI(*TM.getRegisterInfo()), Locs(locs), Context(C),
- CallOrPrologue(Unknown) {
+ : CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
+ TRI(*TM.getSubtargetImpl()->getRegisterInfo()), Locs(locs), Context(C),
+ CallOrPrologue(Unknown) {
// No stack is used.
StackOffset = 0;
if (MinAlign > (int)Align)
Align = MinAlign;
MF.getFrameInfo()->ensureMaxAlignment(Align);
- TM.getTargetLowering()->HandleByVal(this, Size, Align);
+ TM.getSubtargetImpl()->getTargetLowering()->HandleByVal(this, Size, Align);
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
PromotedInsts.clear();
ModifiedDT = false;
- if (TM) TLI = TM->getTargetLowering();
+ if (TM)
+ TLI = TM->getSubtargetImpl()->getTargetLowering();
TLInfo = &getAnalysis<TargetLibraryInfo>();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "post-RA-sched"
-CriticalAntiDepBreaker::
-CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
- AntiDepBreaker(), MF(MFi),
- MRI(MF.getRegInfo()),
- TII(MF.getTarget().getInstrInfo()),
- TRI(MF.getTarget().getRegisterInfo()),
- RegClassInfo(RCI),
- Classes(TRI->getNumRegs(), nullptr),
- KillIndices(TRI->getNumRegs(), 0),
- DefIndices(TRI->getNumRegs(), 0),
- KeepRegs(TRI->getNumRegs(), false) {}
+CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction &MFi,
+ const RegisterClassInfo &RCI)
+ : AntiDepBreaker(), MF(MFi), MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getSubtargetImpl()->getInstrInfo()),
+ TRI(MF.getTarget().getSubtargetImpl()->getRegisterInfo()),
+ RegClassInfo(RCI), Classes(TRI->getNumRegs(), nullptr),
+ KillIndices(TRI->getNumRegs(), 0), DefIndices(TRI->getNumRegs(), 0),
+ KeepRegs(TRI->getNumRegs(), false) {}
CriticalAntiDepBreaker::~CriticalAntiDepBreaker() {
}
VLIWPacketizerList::VLIWPacketizerList(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
- TII = TM.getInstrInfo();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
ResourceTracker = TII->CreateTargetScheduleState(&TM, nullptr);
VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
}
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
using namespace llvm;
#define DEBUG_TYPE "codegen-dce"
bool AnyChanges = false;
MRI = &MF.getRegInfo();
- TRI = MF.getTarget().getRegisterInfo();
- TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
return false;
// Find the rewind function if we didn't already.
- const TargetLowering *TLI = TM->getTargetLowering();
+ const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
if (!RewindFunction) {
LLVMContext &Ctx = Resumes[0]->getContext();
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
public:
/// runOnMachineFunction - Initialize per-function data structures.
void runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
LiveRegUnits.clear();
LiveRegUnits.setUniverse(TRI->getNumRegUnits());
.enableEarlyIfConversion())
return false;
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
SchedModel =
MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
MRI = &MF.getRegInfo();
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
MCSymbol *ErlangGC::InsertLabel(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
DebugLoc DL) const {
- const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MBB.getParent()->getTarget().getSubtargetImpl()->getInstrInfo();
MCSymbol *Label = MBB.getParent()->getContext().CreateTempSymbol();
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
return Label;
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
using namespace llvm;
#define DEBUG_TYPE "execution-fix"
bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
- TII = MF->getTarget().getInstrInfo();
- TRI = MF->getTarget().getRegisterInfo();
+ TII = MF->getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
LiveRegs = nullptr;
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "expand-isel-pseudos"
bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- const TargetLowering *TLI = MF.getTarget().getTargetLowering();
+ const TargetLowering *TLI =
+ MF.getTarget().getSubtargetImpl()->getTargetLowering();
// Iterate through each instruction in the function, looking for pseudos.
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
using namespace llvm;
#define DEBUG_TYPE "postrapseudos"
DEBUG(dbgs() << "Machine Function\n"
<< "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
<< "********** Function: " << MF.getName() << '\n');
- TRI = MF.getTarget().getRegisterInfo();
- TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
bool MadeChange = false;
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
}
void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
- const TargetFrameLowering *TFI = TM->getFrameLowering();
+ const TargetFrameLowering *TFI = TM->getSubtargetImpl()->getFrameLowering();
assert(TFI && "TargetRegisterInfo not available!");
for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
TM = &MF.getTarget();
MMI = &getAnalysis<MachineModuleInfo>();
- TII = TM->getInstrInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
// Find the size of the stack frame.
FI->setFrameSize(MF.getFrameInfo()->getStackSize());
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "global-merge"
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const {
- const TargetLowering *TLI = TM->getTargetLowering();
+ const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
const DataLayout *DL = TLI->getDataLayout();
// FIXME: Infer the maximum possible offset depending on the actual users
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
BSSGlobals;
- const TargetLowering *TLI = TM->getTargetLowering();
+ const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
const DataLayout *DL = TLI->getDataLayout();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
INITIALIZE_PASS_END(IfConverter, "if-converter", "If Converter", false, false)
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
- TLI = MF.getTarget().getTargetLowering();
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TLI = MF.getTarget().getSubtargetImpl()->getTargetLowering();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MRI = &MF.getRegInfo();
if (!PreRegAlloc) {
// Tail merge tend to expose more if-conversion opportunities.
BranchFolder BF(true, false);
- BFChange = BF.OptimizeFunction(MF, TII,
- MF.getTarget().getRegisterInfo(),
- getAnalysisIfAvailable<MachineModuleInfo>());
+ BFChange = BF.OptimizeFunction(
+ MF, TII, MF.getTarget().getSubtargetImpl()->getRegisterInfo(),
+ getAnalysisIfAvailable<MachineModuleInfo>());
}
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
if (MadeChange && IfCvtBranchFold) {
BranchFolder BF(false, false);
BF.OptimizeFunction(MF, TII,
- MF.getTarget().getRegisterInfo(),
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
~InlineSpiller() {}
public:
- InlineSpiller(MachineFunctionPass &pass,
- MachineFunction &mf,
- VirtRegMap &vrm)
- : MF(mf),
- LIS(pass.getAnalysis<LiveIntervals>()),
- LSS(pass.getAnalysis<LiveStacks>()),
- AA(&pass.getAnalysis<AliasAnalysis>()),
- MDT(pass.getAnalysis<MachineDominatorTree>()),
- Loops(pass.getAnalysis<MachineLoopInfo>()),
- VRM(vrm),
- MFI(*mf.getFrameInfo()),
- MRI(mf.getRegInfo()),
- TII(*mf.getTarget().getInstrInfo()),
- TRI(*mf.getTarget().getRegisterInfo()),
- MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
+ InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
+ : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
+ LSS(pass.getAnalysis<LiveStacks>()),
+ AA(&pass.getAnalysis<AliasAnalysis>()),
+ MDT(pass.getAnalysis<MachineDominatorTree>()),
+ Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
+ MFI(*mf.getFrameInfo()), MRI(mf.getRegInfo()),
+ TII(*mf.getTarget().getSubtargetImpl()->getInstrInfo()),
+ TRI(*mf.getTarget().getSubtargetImpl()->getRegisterInfo()),
+ MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()) {}
void spill(LiveRangeEdit &) override;
cl::desc("Enable the \"fast\" instruction selector"));
void LLVMTargetMachine::initAsmInfo() {
- MCAsmInfo *TmpAsmInfo = TheTarget.createMCAsmInfo(*getRegisterInfo(),
- TargetTriple);
+ MCAsmInfo *TmpAsmInfo = TheTarget.createMCAsmInfo(
+ *getSubtargetImpl()->getRegisterInfo(), getTargetTriple());
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
// we'll crash later.
// Install a MachineModuleInfo class, which is an immutable pass that holds
// all the per-module stuff we're generating, including MCContext.
- MachineModuleInfo *MMI =
- new MachineModuleInfo(*TM->getMCAsmInfo(), *TM->getRegisterInfo(),
- &TM->getTargetLowering()->getObjFileLowering());
+ MachineModuleInfo *MMI = new MachineModuleInfo(
+ *TM->getMCAsmInfo(), *TM->getSubtargetImpl()->getRegisterInfo(),
+ &TM->getSubtargetImpl()->getTargetLowering()->getObjFileLowering());
PM.add(MMI);
// Set up a MachineFunction for the rest of CodeGen to work on.
if (Options.MCOptions.MCSaveTempLabels)
Context->setAllowTemporaryLabels(false);
- const MCAsmInfo &MAI = *getMCAsmInfo();
- const MCRegisterInfo &MRI = *getRegisterInfo();
- const MCInstrInfo &MII = *getInstrInfo();
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
+ const MCAsmInfo &MAI = *getMCAsmInfo();
+ const MCRegisterInfo &MRI = *getSubtargetImpl()->getRegisterInfo();
+ const MCInstrInfo &MII = *getSubtargetImpl()->getInstrInfo();
std::unique_ptr<MCStreamer> AsmStreamer;
switch (FileType) {
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
- const MCRegisterInfo &MRI = *getRegisterInfo();
+ const MCRegisterInfo &MRI = *getSubtargetImpl()->getRegisterInfo();
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI,
- STI, *Ctx);
+ MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(
+ *getSubtargetImpl()->getInstrInfo(), MRI, STI, *Ctx);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
if (!MCE || !MAB)
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <memory>
MF = &mf;
LIS = &pass.getAnalysis<LiveIntervals>();
MDT = &pass.getAnalysis<MachineDominatorTree>();
- TRI = mf.getTarget().getRegisterInfo();
+ TRI = mf.getTarget().getSubtargetImpl()->getRegisterInfo();
LS.initialize(mf);
DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
<< mf.getName() << " **********\n");
DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n");
if (!MF)
return;
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF->getTarget().getSubtargetImpl()->getInstrInfo();
for (unsigned i = 0, e = userValues.size(); i != e; ++i) {
DEBUG(userValues[i]->print(dbgs(), &MF->getTarget()));
userValues[i]->rewriteLocations(*VRM, *TRI);
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <limits>
MF = &fn;
MRI = &MF->getRegInfo();
TM = &fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
AA = &getAnalysis<AliasAnalysis>();
Indexes = &getAnalysis<SlotIndexes>();
DomTree = &getAnalysis<MachineDominatorTree>();
}
bool LiveRegMatrix::runOnMachineFunction(MachineFunction &MF) {
- TRI = MF.getTarget().getRegisterInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
VRM = &getAnalysis<VirtRegMap>();
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <limits>
using namespace llvm;
}
bool LiveStacks::runOnMachineFunction(MachineFunction &MF) {
- TRI = MF.getTarget().getRegisterInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
// FIXME: No analysis is being done right now. We are relying on the
// register allocators to provide the information.
return false;
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MRI = &mf.getRegInfo();
- TRI = MF->getTarget().getRegisterInfo();
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
unsigned NumRegs = TRI->getNumRegs();
PhysRegDef = new MachineInstr*[NumRegs];
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
unsigned LocalObjectCount = MFI->getObjectIndexEnd();
// If the target doesn't want/need this pass, or if there are no locals
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
- const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ const TargetFrameLowering &TFI =
+ *Fn.getTarget().getSubtargetImpl()->getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
bool UsedBaseReg = false;
MachineFrameInfo *MFI = Fn.getFrameInfo();
- const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
- const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ const TargetRegisterInfo *TRI =
+ Fn.getTarget().getSubtargetImpl()->getRegisterInfo();
+ const TargetFrameLowering &TFI =
+ *Fn.getTarget().getSubtargetImpl()->getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
const MachineFunction *MF = getParent();
MCContext &Ctx = MF->getContext();
const TargetMachine &TM = MF->getTarget();
- const char *Prefix = TM.getDataLayout()->getPrivateGlobalPrefix();
+ const char *Prefix =
+ TM.getSubtargetImpl()->getDataLayout()->getPrivateGlobalPrefix();
CachedMCSymbol = Ctx.GetOrCreateSymbol(Twine(Prefix) + "BB" +
Twine(MF->getFunctionNumber()) +
"_" + Twine(getNumber()));
OS << '\n';
- const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF->getTarget().getSubtargetImpl()->getRegisterInfo();
if (!livein_empty()) {
if (Indexes) OS << '\t';
OS << " Live Ins:";
bool LiveIn = isLiveIn(PhysReg);
iterator I = SkipPHIsAndLabels(begin()), E = end();
MachineRegisterInfo &MRI = getParent()->getRegInfo();
- const TargetInstrInfo &TII = *getParent()->getTarget().getInstrInfo();
+ const TargetInstrInfo &TII =
+ *getParent()->getTarget().getSubtargetImpl()->getInstrInfo();
// Look for an existing copy.
if (LiveIn)
}
void MachineBasicBlock::updateTerminator() {
- const TargetInstrInfo *TII = getParent()->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getParent()->getTarget().getSubtargetImpl()->getInstrInfo();
// A block with no successors has no concerns with fall-through edges.
if (this->succ_empty()) return;
// Analyze the branches, if any, at the end of the block.
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
- const TargetInstrInfo *TII = getParent()->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getParent()->getTarget().getSubtargetImpl()->getInstrInfo();
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond)) {
// If we couldn't analyze the branch, examine the last instruction.
// If the block doesn't end in a known control barrier, assume fallthrough
// We may need to update this's terminator, but we can't do that if
// AnalyzeBranch fails. If this uses a jump table, we won't touch it.
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF->getTarget().getSubtargetImpl()->getInstrInfo();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
- MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond, dl);
+ MF->getTarget().getSubtargetImpl()->getInstrInfo()->InsertBranch(
+ *NMBB, Succ, nullptr, Cond, dl);
if (Indexes) {
for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
NMBB->addLiveIn(*I);
// Update LiveVariables.
- const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF->getTarget().getSubtargetImpl()->getRegisterInfo();
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
MLI = &getAnalysis<MachineLoopInfo>();
- TII = F.getTarget().getInstrInfo();
- TLI = F.getTarget().getTargetLowering();
+ TII = F.getTarget().getSubtargetImpl()->getInstrInfo();
+ TLI = F.getTarget().getSubtargetImpl()->getTargetLowering();
assert(BlockToChain.empty());
buildCFGChains(F);
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-cse"
if (skipOptnoneFunction(*MF.getFunction()))
return false;
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<MachineDominatorTree>();
}
bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
const TargetSubtargetInfo &STI =
MF.getTarget().getSubtarget<TargetSubtargetInfo>();
+ TII = STI.getInstrInfo();
+ TRI = STI.getRegisterInfo();
SchedModel = STI.getSchedModel();
TSchedModel.init(*SchedModel, &STI, TII);
MRI = &MF.getRegInfo();
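// A minimal sketch of the same shape for a pass needing several of these
// objects: fetch the subtarget once, as MachineCombiner does above, then
// query it (member names are illustrative):
//
//   const TargetSubtargetInfo &STI =
//       MF.getTarget().getSubtarget<TargetSubtargetInfo>();
//   TII = STI.getInstrInfo();
//   TRI = STI.getRegisterInfo();
//   SchedModel = STI.getSchedModel();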
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "codegen-cp"
bool Changed = false;
- TRI = MF.getTarget().getRegisterInfo();
- TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
MRI = &MF.getRegInfo();
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "codegen"
unsigned FunctionNum, MachineModuleInfo &mmi,
GCModuleInfo* gmi)
: Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi), GMI(gmi) {
- if (TM.getRegisterInfo())
+ if (TM.getSubtargetImpl()->getRegisterInfo())
RegInfo = new (Allocator) MachineRegisterInfo(TM);
else
RegInfo = nullptr;
getStackAlignment(AttributeSet::FunctionIndex));
ConstantPool = new (Allocator) MachineConstantPool(TM);
- Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
+ Alignment =
+ TM.getSubtargetImpl()->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
if (!Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::OptimizeForSize))
- Alignment = std::max(Alignment,
- TM.getTargetLowering()->getPrefFunctionAlignment());
+ Alignment = std::max(
+ Alignment,
+ TM.getSubtargetImpl()->getTargetLowering()->getPrefFunctionAlignment());
FunctionNumber = FunctionNum;
JumpTableInfo = nullptr;
// Print Constant Pool
ConstantPool->print(OS);
- const TargetRegisterInfo *TRI = getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTarget().getSubtargetImpl()->getRegisterInfo();
if (RegInfo && !RegInfo->livein_empty()) {
OS << "Function Live Ins: ";
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
- const DataLayout *DL = getTarget().getDataLayout();
+ const DataLayout *DL = getTarget().getSubtargetImpl()->getDataLayout();
assert(JumpTableInfo && "No jump tables");
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
/// base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
- const DataLayout *DL = getTarget().getDataLayout();
+ const DataLayout *DL = getTarget().getSubtargetImpl()->getDataLayout();
return Ctx.GetOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
Twine(getFunctionNumber())+"$pb");
}
//===----------------------------------------------------------------------===//
const TargetFrameLowering *MachineFrameInfo::getFrameLowering() const {
- return TM.getFrameLowering();
+ return TM.getSubtargetImpl()->getFrameLowering();
}
/// ensureMaxAlignment - Make sure the function is at least Align bytes
const MachineFunction *MF = MBB->getParent();
assert(MF && "MBB must be part of a MachineFunction");
const TargetMachine &TM = MF->getTarget();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
BitVector BV(TRI->getNumRegs());
// Before CSI is calculated, no registers are considered pristine. They can be
}
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
- const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ const TargetFrameLowering *TFI =
+ MF.getTarget().getSubtargetImpl()->getFrameLowering();
+ const TargetRegisterInfo *RegInfo =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
unsigned MaxAlign = getMaxAlignment();
int Offset = 0;
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;
- const TargetFrameLowering *FI = MF.getTarget().getFrameLowering();
+ const TargetFrameLowering *FI =
+ MF.getTarget().getSubtargetImpl()->getFrameLowering();
int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
OS << "Frame Objects:\n";
void MachineConstantPoolValue::anchor() { }
const DataLayout *MachineConstantPool::getDataLayout() const {
- return TM.getDataLayout();
+ return TM.getSubtargetImpl()->getDataLayout();
}
Type *MachineConstantPoolEntry::getType() const {
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
if (const MachineBasicBlock *MBB = MI->getParent())
if (const MachineFunction *MF = MBB->getParent())
TM = &MF->getTarget();
- const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : nullptr;
+ const TargetRegisterInfo *TRI =
+ TM ? TM->getSubtargetImpl()->getRegisterInfo() : nullptr;
switch (getType()) {
case MachineOperand::MO_Register:
OS << " = ";
// Print the opcode name.
- if (TM && TM->getInstrInfo())
- OS << TM->getInstrInfo()->getName(getOpcode());
+ if (TM && TM->getSubtargetImpl()->getInstrInfo())
+ OS << TM->getSubtargetImpl()->getInstrInfo()->getName(getOpcode());
else
OS << "UNKNOWN";
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (MRI.use_empty(Reg)) {
bool HasAliasLive = false;
- for (MCRegAliasIterator AI(Reg, TM->getRegisterInfo(), true);
+ for (MCRegAliasIterator AI(
+ Reg, TM->getSubtargetImpl()->getRegisterInfo(), true);
AI.isValid(); ++AI) {
unsigned AliasReg = *AI;
if (!MRI.use_empty(AliasReg)) {
else
MO.print(OS, TM);
} else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
- OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
+ OS << TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIndexName(
+ MO.getImm());
} else if (i == AsmDescOp && MO.isImm()) {
// Pretty print the inline asm operand descriptor.
OS << '$' << AsmOpCount++;
unsigned RCID = 0;
if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
if (TM)
- OS << ':' << TM->getRegisterInfo()->getRegClass(RCID)->getName();
+ OS << ':'
+ << TM->getSubtargetImpl()
+ ->getRegisterInfo()
+ ->getRegClass(RCID)
+ ->getName();
else
OS << ":RC" << RCID;
}
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
const TargetMachine &TM = MBB.getParent()->getTarget();
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
MachineInstrBuilder MIB = BuildMI(*MBB.getParent(), FirstMI->getDebugLoc(),
TII->get(TargetOpcode::BUNDLE));
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-licm"
Changed = FirstInLoop = false;
TM = &MF.getTarget();
- TII = TM->getInstrInfo();
- TLI = TM->getTargetLowering();
- TRI = TM->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
+ TLI = TM->getSubtargetImpl()->getTargetLowering();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
- InstrItins = TM->getInstrItineraryData();
+ InstrItins = TM->getSubtargetImpl()->getInstrItineraryData();
PreRegAlloc = MRI->isSSA();
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
bool
MachineRegisterInfo::recomputeRegClass(unsigned Reg, const TargetMachine &TM) {
- const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
const TargetRegisterClass *OldRC = getRegClass(Reg);
const TargetRegisterClass *NewRC =
getTargetRegisterInfo()->getLargestLegalSuperClass(OldRC);
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
using namespace llvm;
MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *NewPHI)
: AV(nullptr), InsertedPHIs(NewPHI) {
- TII = MF.getTarget().getInstrInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
MRI = &MF.getRegInfo();
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler) {
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF->getTarget().getSubtargetImpl()->getInstrInfo();
bool IsPostRA = Scheduler.isPostRA();
// Visit all machine basic blocks.
const TargetMachine &TM = DAG->MF.getTarget();
if (!Top.HazardRec) {
Top.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ TM.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
+ Itin, DAG);
}
if (!Bot.HazardRec) {
Bot.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ TM.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
+ Itin, DAG);
}
}
MachineBasicBlock::iterator End,
unsigned NumRegionInstrs) {
const TargetMachine &TM = Context->MF->getTarget();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
// Avoid setting up the register pressure tracker for small regions to save
// compile time. As a rough heuristic, only track pressure when the number of
const TargetMachine &TM = DAG->MF.getTarget();
if (!Top.HazardRec) {
Top.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ TM.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
+ Itin, DAG);
}
}
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "machine-sink"
DEBUG(dbgs() << "******** Machine Sinking ********\n");
const TargetMachine &TM = MF.getTarget();
- TII = TM.getInstrInfo();
- TRI = TM.getRegisterInfo();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
+ TRI = TM.getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
DT = &getAnalysis<MachineDominatorTree>();
LI = &getAnalysis<MachineLoopInfo>();
bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) {
MF = &Func;
- TII = MF->getTarget().getInstrInfo();
- TRI = MF->getTarget().getRegisterInfo();
+ TII = MF->getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF->getRegInfo();
Loops = &getAnalysis<MachineLoopInfo>();
const TargetSubtargetInfo &ST =
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
namespace {
this->MF = &MF;
TM = &MF.getTarget();
- TII = TM->getInstrInfo();
- TRI = TM->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
LiveVars = nullptr;
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "phi-opt"
return false;
MRI = &Fn.getRegInfo();
- TII = Fn.getTarget().getInstrInfo();
+ TII = Fn.getTarget().getSubtargetImpl()->getInstrInfo();
// Find dead PHI cycles and PHI cycles that can be replaced by a single
// value. InstCombine does these optimizations, but DAG legalization may
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
// Insert a register to register copy at the top of the current block (but
// after any remaining phi nodes) which copies the new incoming register
// into the phi node destination.
- const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF.getTarget().getSubtargetImpl()->getInstrInfo();
if (isSourceDefinedByImplicitDef(MPhi, MRI))
// If all sources of a PHI node are implicit_def, just emit an
// implicit_def instead of a copy.
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "peephole-opt"
// Ensure DstReg can get a register class that actually supports
// sub-registers. Don't change the class until we commit.
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
- DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
+ DstRC = TM->getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
+ DstRC, SubIdx);
if (!DstRC)
return false;
// register.
// If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
// SrcReg:SubIdx should be replaced.
- bool UseSrcSubIdx = TM->getRegisterInfo()->
- getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != nullptr;
+ bool UseSrcSubIdx =
+ TM->getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
+ MRI->getRegClass(SrcReg), SubIdx) != nullptr;
// The source has other uses. See if we can replace the other uses with use of
// the result of the extension.
unsigned Src;
unsigned SrcSubReg;
bool ShouldRewrite = false;
- const TargetRegisterInfo &TRI = *TM->getRegisterInfo();
+ const TargetRegisterInfo &TRI = *TM->getSubtargetImpl()->getRegisterInfo();
// Follow the chain of copies until we reach the top of the use-def chain
// or find a more suitable source.
return false;
TM = &MF.getTarget();
- TII = TM->getInstrInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
MRI = &MF.getRegInfo();
DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;
: ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), AA(AA), EndIndex(0) {
const TargetMachine &TM = MF.getTarget();
- const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
+ const InstrItineraryData *InstrItins =
+ TM.getSubtargetImpl()->getInstrItineraryData();
HazardRec =
- TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
+ TM.getSubtargetImpl()->getInstrInfo()->CreateTargetPostRAHazardRecognizer(
+ InstrItins, this);
assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
MRI.tracksLiveness()) &&
if (skipOptnoneFunction(*Fn.getFunction()))
return false;
- TII = Fn.getTarget().getInstrInfo();
+ TII = Fn.getTarget().getSubtargetImpl()->getInstrInfo();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
bool Changed = false;
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
assert(MRI->isSSA() && "ProcessImplicitDefs only works on SSA form.");
assert(WorkList.empty() && "Inconsistent worklist state");
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
const Function* F = Fn.getFunction();
- const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
- const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
+ const TargetRegisterInfo *TRI =
+ Fn.getTarget().getSubtargetImpl()->getRegisterInfo();
+ const TargetFrameLowering *TFI =
+ Fn.getTarget().getSubtargetImpl()->getFrameLowering();
assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
- const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
- const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
+ const TargetInstrInfo &TII =
+ *Fn.getTarget().getSubtargetImpl()->getInstrInfo();
+ const TargetFrameLowering *TFI =
+ Fn.getTarget().getSubtargetImpl()->getFrameLowering();
MachineFrameInfo *MFI = Fn.getFrameInfo();
unsigned MaxCallFrameSize = 0;
/// calculateCalleeSavedRegisters - Scan the function for modified callee saved
/// registers.
void PEI::calculateCalleeSavedRegisters(MachineFunction &F) {
- const TargetRegisterInfo *RegInfo = F.getTarget().getRegisterInfo();
- const TargetFrameLowering *TFI = F.getTarget().getFrameLowering();
+ const TargetRegisterInfo *RegInfo =
+ F.getTarget().getSubtargetImpl()->getRegisterInfo();
+ const TargetFrameLowering *TFI =
+ F.getTarget().getSubtargetImpl()->getFrameLowering();
MachineFrameInfo *MFI = F.getFrameInfo();
// Get the callee saved register list...
if (CSI.empty())
return;
- const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
- const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
- const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
+ const TargetInstrInfo &TII =
+ *Fn.getTarget().getSubtargetImpl()->getInstrInfo();
+ const TargetFrameLowering *TFI =
+ Fn.getTarget().getSubtargetImpl()->getFrameLowering();
+ const TargetRegisterInfo *TRI =
+ Fn.getTarget().getSubtargetImpl()->getRegisterInfo();
MachineBasicBlock::iterator I;
// Spill using target interface.
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ const TargetFrameLowering &TFI =
+ *Fn.getTarget().getSubtargetImpl()->getFrameLowering();
StackProtector *SP = &getAnalysis<StackProtector>();
bool StackGrowsDown =
// Make sure the special register scavenging spill slot is closest to the
// incoming stack pointer if a frame pointer is required and is closer
// to the incoming rather than the final stack pointer.
- const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *RegInfo =
+ Fn.getTarget().getSubtargetImpl()->getRegisterInfo();
bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
TFI.isFPCloseToIncomingSP() &&
RegInfo->useFPForScavengingIndex(Fn) &&
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
+ const TargetFrameLowering &TFI =
+ *Fn.getTarget().getSubtargetImpl()->getFrameLowering();
// Add prologue to the function...
TFI.emitPrologue(Fn);
void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
int &SPAdj) {
const TargetMachine &TM = Fn.getTarget();
- assert(TM.getRegisterInfo() && "TM::getRegisterInfo() must be implemented!");
- const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
- const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- const TargetFrameLowering *TFI = TM.getFrameLowering();
+ assert(TM.getSubtargetImpl()->getRegisterInfo() &&
+ "TM::getRegisterInfo() must be implemented!");
+ const TargetInstrInfo &TII =
+ *Fn.getTarget().getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo &TRI = *TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetFrameLowering *TFI = TM.getSubtargetImpl()->getFrameLowering();
bool StackGrowsDown =
TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int FrameSetupOpcode = TII.getCallFrameSetupOpcode();
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
MF = &Fn;
MRI = &MF->getRegInfo();
TM = &Fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
MRI->freezeReservedRegs(Fn);
RegClassInfo.runOnMachineFunction(Fn);
UsedInInstr.clear();
MF = &mf;
const TargetMachine &TM = MF->getTarget();
- TRI = TM.getRegisterInfo();
- TII = TM.getInstrInfo();
+ TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
RCI.runOnMachineFunction(mf);
EnableLocalReassign = EnableLocalReassignment ||
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <limits>
#include <memory>
#include <set>
LiveIntervals *LIS = const_cast<LiveIntervals*>(lis);
MachineRegisterInfo *mri = &mf->getRegInfo();
- const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *tri =
+ mf->getTarget().getSubtargetImpl()->getRegisterInfo();
std::unique_ptr<PBQPRAProblem> p(new PBQPRAProblem());
PBQPRAGraph &g = p->getGraph();
PBQPRAGraph &g = p->getGraph();
const TargetMachine &tm = mf->getTarget();
- CoalescerPair cp(*tm.getRegisterInfo());
+ CoalescerPair cp(*tm.getSubtargetImpl()->getRegisterInfo());
// Scan the machine function and add a coalescing cost whenever CoalescerPair
// gives the OK.
mf = &MF;
tm = &mf->getTarget();
- tri = tm->getRegisterInfo();
- tii = tm->getInstrInfo();
+ tri = tm->getSubtargetImpl()->getRegisterInfo();
+ tii = tm->getSubtargetImpl()->getInstrInfo();
mri = &mf->getRegInfo();
lis = &getAnalysis<LiveIntervals>();
MF = &mf;
// Allocate new array the first time we see a new target.
- if (MF->getTarget().getRegisterInfo() != TRI) {
- TRI = MF->getTarget().getRegisterInfo();
+ if (MF->getTarget().getSubtargetImpl()->getRegisterInfo() != TRI) {
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
RegClass.reset(new RCInfo[TRI->getNumRegClasses()]);
unsigned NumPSets = TRI->getNumRegPressureSets();
PSetLimits.reset(new unsigned[NumPSets]);
MF = &fn;
MRI = &fn.getRegInfo();
TM = &fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
LIS = &getAnalysis<LiveIntervals>();
AA = &getAnalysis<AliasAnalysis>();
Loops = &getAnalysis<MachineLoopInfo>();
reset();
MF = mf;
- TRI = MF->getTarget().getRegisterInfo();
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
RCI = rci;
MRI = &MF->getRegInfo();
MBB = mbb;
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "reg-scavenging"
void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
MachineFunction &MF = *mbb->getParent();
const TargetMachine &TM = MF.getTarget();
- TII = TM.getInstrInfo();
- TRI = TM.getRegisterInfo();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
+ TRI = TM.getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
assert((NumPhysRegs == 0 || NumPhysRegs == TRI->getNumRegs()) &&
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
void SchedulingPriorityQueue::anchor() { }
ScheduleDAG::ScheduleDAG(MachineFunction &mf)
- : TM(mf.getTarget()),
- TII(TM.getInstrInfo()),
- TRI(TM.getRegisterInfo()),
- MF(mf), MRI(mf.getRegInfo()),
- EntrySU(), ExitSU() {
+ : TM(mf.getTarget()), TII(TM.getSubtargetImpl()->getInstrInfo()),
+ TRI(TM.getSubtargetImpl()->getRegisterInfo()), MF(mf),
+ MRI(mf.getRegInfo()), EntrySU(), ExitSU() {
#ifndef NDEBUG
StressSched = StressSchedOpt;
#endif
// FADD -> FMA combines:
if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
DAG.getTarget().Options.UnsafeFPMath) &&
- DAG.getTarget().getTargetLowering()->isFMAFasterThanFMulAndFAdd(VT) &&
+ DAG.getTarget()
+ .getSubtargetImpl()
+ ->getTargetLowering()
+ ->isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
// FSUB -> FMA combines:
if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
DAG.getTarget().Options.UnsafeFPMath) &&
- DAG.getTarget().getTargetLowering()->isFMAFasterThanFMulAndFAdd(VT) &&
+ DAG.getTarget()
+ .getSubtargetImpl()
+ ->getTargetLowering()
+ ->isFMAFasterThanFMulAndFAdd(VT) &&
(!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT))) {
// fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
// At this point, we know that we perform a cross-register-bank copy.
// Check if it is expensive.
- const TargetRegisterInfo *TRI = TLI.getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ TLI.getTargetMachine().getSubtargetImpl()->getRegisterInfo();
// Assume bitcasts are cheap, unless both register classes do not
// explicitly share a common sub class.
if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "isel"
FastISel::FastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo)
- : FuncInfo(funcInfo),
- MF(funcInfo.MF),
- MRI(FuncInfo.MF->getRegInfo()),
- MFI(*FuncInfo.MF->getFrameInfo()),
- MCP(*FuncInfo.MF->getConstantPool()),
- TM(FuncInfo.MF->getTarget()),
- DL(*TM.getDataLayout()),
- TII(*TM.getInstrInfo()),
- TLI(*TM.getTargetLowering()),
- TRI(*TM.getRegisterInfo()),
- LibInfo(libInfo) {
-}
+ : FuncInfo(funcInfo), MF(funcInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
+ MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
+ TM(FuncInfo.MF->getTarget()), DL(*TM.getSubtargetImpl()->getDataLayout()),
+ TII(*TM.getSubtargetImpl()->getInstrInfo()),
+ TLI(*TM.getSubtargetImpl()->getTargetLowering()),
+ TRI(*TM.getSubtargetImpl()->getRegisterInfo()), LibInfo(libInfo) {}
FastISel::~FastISel() {}
if (Alignment == 0) // Ensure that codegen never sees alignment 0.
Alignment = DL.getABITypeAlignment(ValTy);
- unsigned Size = TM.getDataLayout()->getTypeStoreSize(ValTy);
+ unsigned Size =
+ TM.getSubtargetImpl()->getDataLayout()->getTypeStoreSize(ValTy);
if (IsVolatile)
Flags |= MachineMemOperand::MOVolatile;
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
SelectionDAG *DAG) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
Fn = &fn;
MF = &mf;
(unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
AI->getAllocatedType()),
AI->getAlignment());
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign =
+ TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
// Inform the Frame Information that we have variable-sized objects.
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF->getTarget().getSubtargetImpl()->getInstrInfo();
for (unsigned i = 0; i != NumRegisters; ++i)
BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
PHIReg += NumRegisters;
/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
- return RegInfo->
- createVirtualRegister(TM.getTargetLowering()->getRegClassFor(VT));
+ return RegInfo->createVirtualRegister(
+ TM.getSubtargetImpl()->getTargetLowering()->getRegClassFor(VT));
}
/// CreateRegs - Allocate the appropriate number of virtual registers of
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(*TLI, Ty, ValueVTs);
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "instr-emitter"
Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
- Align = TM->getDataLayout()->getPrefTypeAlignment(Type);
+ Align =
+ TM->getSubtargetImpl()->getDataLayout()->getPrefTypeAlignment(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TM->getDataLayout()->getTypeAllocSize(Type);
+ Align = TM->getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Type);
}
}
/// at the given position in the given block.
InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
MachineBasicBlock::iterator insertpos)
- : MF(mbb->getParent()),
- MRI(&MF->getRegInfo()),
- TM(&MF->getTarget()),
- TII(TM->getInstrInfo()),
- TRI(TM->getRegisterInfo()),
- TLI(TM->getTargetLowering()),
- MBB(mbb), InsertPos(insertpos) {
-}
+ : MF(mbb->getParent()), MRI(&MF->getRegInfo()), TM(&MF->getTarget()),
+ TII(TM->getSubtargetImpl()->getInstrInfo()),
+ TRI(TM->getSubtargetImpl()->getRegisterInfo()),
+ TLI(TM->getSubtargetImpl()->getTargetLowering()), MBB(mbb),
+ InsertPos(insertpos) {}
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "legalizedag"
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign =
+ TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
cl::desc("Track reg pressure and switch priority to in-depth"));
ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
- : Picker(this),
- InstrItins(
- IS->getTargetLowering()->getTargetMachine().getInstrItineraryData()) {
+ : Picker(this), InstrItins(IS->getTargetLowering()
+ ->getTargetMachine()
+ .getSubtargetImpl()
+ ->getInstrItineraryData()) {
const TargetMachine &TM = (*IS->MF).getTarget();
- TRI = TM.getRegisterInfo();
+ TRI = TM.getSubtargetImpl()->getRegisterInfo();
TLI = IS->getTargetLowering();
- TII = TM.getInstrInfo();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
ResourcesModel = TII->CreateTargetScheduleState(&TM, nullptr);
// This hard requirement could be relaxed, but for now
// do not let it proceed.
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
if (DisableSchedCycles || !NeedLatency)
HazardRec = new ScheduleHazardRecognizer();
else
- HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(
- tm.getSubtargetImpl(), this);
+ HazardRec =
+ tm.getSubtargetImpl()->getInstrInfo()->CreateTargetHazardRecognizer(
+ tm.getSubtargetImpl(), this);
}
~ScheduleDAGRRList() {
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
BURegReductionPriorityQueue *PQ =
new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
SrcRegReductionPriorityQueue *PQ =
new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
const TargetLowering *TLI = IS->getTargetLowering();
HybridBURRPriorityQueue *PQ =
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetMachine &TM = IS->TM;
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
const TargetLowering *TLI = IS->getTargetLowering();
ILPBURRPriorityQueue *PQ =
"instructions take for targets with no itinerary"));
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
- : ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
- InstrItins(mf.getTarget().getInstrItineraryData()) {}
+ : ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
+ InstrItins(mf.getTarget().getSubtargetImpl()->getInstrItineraryData()) {}
/// Run - perform scheduling.
///
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;
: ScheduleDAGSDNodes(mf), AvailableQueue(availqueue), AA(aa) {
const TargetMachine &tm = mf.getTarget();
- HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(
- tm.getSubtargetImpl(), this);
+ HazardRec =
+ tm.getSubtargetImpl()->getInstrInfo()->CreateTargetHazardRecognizer(
+ tm.getSubtargetImpl(), this);
}
~ScheduleDAGVLIW() {
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
- return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
+ return TM.getSubtargetImpl()
+ ->getTargetLowering()
+ ->getDataLayout()
+ ->getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
- : TM(tm), TSI(*tm.getSelectionDAGInfo()), TLI(nullptr), OptLevel(OL),
- EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
- Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
- UpdateListeners(nullptr) {
+ : TM(tm), TSI(*tm.getSubtargetImpl()->getSelectionDAGInfo()), TLI(nullptr),
+ OptLevel(OL),
+ EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
+ Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
+ UpdateListeners(nullptr) {
AllNodes.push_back(&EntryNode);
DbgInfo = new SDDbgInfo();
}
EVT EltVT = VT.getScalarType();
const ConstantInt *Elt = &Val;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
// In some cases the vector type is legal but the element type is illegal and
// needs to be promoted, for example v8i8 on ARM. In this case, promote the
}
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
- return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
+ return getConstant(Val,
+ TM.getSubtargetImpl()->getTargetLowering()->getPointerTy(),
+ isTarget);
}
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTargetGA) &&
"Cannot set target flags on target-independent globals");
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
// Truncate (with sign-extension) the offset value to the pointer size.
unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment =
- TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment = TM.getSubtargetImpl()
+ ->getTargetLowering()
+ ->getDataLayout()
+ ->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment =
- TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment = TM.getSubtargetImpl()
+ ->getTargetLowering()
+ ->getDataLayout()
+ ->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
EVT OpTy = Op.getValueType();
- EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
+ EVT ShTy =
+ TM.getSubtargetImpl()->getTargetLowering()->getShiftAmountTy(LHSTy);
if (OpTy == ShTy || OpTy.isVector()) return Op;
ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext());
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
unsigned StackAlign =
std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
const DataLayout *TD = TLI->getDataLayout();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
case ISD::SETFALSE2: return getConstant(0, VT);
case ISD::SETTRUE:
case ISD::SETTRUE2: {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
TargetLowering::BooleanContent Cnt =
TLI->getBooleanContents(N1->getValueType(0));
return getConstant(
// Ensure that the constant occurs on the RHS.
ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
MVT CompVT = N1.getValueType().getSimpleVT();
- if (!TM.getTargetLowering()->isCondCodeLegal(SwappedCond, CompVT))
+ if (!TM.getSubtargetImpl()->getTargetLowering()->isCondCodeLegal(
+ SwappedCond, CompVT))
return SDValue();
return getSetCC(dl, VT, N2, N1, SwappedCond);
/// them in the KnownZero/KnownOne bitsets.
void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
APInt &KnownOne, unsigned Depth) const {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT VT = Op.getValueType();
assert(VT.isInteger() && "Invalid VT!");
unsigned VTBits = VT.getScalarType().getSizeInBits();
// Don't promote to an alignment that would require dynamic stack
// realignment.
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
while (NewAlign > Align &&
TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
// beyond the given memory regions. But fixing this isn't easy, and most
// people don't care.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
// Emit a library call.
TargetLowering::ArgListTy Args;
// FIXME: If the memmove is volatile, lowering it to plain libc memmove may
// not be safe. See memcpy above for more details.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
// Emit a library call.
TargetLowering::ArgListTy Args;
return Result;
// Emit a library call.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
EVT OperandVT = Operand.getValueType();
if (OperandVT.isVector()) {
// A vector operand; extract a single element.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT OperandEltVT = OperandVT.getVectorElementType();
Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
OperandEltVT,
const GlobalValue *GV2 = nullptr;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
if (isGA1 && isGA2 && GV1 == GV2)
// If this is a GlobalAddress + cst, return the alignment.
const GlobalValue *GV;
int64_t GVOffset = 0;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
AA = &aa;
GFI = gfi;
LibInfo = li;
- DL = DAG.getTarget().getDataLayout();
+ DL = DAG.getTarget().getSubtargetImpl()->getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
- RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
- InReg, V->getType());
+ RegsForValue RFV(*DAG.getContext(),
+ *TM.getSubtargetImpl()->getTargetLowering(), InReg,
+ V->getType());
SDValue Chain = DAG.getEntryNode();
N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, N);
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (const Constant *C = dyn_cast<Constant>(V)) {
EVT VT = TLI->getValueType(V->getType(), true);
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
SmallVector<SDValue, 8> OutVals;
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction()->getCallingConv();
- Chain = TM.getTargetLowering()->LowerReturn(Chain, CallConv, isVarArg,
- Outs, OutVals, getCurSDLoc(),
- DAG);
+ Chain = TM.getSubtargetImpl()->getTargetLowering()->LowerReturn(
+ Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
// jle foo
//
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
- if (!TM.getTargetLowering()->isJumpExpensive() &&
- BOp->hasOneUse() &&
- (BOp->getOpcode() == Instruction::And ||
- BOp->getOpcode() == Instruction::Or)) {
+ if (!TM.getSubtargetImpl()->getTargetLowering()->isJumpExpensive() &&
+ BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And ||
+ BOp->getOpcode() == Instruction::Or)) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
getEdgeWeight(BrMBB, Succ1MBB));
void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
- EVT PTy = TM.getTargetLowering()->getPointerTy();
+ EVT PTy = TM.getSubtargetImpl()->getTargetLowering()->getPointerTy();
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
// can be used as an index into the jump table in a subsequent basic block.
// This value may be smaller or larger than the target's pointer type, and
// therefore require extension or truncating.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy());
unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy());
MachineBasicBlock *ParentBB) {
// First create the loads to the guard/stack slot for the comparison.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT PtrTy = TLI->getPointerTy();
MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
MVT::isVoid, nullptr, 0, false,
getCurSDLoc(), false, false).second;
DAG.getConstant(B.First, VT));
// Check range
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(),
TLI->getSetCCResultType(*DAG.getContext(),
Sub.getValueType()),
Reg, VT);
SDValue Cmp;
unsigned PopCount = CountPopulation_64(B.Mask);
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (PopCount == 1) {
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother to create these DAG nodes.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (TLI->getExceptionPointerRegister() == 0 &&
TLI->getExceptionSelectorRegister() == 0)
return;
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
TSize += I->size();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries()))
return false;
RSize -= J->size();
}
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (areJTsAllowed(*TLI)) {
// If our case is dense we *really* should handle it earlier!
assert((FMetric > 0) && "Should handle dense range earlier!");
const Value* SV,
MachineBasicBlock* Default,
MachineBasicBlock* SwitchBB) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT PTy = TLI->getPointerTy();
unsigned IntPtrBits = PTy.getSizeInBits();
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- EVT ShiftTy = TM.getTargetLowering()->getShiftAmountTy(Op2.getValueType());
+ EVT ShiftTy = TM.getSubtargetImpl()->getTargetLowering()->getShiftAmountTy(
+ Op2.getValueType());
// Coerce the shift amount to the right type if we can.
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
!isa<ConstantSDNode>(Op1) &&
isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
- setValue(&I, TM.getTargetLowering()->BuildExactSDIV(Op1, Op2,
- getCurSDLoc(), DAG));
+ setValue(&I, TM.getSubtargetImpl()->getTargetLowering()->BuildExactSDIV(
+ Op1, Op2, getCurSDLoc(), DAG));
else
setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
Op1, Op2));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}
ISD::CondCode Condition = getFCmpCondCode(predicate);
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*TM.getTargetLowering(), I.getType(), ValueVTs);
+ ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), I.getType(),
+ ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPTrunc(const User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT DestVT = TLI->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(),
DestVT, N,
void SelectionDAGBuilder::visitFPExt(const User &I) {
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSIToFP(const User &I) {
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
// BitCast assures us that source and destination are the same size so this is
// either a BITCAST or a no-op.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const Value *SV = I.getOperand(0);
SDValue N = getValue(SV);
- EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT DestVT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
unsigned SrcAS = SV->getType()->getPointerAddressSpace();
unsigned DestAS = I.getType()->getPointerAddressSpace();
SDValue InVal = getValue(I.getOperand(1));
SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
getCurSDLoc(), TLI.getVectorIdxTy());
- setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
- TM.getTargetLowering()->getValueType(I.getType()),
- InVec, InVal, InIdx));
+ setValue(&I,
+ DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(
+ I.getType()),
+ InVec, InVal, InIdx));
}
void SelectionDAGBuilder::visitExtractElement(const User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
getCurSDLoc(), TLI.getVectorIdxTy());
- setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
- TM.getTargetLowering()->getValueType(I.getType()),
- InVec, InIdx));
+ setValue(&I,
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(
+ I.getType()),
+ InVec, InIdx));
}
// Utility for visitShuffleVector - Return true if every element in Mask,
ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
unsigned MaskNumElts = Mask.size();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT VT = TLI->getValueType(I.getType());
EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(*TLI, AggTy, AggValueVTs);
SmallVector<EVT, 4> ValValueVTs;
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(*TLI, ValTy, ValValueVTs);
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
uint64_t Offs =
return; // getValue will auto-populate this.
Type *Ty = I.getAllocatedType();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If it is greater, we record the requested
// alignment on the DYNAMIC_STACKALLOC node.
- unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign =
+ TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
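The alignment handling above reduces to one comparison against the target's stack alignment: a request the stack already satisfies is encoded as 0, so no dynamic realignment is asked for. A tiny illustrative sketch (hypothetical helper, not part of the patch):

// Drop a requested alloca alignment that the stack alignment already covers;
// only a strictly larger request needs to ride on the DYNAMIC_STACKALLOC node.
static unsigned normalizeAllocaAlign(unsigned RequestedAlign,
                                     unsigned StackAlign) {
  return RequestedAlign <= StackAlign ? 0 : RequestedAlign;
}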
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(*TM.getTargetLowering(), Ty, ValueVTs, &Offsets);
+ ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), Ty, ValueVTs,
+ &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
Root = DAG.getRoot();
}
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (isVolatile)
Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(*TM.getTargetLowering(), SrcV->getType(), ValueVTs, &Offsets);
+ ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), SrcV->getType(),
+ ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
SDValue InChain = getRoot();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (TLI->getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
DAG, *TLI);
SDValue InChain = getRoot();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (TLI->getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
DAG, *TLI);
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
SDLoc dl = getCurSDLoc();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SDValue Ops[3];
Ops[0] = getRoot();
Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
SDValue InChain = getRoot();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT VT = TLI->getValueType(I.getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
SDValue InChain = getRoot();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
EVT VT = TLI->getValueType(I.getValueOperand()->getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
// Info is set by getTgtMemIntrinsic
TargetLowering::IntrinsicInfo Info;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic);
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ DAG.getTarget().getSubtargetImpl()->getInstrInfo();
// Ignore inlined function arguments here.
DIVariable DV(Variable);
/// otherwise lower it and return null.
const char *
SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
SDLoc sdl = getCurSDLoc();
DebugLoc dl = getCurDebugLoc();
SDValue Res;
case Intrinsic::read_register: {
Value *Reg = I.getArgOperand(0);
SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
- EVT VT = TM.getTargetLowering()->getValueType(I.getType());
+ EVT VT =
+ TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::READ_REGISTER, sdl, VT, RegName));
return nullptr;
}
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
Type *RetTy = FTy->getReturnType();
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
SDValue Value,
bool IsSigned) {
- EVT VT = TM.getTargetLowering()->getValueType(I.getType(), true);
+ EVT VT = TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType(),
+ true);
if (IsSigned)
Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
else
const Value *Size = I.getArgOperand(2);
const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
if (CSize && CSize->getZExtValue() == 0) {
- EVT CallVT = TM.getTargetLowering()->getValueType(I.getType(), true);
+ EVT CallVT = TM.getSubtargetImpl()->getTargetLowering()->getValueType(
+ I.getType(), true);
setValue(&I, DAG.getConstant(0, CallVT));
return true;
}
// Require that we can find a legal MVT, and only do this if the target
// supports unaligned loads of that type. Expanding into byte loads would
// bloat the code.
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
if (ActuallyDoIt && CSize->getZExtValue() > 4) {
unsigned DstAS = LHS->getType()->getPointerAddressSpace();
unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
if (!RenameFn)
Callee = getValue(I.getCalledValue());
else
- Callee = DAG.getExternalSymbol(RenameFn,
- TM.getTargetLowering()->getPointerTy());
+ Callee = DAG.getExternalSymbol(
+ RenameFn, TM.getSubtargetImpl()->getTargetLowering()->getPointerTy());
// Check if we can potentially perform a tail call. More detailed checking will
// be done within LowerCallTo, after more information about the call is known.
/// ConstraintOperands - Information about all of the constraints.
SDISelAsmOperandInfoVector ConstraintOperands;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
TargetLowering::AsmOperandInfoVector
TargetConstraints = TLI->ParseConstraints(CS);
}
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
const DataLayout &DL = *TLI->getDataLayout();
SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
getRoot(), getValue(I.getOperand(0)),
.setCallee(CI.getCallingConv(), retTy, Callee, std::move(Args), NumArgs)
.setDiscardResult(!CI.use_empty());
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
return TLI->LowerCallTo(CLI);
}
"Copy from a reg to the same reg!");
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V);
// Remember that this register needs to be added to the machine PHI node as
// the input for this MBB.
SmallVector<EVT, 4> ValueVTs;
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
std::string SDNode::getOperationName(const SelectionDAG *G) const {
return "<<Unknown DAG Node>>";
if (isMachineOpcode()) {
if (G)
- if (const TargetInstrInfo *TII = G->getTarget().getInstrInfo())
+ if (const TargetInstrInfo *TII =
+ G->getTarget().getSubtargetImpl()->getInstrInfo())
if (getMachineOpcode() < TII->getNumOpcodes())
return TII->getName(getMachineOpcode());
return "<<Unknown Machine Node #" + utostr(getOpcode()) + ">>";
OS << LBB->getName() << " ";
OS << (const void*)BBDN->getBasicBlock() << ">";
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
- OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :nullptr);
+ OS << ' '
+ << PrintReg(R->getReg(),
+ G ? G->getTarget().getSubtargetImpl()->getRegisterInfo()
+ : nullptr);
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(this)) {
OS << "'" << ES->getSymbol() << "'";
"-fast-isel-abort requires -fast-isel");
const Function &Fn = *mf.getFunction();
- const TargetInstrInfo &TII = *TM.getInstrInfo();
- const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- const TargetLowering *TLI = TM.getTargetLowering();
+ const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ const TargetRegisterInfo &TRI = *TM.getSubtargetImpl()->getRegisterInfo();
+ const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
MF = &mf;
RegInfo = &MF->getRegInfo();
break;
for (const auto &MI : MBB) {
- const MCInstrDesc &MCID = TM.getInstrInfo()->get(MI.getOpcode());
+ const MCInstrDesc &MCID =
+ TM.getSubtargetImpl()->getInstrInfo()->get(MI.getOpcode());
if ((MCID.isCall() && !MCID.isReturn()) ||
MI.isStackAligningInlineAsm()) {
MFI->setHasCalls(true);
// Assign the call site to the landing pad's begin label.
MF->getMMI().setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
- const MCInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
+ const MCInstrDesc &II =
+ TM.getSubtargetImpl()->getInstrInfo()->get(TargetOpcode::EH_LABEL);
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
if (EmitNodeInfo & OPFL_MemRefs) {
// Only attach load or store memory operands if the generated
// instruction may load or store.
- const MCInstrDesc &MCID = TM.getInstrInfo()->get(TargetOpc);
+ const MCInstrDesc &MCID =
+ TM.getSubtargetImpl()->getInstrInfo()->get(TargetOpc);
bool mayLoad = MCID.mayLoad();
bool mayStore = MCID.mayStore();
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
std::make_pair(0u, static_cast<const TargetRegisterClass*>(nullptr));
// Figure out which register class contains this reg.
- const TargetRegisterInfo *RI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *RI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
E = RI->regclass_end(); RCI != E; ++RCI) {
const TargetRegisterClass *RC = *RCI;
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
// that needs to be restored on all exits from the function. This is an alloca
// because the value needs to be added to the global context list.
- const TargetLowering *TLI = TM->getTargetLowering();
+ const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();
unsigned Align =
TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
FuncCtx = new AllocaInst(FunctionContextTy, nullptr, Align, "fn_context",
lis = &pass.getAnalysis<LiveIntervals>();
mfi = mf.getFrameInfo();
mri = &mf.getRegInfo();
- tii = mf.getTarget().getInstrInfo();
- tri = mf.getTarget().getRegisterInfo();
+ tii = mf.getTarget().getSubtargetImpl()->getInstrInfo();
+ tri = mf.getTarget().getSubtargetImpl()->getRegisterInfo();
}
/// Add spill ranges for every use/def of the live interval, inserting loads
// Split Analysis
//===----------------------------------------------------------------------===//
-SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm,
- const LiveIntervals &lis,
+SplitAnalysis::SplitAnalysis(const VirtRegMap &vrm, const LiveIntervals &lis,
const MachineLoopInfo &mli)
- : MF(vrm.getMachineFunction()),
- VRM(vrm),
- LIS(lis),
- Loops(mli),
- TII(*MF.getTarget().getInstrInfo()),
- CurLI(nullptr),
- LastSplitPoint(MF.getNumBlockIDs()) {}
+ : MF(vrm.getMachineFunction()), VRM(vrm), LIS(lis), Loops(mli),
+ TII(*MF.getTarget().getSubtargetImpl()->getInstrInfo()), CurLI(nullptr),
+ LastSplitPoint(MF.getNumBlockIDs()) {}
void SplitAnalysis::clear() {
UseSlots.clear();
//===----------------------------------------------------------------------===//
/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
-SplitEditor::SplitEditor(SplitAnalysis &sa,
- LiveIntervals &lis,
- VirtRegMap &vrm,
+SplitEditor::SplitEditor(SplitAnalysis &sa, LiveIntervals &lis, VirtRegMap &vrm,
MachineDominatorTree &mdt,
MachineBlockFrequencyInfo &mbfi)
- : SA(sa), LIS(lis), VRM(vrm),
- MRI(vrm.getMachineFunction().getRegInfo()),
- MDT(mdt),
- TII(*vrm.getMachineFunction().getTarget().getInstrInfo()),
- TRI(*vrm.getMachineFunction().getTarget().getRegisterInfo()),
- MBFI(mbfi),
- Edit(nullptr),
- OpenIdx(0),
- SpillMode(SM_Partition),
- RegAssign(Allocator)
-{}
+ : SA(sa), LIS(lis), VRM(vrm), MRI(vrm.getMachineFunction().getRegInfo()),
+ MDT(mdt), TII(*vrm.getMachineFunction()
+ .getTarget()
+ .getSubtargetImpl()
+ ->getInstrInfo()),
+ TRI(*vrm.getMachineFunction()
+ .getTarget()
+ .getSubtargetImpl()
+ ->getRegisterInfo()),
+ MBFI(mbfi), Edit(nullptr), OpenIdx(0), SpillMode(SM_Partition),
+ RegAssign(Allocator) {}
void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
Edit = &LRE;
#include "llvm/CodeGen/StackMapLivenessAnalysis.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: "
<< _MF.getName() << " **********\n");
MF = &_MF;
- TRI = MF->getTarget().getRegisterInfo();
+ TRI = MF->getTarget().getSubtargetImpl()->getRegisterInfo();
++NumStackMapFuncVisited;
// Skip this function if there are no patchpoints to process.
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <iterator>
using namespace llvm;
switch (MOI->getImm()) {
default: llvm_unreachable("Unrecognized operand type.");
case StackMaps::DirectMemRefOp: {
- unsigned Size = AP.TM.getDataLayout()->getPointerSizeInBits();
+ unsigned Size =
+ AP.TM.getSubtargetImpl()->getDataLayout()->getPointerSizeInBits();
assert((Size % 8) == 0 && "Need pointer size in bytes.");
Size /= 8;
unsigned Reg = (++MOI)->getReg();
assert(TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) &&
"Virtreg operands should have been rewritten before now.");
const TargetRegisterClass *RC =
- AP.TM.getRegisterInfo()->getMinimalPhysRegClass(MOI->getReg());
+ AP.TM.getSubtargetImpl()->getRegisterInfo()->getMinimalPhysRegClass(
+ MOI->getReg());
assert(!MOI->getSubReg() && "Physical subreg still around.");
Locs.push_back(
Location(Location::Register, RC->getSize(), MOI->getReg(), 0));
StackMaps::LiveOutVec
StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
assert(Mask && "No register mask specified");
- const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = AP.TM.getSubtargetImpl()->getRegisterInfo();
LiveOutVec LiveOuts;
// Create a LiveOutReg for each bit that is set in the register mask.
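The loop that follows this comment (trimmed here) walks the packed 32-bit mask words and records one live-out entry per set bit. A self-contained sketch of that kind of scan, using hypothetical names and no StackMaps types, for illustration only:

#include <cstdint>
#include <vector>

// Collect the index of every set bit in a packed 32-bit-word register mask,
// one bit per physical register number.
static std::vector<unsigned> collectSetBits(const uint32_t *Mask,
                                            unsigned NumRegs) {
  std::vector<unsigned> Regs;
  for (unsigned Reg = 0; Reg != NumRegs; ++Reg)
    if (Mask[Reg / 32] & (1u << (Reg % 32)))
      Regs.push_back(Reg);
  return Regs;
}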
// Record the stack size of the current function.
const MachineFrameInfo *MFI = AP.MF->getFrameInfo();
- const TargetRegisterInfo *RegInfo = AP.MF->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *RegInfo =
+ AP.MF->getTarget().getSubtargetImpl()->getRegisterInfo();
const bool DynamicFrameSize = MFI->hasVarSizedObjects() ||
RegInfo->needsStackRealignment(*(AP.MF));
FnStackSize[AP.CurrentFnSym] =
MCContext &OutContext = AP.OutStreamer.getContext();
MCStreamer &OS = AP.OutStreamer;
- const TargetRegisterInfo *TRI = AP.TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = AP.TM.getSubtargetImpl()->getRegisterInfo();
// Create the section.
const MCSection *StackMapSection =
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <cstdlib>
using namespace llvm;
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
- TLI = TM->getTargetLowering();
+ TLI = TM->getSubtargetImpl()->getTargetLowering();
Attribute Attr = Fn.getAttributes().getAttribute(
AttributeSet::FunctionIndex, "stack-protector-buffer-size");
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <vector>
using namespace llvm;
});
MFI = MF.getFrameInfo();
- TII = MF.getTarget().getInstrInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
LS = &getAnalysis<LiveStacks>();
MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "tailduplication"
if (skipOptnoneFunction(*MF.getFunction()))
return false;
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <cstdlib>
using namespace llvm;
int TargetFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int FI, unsigned &FrameReg) const {
- const TargetRegisterInfo *RI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *RI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
// By default, assume all frame indices are referenced via whatever
// getFrameRegister() says. The target can override this if it's doing
Offset = 0;
return true;
}
- unsigned BitSize = TM->getRegisterInfo()->getSubRegIdxSize(SubIdx);
+ unsigned BitSize =
+ TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size to be consistent with
// MCRegisterClass::getSize().
if (BitSize % 8)
return false;
- int BitOffset = TM->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
+ int BitOffset =
+ TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
if (BitOffset < 0 || BitOffset % 8)
return false;
assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
- if (!TM->getDataLayout()->isLittleEndian()) {
+ if (!TM->getSubtargetImpl()->getDataLayout()->isLittleEndian()) {
Offset = RC->getSize() - (Offset + Size);
}
return true;
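The checks above turn a sub-register's bit-level index into a byte range within the spill slot: both size and offset must be whole bytes, and on a big-endian target the offset is mirrored from the high end of the register. A standalone sketch of that computation (hypothetical names, illustration only, not part of the patch):

// Map a sub-register bit range onto a byte range within a register of
// RegBytes bytes; returns false if the range is not byte-addressable.
static bool subRegByteRange(unsigned BitOffset, unsigned BitSize,
                            unsigned RegBytes, bool LittleEndian,
                            unsigned &ByteOffset, unsigned &ByteSize) {
  if (BitSize % 8 || BitOffset % 8)
    return false;
  ByteSize = BitSize / 8;
  ByteOffset = BitOffset / 8;
  if (!LittleEndian)
    ByteOffset = RegBytes - (ByteOffset + ByteSize); // count from the high end
  return true;
}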
const MachineOperand &MO = MI->getOperand(1-Ops[0]);
MachineBasicBlock::iterator Pos = MI;
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
if (Flags == MachineMemOperand::MOStore)
storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
// saves compile time, because it doesn't require every single
// stack slot reference to depend on the instruction that does the
// modification.
- const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetLowering &TLI =
+ *MF.getTarget().getSubtargetImpl()->getTargetLowering();
+ const TargetRegisterInfo *TRI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
return true;
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;
/// NOTE: The constructor takes ownership of TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
- : TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
+ : TM(tm), DL(TM.getSubtargetImpl()->getDataLayout()), TLOF(*tlof) {
initActions();
// Perform these initializations only once.
// Add a new memory operand for this FI.
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
- MachineMemOperand::MOLoad,
- TM.getDataLayout()->getPointerSize(),
- MFI.getObjectAlignment(FI));
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOLoad,
+ TM.getSubtargetImpl()->getDataLayout()->getPointerSize(),
+ MFI.getObjectAlignment(FI));
MIB->addMemOperand(MF, MMO);
// Replace the instruction and update the operand index.
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
TargetLoweringBase::findRepresentativeClass(MVT VT) const {
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
if (!RC)
return std::make_pair(RC, 0);
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
using namespace dwarf;
Flags,
SectionKind::getDataRel(),
0, Label->getName());
- unsigned Size = TM.getDataLayout()->getPointerSize();
+ unsigned Size = TM.getSubtargetImpl()->getDataLayout()->getPointerSize();
Streamer.SwitchSection(Sec);
- Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
+ Streamer.EmitValueToAlignment(
+ TM.getSubtargetImpl()->getDataLayout()->getPointerABIAlignment());
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
const MCExpr *E = MCConstantExpr::Create(Size, getContext());
Streamer.EmitELFSize(Label, E);
// FIXME: this is getting the alignment of the character, not the
// alignment of the global!
unsigned Align =
- TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV));
+ TM.getSubtargetImpl()->getDataLayout()->getPreferredAlignment(
+ cast<GlobalVariable>(GV));
const char *SizeSpec = ".rodata.str1.";
if (Kind.isMergeable2ByteCString())
// FIXME: Alignment check should be handled by section classifier.
if (Kind.isMergeable1ByteCString() &&
- TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
+ TM.getSubtargetImpl()->getDataLayout()->getPreferredAlignment(
+ cast<GlobalVariable>(GV)) < 32)
return CStringSection;
// Do not put 16-bit arrays in the UString section if they have an
// externally visible label; this runs into issues with certain linker
// versions.
if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() &&
- TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
+ TM.getSubtargetImpl()->getDataLayout()->getPreferredAlignment(
+ cast<GlobalVariable>(GV)) < 32)
return UStringSection;
if (Kind.isMergeableConst()) {
// for predicated defs.
unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
const MachineFunction &MF = *DefMI->getParent()->getParent();
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
return computeInstrLatency(DefMI);
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "twoaddrinstr"
MF = &Func;
const TargetMachine &TM = MF->getTarget();
MRI = &MF->getRegInfo();
- TII = TM.getInstrInfo();
- TRI = TM.getRegisterInfo();
- InstrItins = TM.getInstrItineraryData();
+ TII = TM.getSubtargetImpl()->getInstrInfo();
+ TRI = TM.getSubtargetImpl()->getRegisterInfo();
+ InstrItins = TM.getSubtargetImpl()->getInstrItineraryData();
LV = getAnalysisIfAvailable<LiveVariables>();
LIS = getAnalysisIfAvailable<LiveIntervals>();
AA = &getAnalysis<AliasAnalysis>();
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;
bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
- TII = mf.getTarget().getInstrInfo();
- TRI = mf.getTarget().getRegisterInfo();
+ TII = mf.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = mf.getTarget().getSubtargetImpl()->getRegisterInfo();
MF = &mf;
Virt2PhysMap.clear();
bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
TM = &MF->getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
+ TRI = TM->getSubtargetImpl()->getRegisterInfo();
+ TII = TM->getSubtargetImpl()->getInstrInfo();
MRI = &MF->getRegInfo();
Indexes = &getAnalysis<SlotIndexes>();
LIS = &getAnalysis<LiveIntervals>();
#include "llvm/Support/MutexGuard.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
// If the target supports JIT code generation, create the JIT.
- if (TargetJITInfo *TJ = TM->getJITInfo()) {
+ if (TargetJITInfo *TJ = TM->getSubtargetImpl()->getJITInfo()) {
return new JIT(M, *TM, *TJ, JMM, GVsWithCode);
} else {
if (ErrorStr)
: ExecutionEngine(M), TM(tm), TJI(tji),
JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()),
AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) {
- setDataLayout(TM.getDataLayout());
+ setDataLayout(TM.getSubtargetImpl()->getDataLayout());
jitstate = new JITState(M);
// Add target data
MutexGuard locked(lock);
FunctionPassManager &PM = jitstate->getPM();
- M->setDataLayout(TM.getDataLayout());
+ M->setDataLayout(TM.getSubtargetImpl()->getDataLayout());
PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory that
jitstate = new JITState(M);
FunctionPassManager &PM = jitstate->getPM();
- M->setDataLayout(TM.getDataLayout());
+ M->setDataLayout(TM.getSubtargetImpl()->getDataLayout());
PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
jitstate = new JITState(Modules[0]);
FunctionPassManager &PM = jitstate->getPM();
- M->setDataLayout(TM.getDataLayout());
+ M->setDataLayout(TM.getSubtargetImpl()->getDataLayout());
PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
ObjCache(nullptr) {
OwnedModules.addModule(m);
- setDataLayout(TM->getDataLayout());
+ setDataLayout(TM->getSubtargetImpl()->getDataLayout());
}
MCJIT::~MCJIT() {
PassManager PM;
- M->setDataLayout(TM->getDataLayout());
+ M->setDataLayout(TM->getSubtargetImpl()->getDataLayout());
PM.add(new DataLayoutPass(M));
// The RuntimeDyld will take ownership of this shortly
}
uint64_t MCJIT::getExistingSymbolAddress(const std::string &Name) {
- Mangler Mang(TM->getDataLayout());
+ Mangler Mang(TM->getSubtargetImpl()->getDataLayout());
SmallString<128> FullName;
Mang.getNameWithPrefix(FullName, Name);
return Dyld.getSymbolLoadAddress(FullName);
//
// This is the accessor for the target address, so make sure to check the
// load address of the symbol, not the local address.
- Mangler Mang(TM->getDataLayout());
+ Mangler Mang(TM->getSubtargetImpl()->getDataLayout());
SmallString<128> Name;
TM->getNameWithPrefix(Name, F, Mang);
return (void*)Dyld.getSymbolLoadAddress(Name);
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/ObjCARC.h"
passes.add(createDebugInfoVerifierPass());
// Mark which symbols cannot be internalized
- Mangler Mangler(TargetMach->getDataLayout());
+ Mangler Mangler(TargetMach->getSubtargetImpl()->getDataLayout());
std::vector<const char*> MustPreserveList;
SmallPtrSet<GlobalValue*, 8> AsmUsed;
std::vector<StringRef> Libcalls;
TargetLibraryInfo TLI(Triple(TargetMach->getTargetTriple()));
- accumulateAndSortLibcalls(Libcalls, TLI, TargetMach->getTargetLowering());
+ accumulateAndSortLibcalls(
+ Libcalls, TLI, TargetMach->getSubtargetImpl()->getTargetLowering());
for (Module::iterator f = mergedModule->begin(),
e = mergedModule->end(); f != e; ++f)
passes.add(createDebugInfoVerifierPass());
// Add an appropriate DataLayout instance for this module...
- mergedModule->setDataLayout(TargetMach->getDataLayout());
+ mergedModule->setDataLayout(TargetMach->getSubtargetImpl()->getDataLayout());
passes.add(new DataLayoutPass(mergedModule));
// Add appropriate TargetLibraryInfo for this module.
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include <system_error>
using namespace llvm;
TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr,
options);
M->materializeAllPermanently(true);
- M->setDataLayout(target->getDataLayout());
+ M->setDataLayout(target->getSubtargetImpl()->getDataLayout());
std::unique_ptr<object::IRObjectFile> IRObj(
new object::IRObjectFile(std::move(Buffer), std::move(M)));
MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
StringRef Op = _linkeropt_strings.
GetOrCreateValue(MDOption->getString()).getKey();
- StringRef DepLibName = _target->getTargetLowering()->
- getObjFileLowering().getDepLibFromLinkerOpt(Op);
+ StringRef DepLibName = _target->getSubtargetImpl()
+ ->getTargetLowering()
+ ->getObjFileLowering()
+ .getDepLibFromLinkerOpt(Op);
if (!DepLibName.empty())
_deplibs.push_back(DepLibName.data());
else if (!Op.empty())
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
+#include "AArch64Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
const TargetMachine &TM = mf.getTarget();
MRI = &mf.getRegInfo();
- TII = static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());
+ TII = static_cast<const AArch64InstrInfo *>(
+ TM.getSubtargetImpl()->getInstrInfo());
// Just check things on a one-block-at-a-time basis.
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
if (!Stubs.empty()) {
OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const DataLayout *TD = TM.getDataLayout();
+ const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
const TargetRegisterClass *RC,
bool isVector, raw_ostream &O) {
assert(MO.isReg() && "Should only get here with a register!");
- const AArch64RegisterInfo *RI =
- static_cast<const AArch64RegisterInfo *>(TM.getRegisterInfo());
+ const AArch64RegisterInfo *RI = static_cast<const AArch64RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
unsigned Reg = MO.getReg();
unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
assert(RI->regsOverlap(RegToPrint, Reg));
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
+#include "AArch64Subtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n");
- TII = (const AArch64InstrInfo *)MF->getTarget().getInstrInfo();
+ TII = (const AArch64InstrInfo *)MF->getTarget()
+ .getSubtargetImpl()
+ ->getInstrInfo();
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
- CCIf<"State.getTarget().getDataLayout()->isBigEndian()", A>;
+ CCIf<"State.getTarget().getSubtargetImpl()->getDataLayout()->isBigEndian()", A>;
//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
MachineFunction *MF = I->getParent()->getParent();
const AArch64TargetMachine *TM =
static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getInstrInfo();
+ const AArch64InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
// code sequence assumes the address will be.
MachineFunction *MF = I->getParent()->getParent();
const AArch64TargetMachine *TM =
static_cast<const AArch64TargetMachine *>(&MF->getTarget());
- const AArch64InstrInfo *TII = TM->getInstrInfo();
+ const AArch64InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
+#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
const MapRegToId &RegToId,
const MachineInstr *DummyOp, bool ADRPMode) {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
unsigned NbReg = RegToId.size();
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
const MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
MapRegToId RegToId;
MachineInstr *DummyOp = nullptr;
if (BasicBlockScopeOnly) {
- const AArch64InstrInfo *TII =
- static_cast<const AArch64InstrInfo *>(TM.getInstrInfo());
+ const AArch64InstrInfo *TII = static_cast<const AArch64InstrInfo *>(
+ TM.getSubtargetImpl()->getInstrInfo());
// For local analysis, create a dummy operation to record uses that are not
// local.
DummyOp = MF.CreateMachineInstr(TII->get(AArch64::COPY), DebugLoc());
/// runOnMachineFunction - Initialize per-function data structures.
void runOnMachineFunction(MachineFunction &MF) {
this->MF = &MF;
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
MRI = &MF.getRegInfo();
}
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getSubtargetImpl()->getInstrInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
SchedModel =
MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
MRI = &MF.getRegInfo();
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
#define DEBUG_TYPE "aarch64-dead-defs"
// Scan the function for instructions that have a dead definition of a
// register. Replace that register with the zero register when possible.
bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
- TRI = MF.getTarget().getRegisterInfo();
+ TRI = MF.getTarget().getSubtargetImpl()->getRegisterInfo();
bool Changed = false;
DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/MathExtras.h"
}
bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
- TII = static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+ TII = static_cast<const AArch64InstrInfo *>(
+ MF.getTarget().getSubtargetImpl()->getInstrInfo());
bool Modified = false;
for (auto &MBB : MF)
MFI->setFrameAddressIsTaken(true);
const AArch64RegisterInfo *RegInfo =
- static_cast<const AArch64RegisterInfo *>(TM.getRegisterInfo());
+ static_cast<const AArch64RegisterInfo *>(
+ TM.getSubtargetImpl()->getRegisterInfo());
unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
unsigned SrcReg = FramePtr;
const MachineFrameInfo *MFI = MF.getFrameInfo();
#ifndef NDEBUG
- const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *RegInfo =
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo();
assert(!RegInfo->needsStackRealignment(MF) &&
"No stack realignment on AArch64!");
#endif
void AArch64FrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const AArch64InstrInfo *TII =
- static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+ const AArch64InstrInfo *TII = static_cast<const AArch64InstrInfo *>(
+ MF.getTarget().getSubtargetImpl()->getInstrInfo());
DebugLoc DL = I->getDebugLoc();
int Opc = I->getOpcode();
bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ const TargetFrameLowering *TFI =
+ MF.getTarget().getSubtargetImpl()->getFrameLowering();
if (!TFI->hasReservedCallFrame(MF)) {
unsigned Align = getStackAlignment();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ const TargetInstrInfo *TII =
+ MF.getTarget().getSubtargetImpl()->getInstrInfo();
DebugLoc DL = MBB.findDebugLoc(MBBI);
// Add callee saved registers to move list.
if (CSI.empty())
return;
- const DataLayout *TD = MF.getTarget().getDataLayout();
+ const DataLayout *TD = MF.getTarget().getSubtargetImpl()->getDataLayout();
bool HasFP = hasFP(MF);
// Calculate the number of bytes used for storing the return address.
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
- MF.getTarget().getRegisterInfo());
- const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo());
+ const TargetInstrInfo *TII =
+ MF.getTarget().getSubtargetImpl()->getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
TII->copyPhysReg(MBB, MBBI, DL, AArch64::X19, AArch64::SP, false);
if (needsFrameMoves) {
- const DataLayout *TD = MF.getTarget().getDataLayout();
+ const DataLayout *TD = MF.getTarget().getSubtargetImpl()->getDataLayout();
const int StackGrowth = -TD->getPointerSize(0);
unsigned FramePtr = RegInfo->getFrameRegister(MF);
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
MachineFrameInfo *MFI = MF.getFrameInfo();
- const AArch64InstrInfo *TII =
- static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
+ const AArch64InstrInfo *TII = static_cast<const AArch64InstrInfo *>(
+ MF.getTarget().getSubtargetImpl()->getInstrInfo());
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
- MF.getTarget().getRegisterInfo());
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo());
DebugLoc DL = MBBI->getDebugLoc();
unsigned RetOpcode = MBBI->getOpcode();
bool PreferFP) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
- MF.getTarget().getRegisterInfo());
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo());
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
int FPOffset = MFI->getObjectOffset(FI) + 16;
int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const TargetInstrInfo &TII =
+ *MF.getTarget().getSubtargetImpl()->getInstrInfo();
unsigned Count = CSI.size();
DebugLoc DL;
assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ const TargetInstrInfo &TII =
+ *MF.getTarget().getSubtargetImpl()->getInstrInfo();
unsigned Count = CSI.size();
DebugLoc DL;
assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
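The assertion above (and its twin in the spill path) reflects that the callee-saved list is consumed two registers at a time, so each pair can be saved or restored with a single store-pair/load-pair instruction. A minimal sketch of that pairing, with hypothetical names, for illustration only:

#include <cassert>
#include <utility>
#include <vector>

// Group callee-saved registers into adjacent pairs, one STP/LDP per pair.
static std::vector<std::pair<unsigned, unsigned>>
pairCalleeSavedRegs(const std::vector<unsigned> &CSRegs) {
  assert((CSRegs.size() & 1) == 0 && "Odd number of callee-saved regs");
  std::vector<std::pair<unsigned, unsigned>> Pairs;
  for (size_t i = 0, e = CSRegs.size(); i != e; i += 2)
    Pairs.emplace_back(CSRegs[i], CSRegs[i + 1]);
  return Pairs;
}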
void AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(
MachineFunction &MF, RegScavenger *RS) const {
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
- MF.getTarget().getRegisterInfo());
+ MF.getTarget().getSubtargetImpl()->getRegisterInfo());
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
MachineRegisterInfo *MRI = &MF.getRegInfo();
SmallVector<unsigned, 4> UnspilledCSGPRs;
// EndBB:
// Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrInfo *TII =
+ getTargetMachine().getSubtargetImpl()->getInstrInfo();
MachineFunction *MF = MBB->getParent();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
DebugLoc DL = MI->getDebugLoc();
// Add a register mask operand representing the call-preserved registers.
const uint32_t *Mask;
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const AArch64RegisterInfo *ARI =
static_cast<const AArch64RegisterInfo *>(TRI);
if (IsThisReturn) {
// TLS calls preserve all registers except those that absolutely must be
// trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
// silly).
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const AArch64RegisterInfo *ARI =
static_cast<const AArch64RegisterInfo *>(TRI);
const uint32_t *Mask = ARI->getTLSCallPreservedMask();
// TLS calls preserve all registers except those that absolutely must be
// trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
// silly).
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *TRI =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
const AArch64RegisterInfo *ARI =
static_cast<const AArch64RegisterInfo *>(TRI);
const uint32_t *Mask = ARI->getTLSCallPreservedMask();