From 420761a0f193e87d08ee1c51b26bba23ab4bac7f Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Fri, 20 Apr 2012 07:30:17 +0000 Subject: [PATCH] Convert more uses of XXXRegisterClass to &XXXRegClass. No functional change since they are equivalent. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155188 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/ARM/ARMBaseRegisterInfo.cpp | 6 +- lib/Target/ARM/ARMCodeEmitter.cpp | 6 +- lib/Target/ARM/ARMFastISel.cpp | 27 ++-- lib/Target/ARM/ARMFrameLowering.cpp | 16 +-- lib/Target/ARM/ARMISelLowering.cpp | 143 ++++++++++---------- lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 2 +- lib/Target/ARM/Thumb1InstrInfo.cpp | 8 +- lib/Target/ARM/Thumb1RegisterInfo.cpp | 8 +- lib/Target/ARM/Thumb2InstrInfo.cpp | 12 +- lib/Target/CellSPU/SPUISelLowering.cpp | 38 +++--- lib/Target/CellSPU/SPUInstrInfo.cpp | 32 ++--- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 2 +- lib/Target/Hexagon/HexagonISelDAGToDAG.cpp | 6 +- lib/Target/Hexagon/HexagonISelLowering.cpp | 14 +- lib/Target/Hexagon/HexagonInstrInfo.cpp | 29 ++-- lib/Target/MBlaze/MBlazeAsmPrinter.cpp | 2 +- lib/Target/MBlaze/MBlazeISelLowering.cpp | 34 ++--- lib/Target/MBlaze/MBlazeInstrInfo.cpp | 2 +- lib/Target/MSP430/MSP430ISelLowering.cpp | 27 ++-- lib/Target/Mips/MipsAsmPrinter.cpp | 10 +- lib/Target/Mips/MipsFrameLowering.cpp | 5 +- lib/Target/Mips/MipsISelDAGToDAG.cpp | 3 +- lib/Target/Mips/MipsISelLowering.cpp | 38 +++--- lib/Target/Mips/MipsInstrInfo.cpp | 20 +-- lib/Target/Mips/MipsMachineFunction.cpp | 6 +- lib/Target/Mips/MipsRegisterInfo.cpp | 17 ++- lib/Target/PTX/PTXISelLowering.cpp | 46 +++---- lib/Target/PTX/PTXMFInfoExtract.cpp | 12 +- lib/Target/XCore/XCoreFrameLowering.cpp | 2 +- lib/Target/XCore/XCoreISelLowering.cpp | 10 +- lib/Target/XCore/XCoreRegisterInfo.cpp | 5 +- 31 files changed, 292 insertions(+), 296 deletions(-) diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 3907f753526..2a09b20b9c6 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -258,7 +258,7 @@ ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const TargetRegisterClass * ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const { - return ARM::GPRRegisterClass; + return &ARM::GPRRegClass; } const TargetRegisterClass * @@ -369,7 +369,7 @@ ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC, }; // We only support even/odd hints for GPR and rGPR. - if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass) + if (RC != &ARM::GPRRegClass && RC != &ARM::rGPRRegClass) return RC->getRawAllocationOrder(MF); if (HintType == ARMRI::RegPairEven) { @@ -1110,7 +1110,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Must be addrmode4/6. 
MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false); else { - ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass); + ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass); if (!AFI->isThumbFunction()) emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, Pred, PredReg, TII); diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index 32ef345c058..fc445a93392 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -1541,7 +1541,7 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) { static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) { unsigned RegD = MI.getOperand(OpIdx).getReg(); unsigned Binary = 0; - bool isSPVFP = ARM::SPRRegisterClass->contains(RegD); + bool isSPVFP = ARM::SPRRegClass.contains(RegD); RegD = getARMRegisterNumbering(RegD); if (!isSPVFP) Binary |= RegD << ARMII::RegRdShift; @@ -1555,7 +1555,7 @@ static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) { static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) { unsigned RegN = MI.getOperand(OpIdx).getReg(); unsigned Binary = 0; - bool isSPVFP = ARM::SPRRegisterClass->contains(RegN); + bool isSPVFP = ARM::SPRRegClass.contains(RegN); RegN = getARMRegisterNumbering(RegN); if (!isSPVFP) Binary |= RegN << ARMII::RegRnShift; @@ -1569,7 +1569,7 @@ static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) { static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) { unsigned RegM = MI.getOperand(OpIdx).getReg(); unsigned Binary = 0; - bool isSPVFP = ARM::SPRRegisterClass->contains(RegM); + bool isSPVFP = ARM::SPRRegClass.contains(RegM); RegM = getARMRegisterNumbering(RegM); if (!isSPVFP) Binary |= RegM; diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 2e1eaca85b5..de3e430b440 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -910,8 +910,9 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) { // put the alloca address into a register, set the base type back to // register and continue. This should almost never happen. if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { - const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass - : ARM::GPRRegisterClass; + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned ResultReg = createResultReg(RC); unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, @@ -1005,7 +1006,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, useAM3 = true; } } - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; break; case MVT::i16: if (isThumb2) { @@ -1017,7 +1018,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, Opc = isZExt ? ARM::LDRH : ARM::LDRSH; useAM3 = true; } - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; break; case MVT::i32: if (isThumb2) { @@ -1028,7 +1029,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, } else { Opc = ARM::LDRi12; } - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; break; case MVT::f32: if (!Subtarget->hasVFP2()) return false; @@ -1037,7 +1038,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, needVMOV = true; VT = MVT::i32; Opc = isThumb2 ? 
ARM::t2LDRi12 : ARM::LDRi12; - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; } else { Opc = ARM::VLDRS; RC = TLI.getRegClassFor(VT); @@ -1106,8 +1107,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, // This is mostly going to be Neon/vector support. default: return false; case MVT::i1: { - unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass : - ARM::GPRRegisterClass); + unsigned Res = createResultReg(isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass); unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), Res) @@ -1491,8 +1493,9 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { // Now set a register based on the comparison. Explicitly set the predicates // here. unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; - const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass - : ARM::GPRRegisterClass; + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned DestReg = createResultReg(RC); Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); unsigned ZeroReg = TargetMaterializeConstant(Zero); @@ -1516,7 +1519,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) { unsigned Op = getRegForValue(V); if (Op == 0) return false; - unsigned Result = createResultReg(ARM::DPRRegisterClass); + unsigned Result = createResultReg(&ARM::DPRRegClass); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::VCVTDS), Result) .addReg(Op)); @@ -1535,7 +1538,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) { unsigned Op = getRegForValue(V); if (Op == 0) return false; - unsigned Result = createResultReg(ARM::SPRRegisterClass); + unsigned Result = createResultReg(&ARM::SPRRegClass); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::VCVTSD), Result) .addReg(Op)); diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index 402ecb0c5ff..2629496cc59 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -790,7 +790,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB, // The writeback is only needed when emitting two vst1.64 instructions. if (NumAlignedDPRCS2Regs >= 6) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QQPRRegisterClass); + &ARM::QQPRRegClass); MBB.addLiveIn(SupReg); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed), ARM::R4) @@ -808,7 +808,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB, // 16-byte aligned vst1.64 with 4 d-regs, no writeback. if (NumAlignedDPRCS2Regs >= 4) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QQPRRegisterClass); + &ARM::QQPRRegClass); MBB.addLiveIn(SupReg); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q)) .addReg(ARM::R4).addImm(16).addReg(NextReg) @@ -820,7 +820,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB, // 16-byte aligned vst1.64 with 2 d-regs. 
if (NumAlignedDPRCS2Regs >= 2) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QPRRegisterClass); + &ARM::QPRRegClass); MBB.addLiveIn(SupReg); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64)) .addReg(ARM::R4).addImm(16).addReg(SupReg)); @@ -908,7 +908,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB, // 16-byte aligned vld1.64 with 4 d-regs and writeback. if (NumAlignedDPRCS2Regs >= 6) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QQPRRegisterClass); + &ARM::QQPRRegClass); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg) .addReg(ARM::R4, RegState::Define) .addReg(ARM::R4, RegState::Kill).addImm(16) @@ -924,7 +924,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB, // 16-byte aligned vld1.64 with 4 d-regs, no writeback. if (NumAlignedDPRCS2Regs >= 4) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QQPRRegisterClass); + &ARM::QQPRRegClass); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg) .addReg(ARM::R4).addImm(16) .addReg(SupReg, RegState::ImplicitDefine)); @@ -935,7 +935,7 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB, // 16-byte aligned vld1.64 with 2 d-regs. if (NumAlignedDPRCS2Regs >= 2) { unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0, - ARM::QPRRegisterClass); + &ARM::QPRRegClass); AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg) .addReg(ARM::R4).addImm(16)); NextReg += 2; @@ -1244,7 +1244,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, CanEliminateFrame = false; } - if (!ARM::GPRRegisterClass->contains(Reg)) + if (!ARM::GPRRegClass.contains(Reg)) continue; if (Spilled) { @@ -1404,7 +1404,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, } else if (!AFI->isThumb1OnlyFunction()) { // note: Thumb1 functions spill to R12, not the stack. Reserve a slot // closest to SP or frame pointer. 
- const TargetRegisterClass *RC = ARM::GPRRegisterClass; + const TargetRegisterClass *RC = &ARM::GPRRegClass; RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false)); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index a103c94cede..e193672ef7a 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -153,12 +153,12 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, } void ARMTargetLowering::addDRTypeForNEON(EVT VT) { - addRegisterClass(VT, ARM::DPRRegisterClass); + addRegisterClass(VT, &ARM::DPRRegClass); addTypeForNEON(VT, MVT::f64, MVT::v2i32); } void ARMTargetLowering::addQRTypeForNEON(EVT VT) { - addRegisterClass(VT, ARM::QPRRegisterClass); + addRegisterClass(VT, &ARM::QPRRegClass); addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); } @@ -431,14 +431,14 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) } if (Subtarget->isThumb1Only()) - addRegisterClass(MVT::i32, ARM::tGPRRegisterClass); + addRegisterClass(MVT::i32, &ARM::tGPRRegClass); else - addRegisterClass(MVT::i32, ARM::GPRRegisterClass); + addRegisterClass(MVT::i32, &ARM::GPRRegClass); if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { - addRegisterClass(MVT::f32, ARM::SPRRegisterClass); + addRegisterClass(MVT::f32, &ARM::SPRRegClass); if (!Subtarget->isFPOnlySP()) - addRegisterClass(MVT::f64, ARM::DPRRegisterClass); + addRegisterClass(MVT::f64, &ARM::DPRRegClass); setTruncStoreAction(MVT::f64, MVT::f32, Expand); } @@ -849,7 +849,7 @@ ARMTargetLowering::findRepresentativeClass(EVT VT) const{ // the cost is 1 for both f32 and f64. case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: - RRC = ARM::DPRRegisterClass; + RRC = &ARM::DPRRegClass; // When NEON is used for SP, only half of the register file is available // because operations that define both SP and DP results will be constrained // to the VFP2 class (D0-D15). We currently model this constraint prior to @@ -859,15 +859,15 @@ ARMTargetLowering::findRepresentativeClass(EVT VT) const{ break; case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: case MVT::v4f32: case MVT::v2f64: - RRC = ARM::DPRRegisterClass; + RRC = &ARM::DPRRegClass; Cost = 2; break; case MVT::v4i64: - RRC = ARM::DPRRegisterClass; + RRC = &ARM::DPRRegClass; Cost = 4; break; case MVT::v8i64: - RRC = ARM::DPRRegisterClass; + RRC = &ARM::DPRRegClass; Cost = 8; break; } @@ -1027,9 +1027,9 @@ const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { // load / store 4 to 8 consecutive D registers. if (Subtarget->hasNEON()) { if (VT == MVT::v4i64) - return ARM::QQPRRegisterClass; - else if (VT == MVT::v8i64) - return ARM::QQQQPRRegisterClass; + return &ARM::QQPRRegClass; + if (VT == MVT::v8i64) + return &ARM::QQQQPRRegClass; } return TargetLowering::getRegClassFor(VT); } @@ -2457,9 +2457,9 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, const TargetRegisterClass *RC; if (AFI->isThumb1OnlyFunction()) - RC = ARM::tGPRRegisterClass; + RC = &ARM::tGPRRegClass; else - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; // Transform the arguments stored in physical registers into virtual ones. 
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); @@ -2543,9 +2543,9 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { const TargetRegisterClass *RC; if (AFI->isThumb1OnlyFunction()) - RC = ARM::tGPRRegisterClass; + RC = &ARM::tGPRRegClass; else - RC = ARM::GPRRegisterClass; + RC = &ARM::GPRRegClass; unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); @@ -2627,14 +2627,15 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, const TargetRegisterClass *RC; if (RegVT == MVT::f32) - RC = ARM::SPRRegisterClass; + RC = &ARM::SPRRegClass; else if (RegVT == MVT::f64) - RC = ARM::DPRRegisterClass; + RC = &ARM::DPRRegClass; else if (RegVT == MVT::v2f64) - RC = ARM::QPRRegisterClass; + RC = &ARM::QPRRegClass; else if (RegVT == MVT::i32) - RC = (AFI->isThumb1OnlyFunction() ? - ARM::tGPRRegisterClass : ARM::GPRRegisterClass); + RC = AFI->isThumb1OnlyFunction() ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; else llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); @@ -5252,14 +5253,14 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, bool isThumb2 = Subtarget->isThumb2(); MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - unsigned scratch = - MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass - : ARM::GPRRegisterClass); + unsigned scratch = MRI.createVirtualRegister(isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass); if (isThumb2) { - MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); - MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); - MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); + MRI.constrainRegClass(dest, &ARM::rGPRRegClass); + MRI.constrainRegClass(oldval, &ARM::rGPRRegClass); + MRI.constrainRegClass(newval, &ARM::rGPRRegClass); } unsigned ldrOpc, strOpc; @@ -5362,8 +5363,8 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); if (isThumb2) { - MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); - MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); + MRI.constrainRegClass(dest, &ARM::rGPRRegClass); + MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); } unsigned ldrOpc, strOpc; @@ -5394,8 +5395,9 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); - const TargetRegisterClass *TRC = - isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; + const TargetRegisterClass *TRC = isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned scratch = MRI.createVirtualRegister(TRC); unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); @@ -5469,8 +5471,8 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); if (isThumb2) { - MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); - MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); + MRI.constrainRegClass(dest, &ARM::rGPRRegClass); + MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); } unsigned ldrOpc, strOpc, extendOpc; @@ -5504,8 +5506,9 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); - const TargetRegisterClass *TRC = - isThumb2 ? 
ARM::tGPRRegisterClass : ARM::GPRRegisterClass; + const TargetRegisterClass *TRC = isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned scratch = MRI.createVirtualRegister(TRC); unsigned scratch2 = MRI.createVirtualRegister(TRC); @@ -5531,7 +5534,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, // Sign extend the value, if necessary. if (signExtend && extendOpc) { - oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); + oldval = MRI.createVirtualRegister(&ARM::GPRRegClass); AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) .addReg(dest) .addImm(0)); @@ -5586,9 +5589,9 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); if (isThumb2) { - MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); - MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); - MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); + MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); + MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); + MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); } unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; @@ -5614,8 +5617,9 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, BB->end()); exitMBB->transferSuccessorsAndUpdatePHIs(BB); - const TargetRegisterClass *TRC = - isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; + const TargetRegisterClass *TRC = isThumb2 ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned storesuccess = MRI.createVirtualRegister(TRC); // thisMBB: @@ -5722,8 +5726,9 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); - const TargetRegisterClass *TRC = - isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; + const TargetRegisterClass *TRC = isThumb ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; // Grab constant pool and fixed stack memory operands. MachineMemOperand *CPMMO = @@ -5827,8 +5832,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { MachineFrameInfo *MFI = MF->getFrameInfo(); int FI = MFI->getFunctionContextIndex(); - const TargetRegisterClass *TRC = - Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; + const TargetRegisterClass *TRC = Subtarget->isThumb() ? + (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; // Get a mapping of the call site numbers to all of the landing pads they're // associated with. 
@@ -6176,14 +6182,12 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { for (unsigned i = 0; SavedRegs[i] != 0; ++i) { unsigned Reg = SavedRegs[i]; if (Subtarget->isThumb2() && - !ARM::tGPRRegisterClass->contains(Reg) && - !ARM::hGPRRegisterClass->contains(Reg)) + !ARM::tGPRRegClass.contains(Reg) && + !ARM::hGPRRegClass.contains(Reg)) continue; - else if (Subtarget->isThumb1Only() && - !ARM::tGPRRegisterClass->contains(Reg)) + if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) continue; - else if (!Subtarget->isThumb() && - !ARM::GPRRegisterClass->contains(Reg)) + if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) continue; if (!DefRegs[Reg]) MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); @@ -6517,10 +6521,12 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineRegisterInfo &MRI = Fn->getRegInfo(); // In Thumb mode S must not be specified if source register is the SP or // PC and if destination register is the SP, so restrict register class - unsigned NewMovDstReg = MRI.createVirtualRegister( - isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); - unsigned NewRsbDstReg = MRI.createVirtualRegister( - isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); + unsigned NewMovDstReg = MRI.createVirtualRegister(isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass); + unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass); // Transfer the remainder of BB and its successor edges to sinkMBB. SinkBB->splice(SinkBB->begin(), BB, @@ -9030,39 +9036,38 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, switch (Constraint[0]) { case 'l': // Low regs or general regs. if (Subtarget->isThumb()) - return RCPair(0U, ARM::tGPRRegisterClass); - else - return RCPair(0U, ARM::GPRRegisterClass); + return RCPair(0U, &ARM::tGPRRegClass); + return RCPair(0U, &ARM::GPRRegClass); case 'h': // High regs or no regs. 
if (Subtarget->isThumb()) - return RCPair(0U, ARM::hGPRRegisterClass); + return RCPair(0U, &ARM::hGPRRegClass); break; case 'r': - return RCPair(0U, ARM::GPRRegisterClass); + return RCPair(0U, &ARM::GPRRegClass); case 'w': if (VT == MVT::f32) - return RCPair(0U, ARM::SPRRegisterClass); + return RCPair(0U, &ARM::SPRRegClass); if (VT.getSizeInBits() == 64) - return RCPair(0U, ARM::DPRRegisterClass); + return RCPair(0U, &ARM::DPRRegClass); if (VT.getSizeInBits() == 128) - return RCPair(0U, ARM::QPRRegisterClass); + return RCPair(0U, &ARM::QPRRegClass); break; case 'x': if (VT == MVT::f32) - return RCPair(0U, ARM::SPR_8RegisterClass); + return RCPair(0U, &ARM::SPR_8RegClass); if (VT.getSizeInBits() == 64) - return RCPair(0U, ARM::DPR_8RegisterClass); + return RCPair(0U, &ARM::DPR_8RegClass); if (VT.getSizeInBits() == 128) - return RCPair(0U, ARM::QPR_8RegisterClass); + return RCPair(0U, &ARM::QPR_8RegClass); break; case 't': if (VT == MVT::f32) - return RCPair(0U, ARM::SPRRegisterClass); + return RCPair(0U, &ARM::SPRRegClass); break; } } if (StringRef("{cc}").equals_lower(Constraint)) - return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); + return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 9ef2ace29cf..87e68641085 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1326,7 +1326,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { // First advance to the instruction just before the start of the chain. AdvanceRS(MBB, MemOps); // Find a scratch register. - unsigned Scratch = RS->FindUnusedReg(ARM::GPRRegisterClass); + unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass); // Process the load / store instructions. 
RS->forward(prior(MBBI)); diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp index e03e75815c7..735b255759b 100644 --- a/lib/Target/ARM/Thumb1InstrInfo.cpp +++ b/lib/Target/ARM/Thumb1InstrInfo.cpp @@ -53,11 +53,11 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert((RC == ARM::tGPRRegisterClass || + assert((RC == &ARM::tGPRRegClass || (TargetRegisterInfo::isPhysicalRegister(SrcReg) && isARMLowRegister(SrcReg))) && "Unknown regclass!"); - if (RC == ARM::tGPRRegisterClass || + if (RC == &ARM::tGPRRegClass || (TargetRegisterInfo::isPhysicalRegister(SrcReg) && isARMLowRegister(SrcReg))) { DebugLoc DL; @@ -81,11 +81,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert((RC == ARM::tGPRRegisterClass || + assert((RC == &ARM::tGPRRegClass || (TargetRegisterInfo::isPhysicalRegister(DestReg) && isARMLowRegister(DestReg))) && "Unknown regclass!"); - if (RC == ARM::tGPRRegisterClass || + if (RC == &ARM::tGPRRegClass || (TargetRegisterInfo::isPhysicalRegister(DestReg) && isARMLowRegister(DestReg))) { DebugLoc DL; diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp index ef77bbd21a4..02d0ca2d402 100644 --- a/lib/Target/ARM/Thumb1RegisterInfo.cpp +++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp @@ -49,13 +49,13 @@ const TargetRegisterClass* Thumb1RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const { if (ARM::tGPRRegClass.hasSubClassEq(RC)) - return ARM::tGPRRegisterClass; + return &ARM::tGPRRegClass; return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC); } const TargetRegisterClass * Thumb1RegisterInfo::getPointerRegClass(unsigned Kind) const { - return ARM::tGPRRegisterClass; + return &ARM::tGPRRegClass; } /// emitLoadConstPool - Emits a load from constpool to materialize the @@ -109,7 +109,7 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB, unsigned LdReg = DestReg; if (DestReg == ARM::SP) { assert(BaseReg == ARM::SP && "Unexpected!"); - LdReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass); + LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass); } if (NumBytes <= 255 && NumBytes >= 0) @@ -693,7 +693,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // register. The offset is already handled in the vreg value. 
MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false); } else if (MI.mayStore()) { - VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass); + VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass); bool UseRR = false; if (Opcode == ARM::tSTRspi) { diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp index 8ab486b0c1b..2097bb9a93d 100644 --- a/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -126,9 +126,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass || - RC == ARM::tcGPRRegisterClass || RC == ARM::rGPRRegisterClass || - RC == ARM::GPRnopcRegisterClass) { + if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass || + RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass || + RC == &ARM::GPRnopcRegClass) { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); @@ -153,9 +153,9 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass || - RC == ARM::tcGPRRegisterClass || RC == ARM::rGPRRegisterClass || - RC == ARM::GPRnopcRegisterClass) { + if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass || + RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass || + RC == &ARM::GPRnopcRegClass) { DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 062374127e2..da6ed94af47 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -100,13 +100,13 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) setLibcallName(RTLIB::DIV_F64, "__fast_divdf3"); // Set up the SPU's register classes: - addRegisterClass(MVT::i8, SPU::R8CRegisterClass); - addRegisterClass(MVT::i16, SPU::R16CRegisterClass); - addRegisterClass(MVT::i32, SPU::R32CRegisterClass); - addRegisterClass(MVT::i64, SPU::R64CRegisterClass); - addRegisterClass(MVT::f32, SPU::R32FPRegisterClass); - addRegisterClass(MVT::f64, SPU::R64FPRegisterClass); - addRegisterClass(MVT::i128, SPU::GPRCRegisterClass); + addRegisterClass(MVT::i8, &SPU::R8CRegClass); + addRegisterClass(MVT::i16, &SPU::R16CRegClass); + addRegisterClass(MVT::i32, &SPU::R32CRegClass); + addRegisterClass(MVT::i64, &SPU::R64CRegClass); + addRegisterClass(MVT::f32, &SPU::R32FPRegClass); + addRegisterClass(MVT::f64, &SPU::R64FPRegClass); + addRegisterClass(MVT::i128, &SPU::GPRCRegClass); // SPU has no sign or zero extended loads for i1, i8, i16: setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); @@ -397,12 +397,12 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // First set operation action for all vector types to expand. Then we // will selectively turn on ones that can be effectively codegen'd. 
- addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass); - addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass); - addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass); - addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass); - addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass); - addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass); + addRegisterClass(MVT::v16i8, &SPU::VECREGRegClass); + addRegisterClass(MVT::v8i16, &SPU::VECREGRegClass); + addRegisterClass(MVT::v4i32, &SPU::VECREGRegClass); + addRegisterClass(MVT::v2i64, &SPU::VECREGRegClass); + addRegisterClass(MVT::v4f32, &SPU::VECREGRegClass); + addRegisterClass(MVT::v2f64, &SPU::VECREGRegClass); for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { @@ -3139,16 +3139,16 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case 'b': // R1-R31 case 'r': // R0-R31 if (VT == MVT::i64) - return std::make_pair(0U, SPU::R64CRegisterClass); - return std::make_pair(0U, SPU::R32CRegisterClass); + return std::make_pair(0U, &SPU::R64CRegClass); + return std::make_pair(0U, &SPU::R32CRegClass); case 'f': if (VT == MVT::f32) - return std::make_pair(0U, SPU::R32FPRegisterClass); - else if (VT == MVT::f64) - return std::make_pair(0U, SPU::R64FPRegisterClass); + return std::make_pair(0U, &SPU::R32FPRegClass); + if (VT == MVT::f64) + return std::make_pair(0U, &SPU::R64FPRegClass); break; case 'v': - return std::make_pair(0U, SPU::GPRCRegisterClass); + return std::make_pair(0U, &SPU::GPRCRegClass); } } diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp index a30de4688ee..b599d7bf762 100644 --- a/lib/Target/CellSPU/SPUInstrInfo.cpp +++ b/lib/Target/CellSPU/SPUInstrInfo.cpp @@ -143,21 +143,21 @@ SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, const TargetRegisterInfo *TRI) const { unsigned opc; bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset()); - if (RC == SPU::GPRCRegisterClass) + if (RC == &SPU::GPRCRegClass) opc = isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128; - else if (RC == SPU::R64CRegisterClass) + else if (RC == &SPU::R64CRegClass) opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64; - else if (RC == SPU::R64FPRegisterClass) + else if (RC == &SPU::R64FPRegClass) opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64; - else if (RC == SPU::R32CRegisterClass) + else if (RC == &SPU::R32CRegClass) opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32; - else if (RC == SPU::R32FPRegisterClass) + else if (RC == &SPU::R32FPRegClass) opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32; - else if (RC == SPU::R16CRegisterClass) + else if (RC == &SPU::R16CRegClass) opc = isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16; - else if (RC == SPU::R8CRegisterClass) + else if (RC == &SPU::R8CRegClass) opc = isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8; - else if (RC == SPU::VECREGRegisterClass) + else if (RC == &SPU::VECREGRegClass) opc = isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8; else llvm_unreachable("Unknown regclass!"); @@ -176,21 +176,21 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, const TargetRegisterInfo *TRI) const { unsigned opc; bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset()); - if (RC == SPU::GPRCRegisterClass) + if (RC == &SPU::GPRCRegClass) opc = isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128; - else if (RC == SPU::R64CRegisterClass) + else if (RC == &SPU::R64CRegClass) opc = isValidFrameIdx ? 
SPU::LQDr64 : SPU::LQXr64; - else if (RC == SPU::R64FPRegisterClass) + else if (RC == &SPU::R64FPRegClass) opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64; - else if (RC == SPU::R32CRegisterClass) + else if (RC == &SPU::R32CRegClass) opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32; - else if (RC == SPU::R32FPRegisterClass) + else if (RC == &SPU::R32FPRegClass) opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32; - else if (RC == SPU::R16CRegisterClass) + else if (RC == &SPU::R16CRegClass) opc = isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16; - else if (RC == SPU::R8CRegisterClass) + else if (RC == &SPU::R8CRegClass) opc = isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8; - else if (RC == SPU::VECREGRegisterClass) + else if (RC == &SPU::VECREGRegClass) opc = isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8; else llvm_unreachable("Unknown regclass in loadRegFromStackSlot!"); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 57772a514d5..c31cf43a11e 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -623,7 +623,7 @@ void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF, const TargetInstrInfo *TII = MF.getTarget().getInstrInfo(); MachineBasicBlock *MBB = MII->getParent(); DebugLoc DL = MII->getDebugLoc(); - unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0); + unsigned Scratch = RS.scavengeRegister(&Hexagon::IntRegsRegClass, MII, 0); // First, set the LC0 with the trip count. if (MII->getOperand(1).isReg()) { diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 9df965efc14..e3520c401ec 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1130,10 +1130,10 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { SDNode *Arg = N->getOperand(i).getNode(); const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI); - if (RC == Hexagon::IntRegsRegisterClass || - RC == Hexagon::DoubleRegsRegisterClass) { + if (RC == &Hexagon::IntRegsRegClass || + RC == &Hexagon::DoubleRegsRegClass) { Ops.push_back(SDValue(Arg, 0)); - } else if (RC == Hexagon::PredRegsRegisterClass) { + } else if (RC == &Hexagon::PredRegsRegClass) { // Do the transfer. SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1, SDValue(Arg, 0)); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index d6da0d0911b..5ea97fdb818 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -839,12 +839,12 @@ const { EVT RegVT = VA.getLocVT(); if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) { unsigned VReg = - RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass); + RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); } else if (RegVT == MVT::i64) { unsigned VReg = - RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass); + RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); } else { @@ -1009,10 +1009,10 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine TM(targetmachine) { // Set up the register classes. 
- addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass); - addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass); + addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass); + addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass); - addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass); + addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass); computeRegisterProperties(); @@ -1402,9 +1402,9 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const case MVT::i32: case MVT::i16: case MVT::i8: - return std::make_pair(0U, Hexagon::IntRegsRegisterClass); + return std::make_pair(0U, &Hexagon::IntRegsRegClass); case MVT::i64: - return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass); + return std::make_pair(0U, &Hexagon::DoubleRegsRegClass); } default: llvm_unreachable("Unknown asm register class"); diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 77b366372cf..a3918692db0 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -370,15 +370,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MFI.getObjectSize(FI), Align); - if (Hexagon::IntRegsRegisterClass->hasSubClassEq(RC)) { + if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::STriw)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); - } else if (Hexagon::DoubleRegsRegisterClass->hasSubClassEq(RC)) { + } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::STrid)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); - } else if (Hexagon::PredRegsRegisterClass->hasSubClassEq(RC)) { + } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::STriw_pred)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); @@ -416,13 +416,13 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MFI.getObjectSize(FI), Align); - if (RC == Hexagon::IntRegsRegisterClass) { + if (RC == &Hexagon::IntRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); - } else if (RC == Hexagon::DoubleRegsRegisterClass) { + } else if (RC == &Hexagon::DoubleRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); - } else if (RC == Hexagon::PredRegsRegisterClass) { + } else if (RC == &Hexagon::PredRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else { @@ -452,15 +452,14 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *TRC; - if (VT == MVT::i1) { - TRC = Hexagon::PredRegsRegisterClass; - } else if (VT == MVT::i32) { - TRC = Hexagon::IntRegsRegisterClass; - } else if (VT == MVT::i64) { - TRC = Hexagon::DoubleRegsRegisterClass; - } else { + if (VT == MVT::i1) + TRC = &Hexagon::PredRegsRegClass; + else if (VT == MVT::i32) + TRC = &Hexagon::IntRegsRegClass; + else if (VT == MVT::i64) + TRC = &Hexagon::DoubleRegsRegClass; + else llvm_unreachable("Cannot handle this register class"); - } unsigned NewReg = RegInfo.createVirtualRegister(TRC); return NewReg; @@ -1331,7 +1330,7 @@ HexagonInstrInfo::DefinesPredicate(MachineInstr *MI, MachineOperand MO = MI->getOperand(oper); if (MO.isReg() && MO.isDef()) { const 
TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg()); - if (RC == Hexagon::PredRegsRegisterClass) { + if (RC == &Hexagon::PredRegsRegClass) { Pred.push_back(MO); return true; } diff --git a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp index 55fffe3ebfa..72c336557df 100644 --- a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp +++ b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp @@ -135,7 +135,7 @@ void MBlazeAsmPrinter::printSavedRegsBitmask() { for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); unsigned RegNum = getMBlazeRegisterNumbering(Reg); - if (MBlaze::GPRRegisterClass->contains(Reg)) + if (MBlaze::GPRRegClass.contains(Reg)) CPUBitmask |= (1 << RegNum); } diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp index edfc3355691..1aa2c3c58e7 100644 --- a/lib/Target/MBlaze/MBlazeISelLowering.cpp +++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp @@ -62,9 +62,9 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM) setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? // Set up the register classes - addRegisterClass(MVT::i32, MBlaze::GPRRegisterClass); + addRegisterClass(MVT::i32, &MBlaze::GPRRegClass); if (Subtarget->hasFPU()) { - addRegisterClass(MVT::f32, MBlaze::GPRRegisterClass); + addRegisterClass(MVT::f32, &MBlaze::GPRRegClass); setOperationAction(ISD::ConstantFP, MVT::f32, Legal); } @@ -291,12 +291,12 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI, loop->addSuccessor(finish); loop->addSuccessor(loop); - unsigned IAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned IAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(MBB, dl, TII->get(MBlaze::ANDI), IAMT) .addReg(MI->getOperand(2).getReg()) .addImm(31); - unsigned IVAL = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned IVAL = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(MBB, dl, TII->get(MBlaze::ADDIK), IVAL) .addReg(MI->getOperand(1).getReg()) .addImm(0); @@ -305,14 +305,14 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI, .addReg(IAMT) .addMBB(finish); - unsigned DST = R.createVirtualRegister(MBlaze::GPRRegisterClass); - unsigned NDST = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned DST = R.createVirtualRegister(&MBlaze::GPRRegClass); + unsigned NDST = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(loop, dl, TII->get(MBlaze::PHI), DST) .addReg(IVAL).addMBB(MBB) .addReg(NDST).addMBB(loop); - unsigned SAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass); - unsigned NAMT = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned SAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); + unsigned NAMT = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(loop, dl, TII->get(MBlaze::PHI), SAMT) .addReg(IAMT).addMBB(MBB) .addReg(NAMT).addMBB(loop); @@ -500,7 +500,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI, case MBlaze::LAN32: opcode = MBlaze::AND; break; } - finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass); + finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass); start->addSuccessor(exit); start->addSuccessor(start); @@ -510,7 +510,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI, if (MI->getOpcode() == MBlaze::LAN32) { unsigned tmp = finalReg; - finalReg = R.createVirtualRegister(MBlaze::GPRRegisterClass); + finalReg = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(start, dl, TII->get(MBlaze::XORI), finalReg) .addReg(tmp) .addImm(-1); @@ -528,7 
+528,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI, final->addSuccessor(exit); final->addSuccessor(start); - unsigned CMP = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned CMP = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(start, dl, TII->get(MBlaze::CMP), CMP) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(2).getReg()); @@ -543,7 +543,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI, } } - unsigned CHK = R.createVirtualRegister(MBlaze::GPRRegisterClass); + unsigned CHK = R.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(final, dl, TII->get(MBlaze::SWX)) .addReg(finalReg) .addReg(MI->getOperand(1).getReg()) @@ -899,9 +899,9 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const TargetRegisterClass *RC; if (RegVT == MVT::i32) - RC = MBlaze::GPRRegisterClass; + RC = &MBlaze::GPRRegClass; else if (RegVT == MVT::f32) - RC = MBlaze::GPRRegisterClass; + RC = &MBlaze::GPRRegClass; else llvm_unreachable("RegVT not supported by LowerFormalArguments"); @@ -964,7 +964,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, StackPtr = DAG.getRegister(StackReg, getPointerTy()); // The last register argument that must be saved is MBlaze::R10 - const TargetRegisterClass *RC = MBlaze::GPRRegisterClass; + const TargetRegisterClass *RC = &MBlaze::GPRRegClass; unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5); unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1); @@ -1124,14 +1124,14 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': - return std::make_pair(0U, MBlaze::GPRRegisterClass); + return std::make_pair(0U, &MBlaze::GPRRegClass); // TODO: These can't possibly be right, but match what was in // getRegClassForInlineAsmConstraint. case 'd': case 'y': case 'f': if (VT == MVT::f32) - return std::make_pair(0U, MBlaze::GPRRegisterClass); + return std::make_pair(0U, &MBlaze::GPRRegClass); } } return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); diff --git a/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/lib/Target/MBlaze/MBlazeInstrInfo.cpp index db71434443b..b5025fc8ee6 100644 --- a/lib/Target/MBlaze/MBlazeInstrInfo.cpp +++ b/lib/Target/MBlaze/MBlazeInstrInfo.cpp @@ -287,7 +287,7 @@ unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const { MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); - GlobalBaseReg = RegInfo.createVirtualRegister(MBlaze::GPRRegisterClass); + GlobalBaseReg = RegInfo.createVirtualRegister(&MBlaze::GPRRegClass); BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), GlobalBaseReg).addReg(MBlaze::R20); RegInfo.addLiveIn(MBlaze::R20); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index 071a2f7de2c..f0d3774e3ba 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -64,8 +64,8 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) : TD = getTargetData(); // Set up the register classes. 
- addRegisterClass(MVT::i8, MSP430::GR8RegisterClass); - addRegisterClass(MVT::i16, MSP430::GR16RegisterClass); + addRegisterClass(MVT::i8, &MSP430::GR8RegClass); + addRegisterClass(MVT::i16, &MSP430::GR16RegClass); // Compute derived properties from the register classes computeRegisterProperties(); @@ -226,9 +226,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint, default: break; case 'r': // GENERAL_REGS if (VT == MVT::i8) - return std::make_pair(0U, MSP430::GR8RegisterClass); + return std::make_pair(0U, &MSP430::GR8RegClass); - return std::make_pair(0U, MSP430::GR16RegisterClass); + return std::make_pair(0U, &MSP430::GR16RegClass); } } @@ -330,8 +330,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain, llvm_unreachable(0); } case MVT::i16: - unsigned VReg = - RegInfo.createVirtualRegister(MSP430::GR16RegisterClass); + unsigned VReg = RegInfo.createVirtualRegister(&MSP430::GR16RegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, RegVT); @@ -1024,27 +1023,27 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI, default: llvm_unreachable("Invalid shift opcode!"); case MSP430::Shl8: Opc = MSP430::SHL8r1; - RC = MSP430::GR8RegisterClass; + RC = &MSP430::GR8RegClass; break; case MSP430::Shl16: Opc = MSP430::SHL16r1; - RC = MSP430::GR16RegisterClass; + RC = &MSP430::GR16RegClass; break; case MSP430::Sra8: Opc = MSP430::SAR8r1; - RC = MSP430::GR8RegisterClass; + RC = &MSP430::GR8RegClass; break; case MSP430::Sra16: Opc = MSP430::SAR16r1; - RC = MSP430::GR16RegisterClass; + RC = &MSP430::GR16RegClass; break; case MSP430::Srl8: Opc = MSP430::SAR8r1c; - RC = MSP430::GR8RegisterClass; + RC = &MSP430::GR8RegClass; break; case MSP430::Srl16: Opc = MSP430::SAR16r1c; - RC = MSP430::GR16RegisterClass; + RC = &MSP430::GR16RegClass; break; } @@ -1072,8 +1071,8 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI, LoopBB->addSuccessor(RemBB); LoopBB->addSuccessor(LoopBB); - unsigned ShiftAmtReg = RI.createVirtualRegister(MSP430::GR8RegisterClass); - unsigned ShiftAmtReg2 = RI.createVirtualRegister(MSP430::GR8RegisterClass); + unsigned ShiftAmtReg = RI.createVirtualRegister(&MSP430::GR8RegClass); + unsigned ShiftAmtReg2 = RI.createVirtualRegister(&MSP430::GR8RegClass); unsigned ShiftReg = RI.createVirtualRegister(RC); unsigned ShiftReg2 = RI.createVirtualRegister(RC); unsigned ShiftAmtSrcReg = MI->getOperand(2).getReg(); diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index 8206cfc1570..33337383a0b 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -197,9 +197,9 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) { const MachineFrameInfo *MFI = MF->getFrameInfo(); const std::vector &CSI = MFI->getCalleeSavedInfo(); // size of stack area to which FP callee-saved regs are saved. - unsigned CPURegSize = Mips::CPURegsRegisterClass->getSize(); - unsigned FGR32RegSize = Mips::FGR32RegisterClass->getSize(); - unsigned AFGR64RegSize = Mips::AFGR64RegisterClass->getSize(); + unsigned CPURegSize = Mips::CPURegsRegClass.getSize(); + unsigned FGR32RegSize = Mips::FGR32RegClass.getSize(); + unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize(); bool HasAFGR64Reg = false; unsigned CSFPRegsSize = 0; unsigned i, e = CSI.size(); @@ -207,11 +207,11 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) { // Set FPU Bitmask. 
for (i = 0; i != e; ++i) { unsigned Reg = CSI[i].getReg(); - if (Mips::CPURegsRegisterClass->contains(Reg)) + if (Mips::CPURegsRegClass.contains(Reg)) break; unsigned RegNum = getMipsRegisterNumbering(Reg); - if (Mips::AFGR64RegisterClass->contains(Reg)) { + if (Mips::AFGR64RegClass.contains(Reg)) { FPUBitmask |= (3 << RegNum); CSFPRegsSize += AFGR64RegSize; HasAFGR64Reg = true; diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp index f8ea3d0321d..63afd8e5eac 100644 --- a/lib/Target/Mips/MipsFrameLowering.cpp +++ b/lib/Target/Mips/MipsFrameLowering.cpp @@ -217,7 +217,7 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const { // If Reg is a double precision register, emit two cfa_offsets, // one for each of the paired single precision registers. - if (Mips::AFGR64RegisterClass->contains(Reg)) { + if (Mips::AFGR64RegClass.contains(Reg)) { const uint16_t *SubRegs = RegInfo->getSubRegisters(Reg); MachineLocation DstML0(MachineLocation::VirtualFP, Offset); MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4); @@ -229,8 +229,7 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const { Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0)); Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1)); - } - else { + } else { // Reg is either in CPURegs or FGR32. DstML = MachineLocation(MachineLocation::VirtualFP, Offset); SrcML = MachineLocation(Reg); diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index f0651c61311..6e5bad7f153 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -134,7 +134,8 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) { else { const TargetRegisterClass *RC; RC = Subtarget.isABI_N64() ? - Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass; + (const TargetRegisterClass*)&Mips::CPU64RegsRegClass : + (const TargetRegisterClass*)&Mips::CPURegsRegClass; V0 = RegInfo.createVirtualRegister(RC); V1 = RegInfo.createVirtualRegister(RC); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 6a23bc3d1d7..50604cc5e55 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -96,20 +96,20 @@ MipsTargetLowering(MipsTargetMachine &TM) setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? 
// Set up the register classes - addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass); + addRegisterClass(MVT::i32, &Mips::CPURegsRegClass); if (HasMips64) - addRegisterClass(MVT::i64, Mips::CPU64RegsRegisterClass); + addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass); if (!TM.Options.UseSoftFloat) { - addRegisterClass(MVT::f32, Mips::FGR32RegisterClass); + addRegisterClass(MVT::f32, &Mips::FGR32RegClass); // When dealing with single precision only, use libcalls if (!Subtarget->isSingleFloat()) { if (HasMips64) - addRegisterClass(MVT::f64, Mips::FGR64RegisterClass); + addRegisterClass(MVT::f64, &Mips::FGR64RegClass); else - addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass); + addRegisterClass(MVT::f64, &Mips::AFGR64RegClass); } } @@ -2666,7 +2666,7 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, break; unsigned SrcReg = O32IntRegs[CurWord]; - unsigned Reg = AddLiveIn(MF, SrcReg, Mips::CPURegsRegisterClass); + unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass); SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN, DAG.getConstant(i * 4, MVT::i32)); SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32), @@ -2703,7 +2703,7 @@ CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl, // Copy arg registers. for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs); ++Reg, ++I) { - unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass); + unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass); SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN, DAG.getConstant(I * 8, PtrTy)); SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64), @@ -2779,13 +2779,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, const TargetRegisterClass *RC; if (RegVT == MVT::i32) - RC = Mips::CPURegsRegisterClass; + RC = &Mips::CPURegsRegClass; else if (RegVT == MVT::i64) - RC = Mips::CPU64RegsRegisterClass; + RC = &Mips::CPU64RegsRegClass; else if (RegVT == MVT::f32) - RC = Mips::FGR32RegisterClass; + RC = &Mips::FGR32RegClass; else if (RegVT == MVT::f64) - RC = HasMips64 ? Mips::FGR64RegisterClass : Mips::AFGR64RegisterClass; + RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass; else llvm_unreachable("RegVT not supported by FormalArguments Lowering"); @@ -2859,8 +2859,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs; unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs); int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot. - const TargetRegisterClass *RC - = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass; + const TargetRegisterClass *RC = IsO32 ? + (const TargetRegisterClass*)&Mips::CPURegsRegClass : + (const TargetRegisterClass*)&Mips::CPU64RegsRegClass; unsigned RegSize = RC->getSize(); int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize; @@ -3049,17 +3050,16 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const case 'y': // Same as 'r'. Exists for compatibility. 
   case 'r':
     if (VT == MVT::i32)
-      return std::make_pair(0U, Mips::CPURegsRegisterClass);
+      return std::make_pair(0U, &Mips::CPURegsRegClass);
     assert(VT == MVT::i64 && "Unexpected type.");
-    return std::make_pair(0U, Mips::CPU64RegsRegisterClass);
+    return std::make_pair(0U, &Mips::CPU64RegsRegClass);
   case 'f':
     if (VT == MVT::f32)
-      return std::make_pair(0U, Mips::FGR32RegisterClass);
+      return std::make_pair(0U, &Mips::FGR32RegClass);
     if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
       if (Subtarget->isFP64bit())
-        return std::make_pair(0U, Mips::FGR64RegisterClass);
-      else
-        return std::make_pair(0U, Mips::AFGR64RegisterClass);
+        return std::make_pair(0U, &Mips::FGR64RegClass);
+      return std::make_pair(0U, &Mips::AFGR64RegClass);
     }
   }
 }
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index a3a18bff655..7578b17492e 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -189,15 +189,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
   unsigned Opc = 0;
-  if (RC == Mips::CPURegsRegisterClass)
+  if (RC == &Mips::CPURegsRegClass)
     Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
-  else if (RC == Mips::CPU64RegsRegisterClass)
+  else if (RC == &Mips::CPU64RegsRegClass)
     Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
-  else if (RC == Mips::FGR32RegisterClass)
+  else if (RC == &Mips::FGR32RegClass)
     Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
-  else if (RC == Mips::AFGR64RegisterClass)
+  else if (RC == &Mips::AFGR64RegClass)
     Opc = Mips::SDC1;
-  else if (RC == Mips::FGR64RegisterClass)
+  else if (RC == &Mips::FGR64RegClass)
     Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
   assert(Opc && "Register class not handled!");
@@ -216,15 +216,15 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
   MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
   unsigned Opc = 0;
-  if (RC == Mips::CPURegsRegisterClass)
+  if (RC == &Mips::CPURegsRegClass)
     Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
-  else if (RC == Mips::CPU64RegsRegisterClass)
+  else if (RC == &Mips::CPU64RegsRegClass)
     Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
-  else if (RC == Mips::FGR32RegisterClass)
+  else if (RC == &Mips::FGR32RegClass)
     Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
-  else if (RC == Mips::AFGR64RegisterClass)
+  else if (RC == &Mips::AFGR64RegClass)
     Opc = Mips::LDC1;
-  else if (RC == Mips::FGR64RegisterClass)
+  else if (RC == &Mips::FGR64RegClass)
     Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
   assert(Opc && "Register class not handled!");
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index b00c62b09f4..9d3a27b518f 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -40,9 +40,9 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
   if (FixGlobalBaseReg) // $gp is the global base register.
     return GlobalBaseReg = ST.isABI_N64() ? Mips::GP_64 : Mips::GP;
-  const TargetRegisterClass *RC;
-  RC = ST.isABI_N64() ?
-    Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;
+  const TargetRegisterClass *RC = ST.isABI_N64() ?
+    (const TargetRegisterClass*)&Mips::CPU64RegsRegClass :
+    (const TargetRegisterClass*)&Mips::CPURegsRegClass;
   return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
 }
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index f30de449f6d..67f2834ab68 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -94,7 +94,7 @@ getReservedRegs(const MachineFunction &MF) const {
   };
   BitVector Reserved(getNumRegs());
-  typedef TargetRegisterClass::iterator RegIter;
+  typedef TargetRegisterClass::const_iterator RegIter;
   for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
     Reserved.set(ReservedCPURegs[I]);
@@ -104,18 +104,17 @@ getReservedRegs(const MachineFunction &MF) const {
       Reserved.set(ReservedCPU64Regs[I]);
     // Reserve all registers in AFGR64.
-    for (RegIter Reg = Mips::AFGR64RegisterClass->begin();
-         Reg != Mips::AFGR64RegisterClass->end(); ++Reg)
+    for (RegIter Reg = Mips::AFGR64RegClass.begin(),
+         EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
       Reserved.set(*Reg);
-  }
-  else {
+  } else {
     // Reserve all registers in CPU64Regs & FGR64.
-    for (RegIter Reg = Mips::CPU64RegsRegisterClass->begin();
-         Reg != Mips::CPU64RegsRegisterClass->end(); ++Reg)
+    for (RegIter Reg = Mips::CPU64RegsRegClass.begin(),
+         EReg = Mips::CPU64RegsRegClass.end(); Reg != EReg; ++Reg)
       Reserved.set(*Reg);
-    for (RegIter Reg = Mips::FGR64RegisterClass->begin();
-         Reg != Mips::FGR64RegisterClass->end(); ++Reg)
+    for (RegIter Reg = Mips::FGR64RegClass.begin(),
+         EReg = Mips::FGR64RegClass.end(); Reg != EReg; ++Reg)
       Reserved.set(*Reg);
   }
diff --git a/lib/Target/PTX/PTXISelLowering.cpp b/lib/Target/PTX/PTXISelLowering.cpp
index ef4455b96bc..4d5e9bf1a60 100644
--- a/lib/Target/PTX/PTXISelLowering.cpp
+++ b/lib/Target/PTX/PTXISelLowering.cpp
@@ -36,12 +36,12 @@ using namespace llvm;
 PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
   : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
   // Set up the register classes.
-  addRegisterClass(MVT::i1, PTX::RegPredRegisterClass);
-  addRegisterClass(MVT::i16, PTX::RegI16RegisterClass);
-  addRegisterClass(MVT::i32, PTX::RegI32RegisterClass);
-  addRegisterClass(MVT::i64, PTX::RegI64RegisterClass);
-  addRegisterClass(MVT::f32, PTX::RegF32RegisterClass);
-  addRegisterClass(MVT::f64, PTX::RegF64RegisterClass);
+  addRegisterClass(MVT::i1, &PTX::RegPredRegClass);
+  addRegisterClass(MVT::i16, &PTX::RegI16RegClass);
+  addRegisterClass(MVT::i32, &PTX::RegI32RegClass);
+  addRegisterClass(MVT::i64, &PTX::RegI64RegClass);
+  addRegisterClass(MVT::f32, &PTX::RegF32RegClass);
+  addRegisterClass(MVT::f64, &PTX::RegF64RegClass);
   setBooleanContents(ZeroOrOneBooleanContent);
   setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
@@ -328,36 +328,30 @@ SDValue PTXTargetLowering::
       }
     }
     else {
       for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
-        EVT RegVT = Outs[i].VT;
+        EVT RegVT = Outs[i].VT;
         const TargetRegisterClass* TRC;
-        unsigned RegType;
+        unsigned RegType;
         // Determine which register class we need
         if (RegVT == MVT::i1) {
-          TRC = PTX::RegPredRegisterClass;
+          TRC = &PTX::RegPredRegClass;
           RegType = PTXRegisterType::Pred;
-        }
-        else if (RegVT == MVT::i16) {
-          TRC = PTX::RegI16RegisterClass;
+        } else if (RegVT == MVT::i16) {
+          TRC = &PTX::RegI16RegClass;
           RegType = PTXRegisterType::B16;
-        }
-        else if (RegVT == MVT::i32) {
-          TRC = PTX::RegI32RegisterClass;
+        } else if (RegVT == MVT::i32) {
+          TRC = &PTX::RegI32RegClass;
           RegType = PTXRegisterType::B32;
-        }
-        else if (RegVT == MVT::i64) {
-          TRC = PTX::RegI64RegisterClass;
+        } else if (RegVT == MVT::i64) {
+          TRC = &PTX::RegI64RegClass;
           RegType = PTXRegisterType::B64;
-        }
-        else if (RegVT == MVT::f32) {
-          TRC = PTX::RegF32RegisterClass;
+        } else if (RegVT == MVT::f32) {
+          TRC = &PTX::RegF32RegClass;
           RegType = PTXRegisterType::F32;
-        }
-        else if (RegVT == MVT::f64) {
-          TRC = PTX::RegF64RegisterClass;
+        } else if (RegVT == MVT::f64) {
+          TRC = &PTX::RegF64RegClass;
           RegType = PTXRegisterType::F64;
-        }
-        else {
+        } else {
           llvm_unreachable("Unknown parameter type");
         }
diff --git a/lib/Target/PTX/PTXMFInfoExtract.cpp b/lib/Target/PTX/PTXMFInfoExtract.cpp
index 172a0e03135..f1676ca384c 100644
--- a/lib/Target/PTX/PTXMFInfoExtract.cpp
+++ b/lib/Target/PTX/PTXMFInfoExtract.cpp
@@ -59,17 +59,17 @@ bool PTXMFInfoExtract::runOnMachineFunction(MachineFunction &MF) {
     unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
     const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
     unsigned RegType;
-    if (TRC == PTX::RegPredRegisterClass)
+    if (TRC == &PTX::RegPredRegClass)
       RegType = PTXRegisterType::Pred;
-    else if (TRC == PTX::RegI16RegisterClass)
+    else if (TRC == &PTX::RegI16RegClass)
      RegType = PTXRegisterType::B16;
-    else if (TRC == PTX::RegI32RegisterClass)
+    else if (TRC == &PTX::RegI32RegClass)
      RegType = PTXRegisterType::B32;
-    else if (TRC == PTX::RegI64RegisterClass)
+    else if (TRC == &PTX::RegI64RegClass)
      RegType = PTXRegisterType::B64;
-    else if (TRC == PTX::RegF32RegisterClass)
+    else if (TRC == &PTX::RegF32RegClass)
      RegType = PTXRegisterType::F32;
-    else if (TRC == PTX::RegF64RegisterClass)
+    else if (TRC == &PTX::RegF64RegClass)
      RegType = PTXRegisterType::F64;
     else
       llvm_unreachable("Unkown register class.");
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 50fda58cf57..1e82abf381d 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -341,7 +341,7 @@ XCoreFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
   MachineFrameInfo *MFI = MF.getFrameInfo();
   const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
   bool LRUsed = MF.getRegInfo().isPhysRegUsed(XCore::LR);
-  const TargetRegisterClass *RC = XCore::GRRegsRegisterClass;
+  const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
   XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
   if (LRUsed) {
     MF.getRegInfo().setPhysRegUnused(XCore::LR);
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index fdf2b783241..ee4ba10912d 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -66,7 +66,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
     Subtarget(*XTM.getSubtargetImpl()) {
   // Set up the register classes.
-  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);
+  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
   // Compute derived properties from the register classes
   computeRegisterProperties();
@@ -1121,8 +1121,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
         llvm_unreachable(0);
       }
       case MVT::i32:
-        unsigned VReg = RegInfo.createVirtualRegister(
-                          XCore::GRRegsRegisterClass);
+        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
       }
@@ -1172,8 +1171,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
       offset -= StackSlotSize;
       SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
       // Move argument from phys reg -> virt reg
-      unsigned VReg = RegInfo.createVirtualRegister(
-                        XCore::GRRegsRegisterClass);
+      unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
       RegInfo.addLiveIn(ArgRegs[i], VReg);
       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
       // Move argument from virt reg -> stack
@@ -1611,7 +1609,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
     switch (Constraint[0]) {
     default : break;
     case 'r':
-      return std::make_pair(0U, XCore::GRRegsRegisterClass);
+      return std::make_pair(0U, &XCore::GRRegsRegClass);
     }
   }
   // Use the default implementation in TargetLowering to convert the register
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index f3b4b4c4f88..6812f8f56da 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -205,8 +205,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   unsigned Reg = MI.getOperand(0).getReg();
   bool isKill = MI.getOpcode() == XCore::STWFI && MI.getOperand(0).isKill();
-  assert(XCore::GRRegsRegisterClass->contains(Reg) &&
-         "Unexpected register operand");
+  assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");
   MachineBasicBlock &MBB = *MI.getParent();
@@ -217,7 +216,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     if (!RS)
       report_fatal_error("eliminateFrameIndex Frame size too big: " +
                          Twine(Offset));
-    unsigned ScratchReg = RS->scavengeRegister(XCore::GRRegsRegisterClass, II,
+    unsigned ScratchReg = RS->scavengeRegister(&XCore::GRRegsRegClass, II,
                                                SPAdj);
     loadConstant(MBB, II, ScratchReg, Offset, dl);
     switch (MI.getOpcode()) {
-- 
2.34.1