From: NAKAMURA Takumi
Date: Tue, 22 Sep 2015 11:13:55 +0000 (+0000)
Subject: Reformat.
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=d4cdf1962b2242fb155ee2203199dd043725e7e1

Reformat.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@248261 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/IR/Constant.h b/include/llvm/IR/Constant.h
index 019b4343a13..f33abbf62da 100644
--- a/include/llvm/IR/Constant.h
+++ b/include/llvm/IR/Constant.h
@@ -159,8 +159,8 @@ public:
   /// getIntegerValue - Return the value for an integer or pointer constant,
   /// or a vector thereof, with the given scalar value.
-  static Constant *getIntegerValue(Type* Ty, const APInt &V);
-
+  static Constant *getIntegerValue(Type *Ty, const APInt &V);
+
   /// removeDeadConstantUsers - If there are any dead constant users dangling
   /// off of this constant, remove them. This method is useful for clients
   /// that want to check to see if a global is unused, but don't want to deal
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index d28847e4ff3..694354ee3b2 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -147,9 +147,9 @@ static_assert(AlignOf::Alignment >= AlignOf::Alignment,
 /// and VectorType.
 class CompositeType : public Type {
 protected:
-  explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) { }
-public:
+  explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
 
+public:
   /// getTypeAtIndex - Given an index value into the type, return the type of
   /// the element.
   ///
@@ -212,16 +212,13 @@ public:
   /// StructType::create - This creates an identified struct.
   static StructType *create(LLVMContext &Context, StringRef Name);
   static StructType *create(LLVMContext &Context);
-
-  static StructType *create(ArrayRef Elements,
-                            StringRef Name,
-                            bool isPacked = false);
-  static StructType *create(ArrayRef Elements);
-  static StructType *create(LLVMContext &Context,
-                            ArrayRef Elements,
-                            StringRef Name,
+
+  static StructType *create(ArrayRef Elements, StringRef Name,
                             bool isPacked = false);
-  static StructType *create(LLVMContext &Context, ArrayRef Elements);
+  static StructType *create(ArrayRef Elements);
+  static StructType *create(LLVMContext &Context, ArrayRef Elements,
+                            StringRef Name, bool isPacked = false);
+  static StructType *create(LLVMContext &Context, ArrayRef Elements);
   static StructType *create(StringRef Name, Type *elt1, ...) LLVM_END_WITH_NULL;
 
   /// StructType::get - This static method is the primary way to create a
@@ -250,8 +247,8 @@ public:
   bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
 
   /// isSized - Return true if this is a sized type.
-  bool isSized(SmallPtrSetImpl *Visited = nullptr) const;
-
+  bool isSized(SmallPtrSetImpl *Visited = nullptr) const;
+
   /// hasName - Return true if this is a named struct that has a non-empty name.
   bool hasName() const { return SymbolTableEntry != nullptr; }
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index 0c507e4b972..ba334d65313 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -300,10 +300,8 @@ public:
     return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::ArgMemOnly);
   }
-  void setOnlyAccessesArgMemory() {
-    addFnAttr(Attribute::ArgMemOnly);
-  }
-
+  void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
+
   /// @brief Determine if the function cannot return.
   bool doesNotReturn() const {
     return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
@@ -397,10 +395,8 @@ public:
   }
 
   /// Optimize this function for minimum size (-Oz).
-  bool optForMinSize() const {
-    return hasFnAttribute(Attribute::MinSize);
-  };
-
+  bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); };
+
   /// Optimize this function for size (-Os) or minimum size (-Oz).
   bool optForSize() const {
     return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index d46514614e4..5343d9fe358 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -314,10 +314,8 @@ public:
   }
 
   /// \brief Fetch the type representing a 128-bit integer.
-  IntegerType *getInt128Ty() {
-    return Type::getInt128Ty(Context);
-  }
-
+  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
+
   /// \brief Fetch the type representing an N-bit integer.
   IntegerType *getIntNTy(unsigned N) {
     return Type::getIntNTy(Context, N);
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index d84e66c50a3..bd45144c169 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -441,29 +441,29 @@ public:
     BO->setIsExact(true);
     return BO;
   }
-
-#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
-  static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
-           (Value *V1, Value *V2, const Twine &Name = "") { \
-    return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
-  } \
-  static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
-           (Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
-    return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
-  } \
-  static BinaryOperator *Create ## NUWNSWEXACT ## OPC \
-           (Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
-    return Create ## NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
+
+#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
+                                                  const Twine &Name = "") { \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
+  } \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC( \
+      Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
+  } \
+  static BinaryOperator *Create##NUWNSWEXACT##OPC( \
+      Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
+    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
   }
-
-  DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
-  DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
-  DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
-  DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
-  DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
-  DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
-  DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
-  DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
+
+  DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
+  DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
+  DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
+  DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
+  DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
+  DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
+  DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
+  DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
 
   DEFINE_HELPERS(SDiv, Exact)  // CreateExactSDiv
   DEFINE_HELPERS(UDiv, Exact)  // CreateExactUDiv
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 1497231cbe0..825f4cace88 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -1689,10 +1689,9 @@ public:
   static inline bool classof(const Value *V) {
     return isa(V)
        && classof(cast(V));
   }
-private:
-  template
-  bool hasFnAttrImpl(AttrKind A) const {
+private:
+  template  bool hasFnAttrImpl(AttrKind A) const {
     if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
       return true;
     if (const Function *F = getCalledFunction())
@@ -1924,8 +1923,9 @@ class InsertElementInst : public Instruction {
   InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                     const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
-  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
-                    const Twine &NameStr, BasicBlock *InsertAtEnd);
+  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
+                    BasicBlock *InsertAtEnd);
+
 protected:
   // Note: Instruction needs to be a friend here to call cloneImpl.
   friend class Instruction;
@@ -2081,9 +2081,8 @@ class ExtractValueInst : public UnaryInstruction {
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
 
   // allocate space for exactly one operand
-  void *operator new(size_t s) {
-    return User::operator new(s, 1);
-  }
+  void *operator new(size_t s) { return User::operator new(s, 1); }
+
 protected:
   // Note: Instruction needs to be a friend here to call cloneImpl.
   friend class Instruction;
@@ -2195,11 +2194,12 @@ class InsertValueInst : public Instruction {
 
   /// Constructors - These two constructors are convenience methods because one
   /// and two index insertvalue instructions are so common.
-  InsertValueInst(Value *Agg, Value *Val,
-                  unsigned Idx, const Twine &NameStr = "",
-                  Instruction *InsertBefore = nullptr);
   InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
-                  const Twine &NameStr, BasicBlock *InsertAtEnd);
+                  const Twine &NameStr = "",
+                  Instruction *InsertBefore = nullptr);
+  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
+                  BasicBlock *InsertAtEnd);
+
 protected:
   // Note: Instruction needs to be a friend here to call cloneImpl.
   friend class Instruction;
@@ -2930,8 +2930,7 @@ public:
     typedef CaseIteratorT ParentTy;
 
   public:
-
-    CaseIt(const ParentTy& Src) : ParentTy(Src) {}
+    CaseIt(const ParentTy &Src) : ParentTy(Src) {}
     CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}
 
     /// Sets the new value for current case.
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index 4d9c266fdb7..b0bedf6ad1f 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -61,8 +61,11 @@ template<> struct ilist_traits
   static void destroySentinel(GlobalVariable*) {}
 
   GlobalVariable *provideInitialHead() const { return createSentinel(); }
-  GlobalVariable *ensureHead(GlobalVariable*) const { return createSentinel(); }
-  static void noteHead(GlobalVariable*, GlobalVariable*) {}
+  GlobalVariable *ensureHead(GlobalVariable *) const {
+    return createSentinel();
+  }
+  static void noteHead(GlobalVariable *, GlobalVariable *) {}
+
 private:
   mutable ilist_node Sentinel;
 };
@@ -76,8 +79,9 @@ template<> struct ilist_traits
   static void destroySentinel(GlobalAlias*) {}
 
   GlobalAlias *provideInitialHead() const { return createSentinel(); }
-  GlobalAlias *ensureHead(GlobalAlias*) const { return createSentinel(); }
-  static void noteHead(GlobalAlias*, GlobalAlias*) {}
+  GlobalAlias *ensureHead(GlobalAlias *) const { return createSentinel(); }
+  static void noteHead(GlobalAlias *, GlobalAlias *) {}
+
 private:
   mutable ilist_node Sentinel;
 };
diff --git a/include/llvm/IR/SymbolTableListTraits.h b/include/llvm/IR/SymbolTableListTraits.h
index 0a5149c3d93..9a7f28d0d46 100644
--- a/include/llvm/IR/SymbolTableListTraits.h
+++ b/include/llvm/IR/SymbolTableListTraits.h
@@ -29,10 +29,10 @@ namespace llvm {
 class ValueSymbolTable;
-
-template class ilist_iterator;
-template class iplist;
-template struct ilist_traits;
+
+template class ilist_iterator;
+template class iplist;
+template struct ilist_traits;
 
 // ValueSubClass   - The type of objects that I hold, e.g. Instruction.
 // ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index adfa5f25a22..8979f1dbd14 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -438,8 +438,7 @@ template <> struct isa_impl {
 
 // Provide specializations of GraphTraits to be able to treat a type as a
 // graph of sub types.
-
-template <> struct GraphTraits {
+template <> struct GraphTraits {
   typedef Type NodeType;
   typedef Type::subtype_iterator ChildIteratorType;
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index d2eca03a3c6..7ca188145c0 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -55,8 +55,7 @@ DemandedBits::DemandedBits() : FunctionPass(ID) {
   initializeDemandedBitsPass(*PassRegistry::getPassRegistry());
 }
-
-void DemandedBits::getAnalysisUsage(AnalysisUsage& AU) const {
+void DemandedBits::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();
   AU.addRequired();
   AU.addRequired();
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 88ace8f5051..4e1c19b9d27 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2115,9 +2115,8 @@ bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
   return false;
 }
-
-SDValue TargetLowering::
-PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
+SDValue TargetLowering::PerformDAGCombine(SDNode *N,
+                                          DAGCombinerInfo &DCI) const {
   // Default implementation: no optimization.
return SDValue(); } diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp index 571ed7a14fb..c053313d293 100644 --- a/lib/Object/MachOObjectFile.cpp +++ b/lib/Object/MachOObjectFile.cpp @@ -1208,8 +1208,8 @@ dice_iterator MachOObjectFile::end_dices() const { return dice_iterator(DiceRef(DRI, this)); } -ExportEntry::ExportEntry(ArrayRef T) - : Trie(T), Malformed(false), Done(false) { } +ExportEntry::ExportEntry(ArrayRef T) + : Trie(T), Malformed(false), Done(false) {} void ExportEntry::moveToFirst() { pushNode(0); @@ -1277,11 +1277,10 @@ uint32_t ExportEntry::nodeOffset() const { return Stack.back().Start - Trie.begin(); } -ExportEntry::NodeState::NodeState(const uint8_t *Ptr) - : Start(Ptr), Current(Ptr), Flags(0), Address(0), Other(0), - ImportName(nullptr), ChildCount(0), NextChildIndex(0), - ParentStringLength(0), IsExportNode(false) { -} +ExportEntry::NodeState::NodeState(const uint8_t *Ptr) + : Start(Ptr), Current(Ptr), Flags(0), Address(0), Other(0), + ImportName(nullptr), ChildCount(0), NextChildIndex(0), + ParentStringLength(0), IsExportNode(false) {} void ExportEntry::pushNode(uint64_t offset) { const uint8_t *Ptr = Trie.begin() + offset; @@ -1559,9 +1558,7 @@ iterator_range MachOObjectFile::rebaseTable() const { return rebaseTable(getDyldInfoRebaseOpcodes(), is64Bit()); } - -MachOBindEntry::MachOBindEntry(ArrayRef Bytes, bool is64Bit, - Kind BK) +MachOBindEntry::MachOBindEntry(ArrayRef Bytes, bool is64Bit, Kind BK) : Opcodes(Bytes), Ptr(Bytes.begin()), SegmentOffset(0), SegmentIndex(0), Ordinal(0), Flags(0), Addend(0), RemainingLoopCount(0), AdvanceAmount(0), BindType(0), PointerSize(is64Bit ? 8 : 4), @@ -2206,10 +2203,10 @@ ArrayRef MachOObjectFile::getDyldInfoRebaseOpcodes() const { if (!DyldInfoLoadCmd) return None; - MachO::dyld_info_command DyldInfo - = getStruct(this, DyldInfoLoadCmd); - const uint8_t *Ptr = reinterpret_cast( - getPtr(this, DyldInfo.rebase_off)); + MachO::dyld_info_command DyldInfo = + getStruct(this, DyldInfoLoadCmd); + const uint8_t *Ptr = + reinterpret_cast(getPtr(this, DyldInfo.rebase_off)); return makeArrayRef(Ptr, DyldInfo.rebase_size); } @@ -2217,10 +2214,10 @@ ArrayRef MachOObjectFile::getDyldInfoBindOpcodes() const { if (!DyldInfoLoadCmd) return None; - MachO::dyld_info_command DyldInfo - = getStruct(this, DyldInfoLoadCmd); - const uint8_t *Ptr = reinterpret_cast( - getPtr(this, DyldInfo.bind_off)); + MachO::dyld_info_command DyldInfo = + getStruct(this, DyldInfoLoadCmd); + const uint8_t *Ptr = + reinterpret_cast(getPtr(this, DyldInfo.bind_off)); return makeArrayRef(Ptr, DyldInfo.bind_size); } @@ -2228,10 +2225,10 @@ ArrayRef MachOObjectFile::getDyldInfoWeakBindOpcodes() const { if (!DyldInfoLoadCmd) return None; - MachO::dyld_info_command DyldInfo - = getStruct(this, DyldInfoLoadCmd); - const uint8_t *Ptr = reinterpret_cast( - getPtr(this, DyldInfo.weak_bind_off)); + MachO::dyld_info_command DyldInfo = + getStruct(this, DyldInfoLoadCmd); + const uint8_t *Ptr = + reinterpret_cast(getPtr(this, DyldInfo.weak_bind_off)); return makeArrayRef(Ptr, DyldInfo.weak_bind_size); } @@ -2239,10 +2236,10 @@ ArrayRef MachOObjectFile::getDyldInfoLazyBindOpcodes() const { if (!DyldInfoLoadCmd) return None; - MachO::dyld_info_command DyldInfo - = getStruct(this, DyldInfoLoadCmd); - const uint8_t *Ptr = reinterpret_cast( - getPtr(this, DyldInfo.lazy_bind_off)); + MachO::dyld_info_command DyldInfo = + getStruct(this, DyldInfoLoadCmd); + const uint8_t *Ptr = + reinterpret_cast(getPtr(this, DyldInfo.lazy_bind_off)); return makeArrayRef(Ptr, 
DyldInfo.lazy_bind_size); } @@ -2250,10 +2247,10 @@ ArrayRef MachOObjectFile::getDyldInfoExportsTrie() const { if (!DyldInfoLoadCmd) return None; - MachO::dyld_info_command DyldInfo - = getStruct(this, DyldInfoLoadCmd); - const uint8_t *Ptr = reinterpret_cast( - getPtr(this, DyldInfo.export_off)); + MachO::dyld_info_command DyldInfo = + getStruct(this, DyldInfoLoadCmd); + const uint8_t *Ptr = + reinterpret_cast(getPtr(this, DyldInfo.export_off)); return makeArrayRef(Ptr, DyldInfo.export_size); } diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 3b8a2e7737d..4fa4a08d985 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -230,8 +230,7 @@ TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() { }); } - -void ARMTargetMachine::anchor() { } +void ARMTargetMachine::anchor() {} ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, @@ -245,7 +244,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT, "support ARM mode execution!"); } -void ARMLETargetMachine::anchor() { } +void ARMLETargetMachine::anchor() {} ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, @@ -254,7 +253,7 @@ ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT, CodeGenOpt::Level OL) : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} -void ARMBETargetMachine::anchor() { } +void ARMBETargetMachine::anchor() {} ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, @@ -263,7 +262,7 @@ ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT, CodeGenOpt::Level OL) : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} -void ThumbTargetMachine::anchor() { } +void ThumbTargetMachine::anchor() {} ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, @@ -274,7 +273,7 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT, initAsmInfo(); } -void ThumbLETargetMachine::anchor() { } +void ThumbLETargetMachine::anchor() {} ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, @@ -283,7 +282,7 @@ ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT, CodeGenOpt::Level OL) : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} -void ThumbBETargetMachine::anchor() { } +void ThumbBETargetMachine::anchor() {} ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index dd432bcbae5..e5531b8dc81 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -65,19 +65,20 @@ using namespace llvm; #define DEBUG_TYPE "asmprinter" namespace { - class PPCAsmPrinter : public AsmPrinter { - protected: - MapVector TOC; - const PPCSubtarget *Subtarget; - StackMaps SM; - public: - explicit PPCAsmPrinter(TargetMachine &TM, - std::unique_ptr Streamer) - : AsmPrinter(TM, std::move(Streamer)), SM(*this) {} - - const char *getPassName() const override { - return "PowerPC Assembly Printer"; - } +class PPCAsmPrinter : public AsmPrinter { +protected: + MapVector TOC; + const PPCSubtarget *Subtarget; + StackMaps SM; + +public: + explicit PPCAsmPrinter(TargetMachine &TM, + std::unique_ptr Streamer) + : AsmPrinter(TM, 
std::move(Streamer)), SM(*this) {} + + const char *getPassName() const override { + return "PowerPC Assembly Printer"; + } MCSymbol *lookUpOrCreateTOCEntry(MCSymbol *Sym); @@ -200,19 +201,19 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, !GV->isStrongDefinitionForLinker()) { if (!GV->hasHiddenVisibility()) { SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr"); - MachineModuleInfoImpl::StubValueTy &StubSym = - MMI->getObjFileInfo() - .getGVStubEntry(SymToPrint); + MachineModuleInfoImpl::StubValueTy &StubSym = + MMI->getObjFileInfo().getGVStubEntry( + SymToPrint); if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); } else if (GV->isDeclaration() || GV->hasCommonLinkage() || GV->hasAvailableExternallyLinkage()) { SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr"); - - MachineModuleInfoImpl::StubValueTy &StubSym = - MMI->getObjFileInfo(). - getHiddenGVStubEntry(SymToPrint); + + MachineModuleInfoImpl::StubValueTy &StubSym = + MMI->getObjFileInfo().getHiddenGVStubEntry( + SymToPrint); if (!StubSym.getPointer()) StubSym = MachineModuleInfoImpl:: StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); @@ -539,11 +540,12 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCSymbol *PICBase = MF->getPICBaseSymbol(); // Emit the 'bl'. - EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::BL) - // FIXME: We would like an efficient form for this, so we don't have to do - // a lot of extra uniquing. - .addExpr(MCSymbolRefExpr::create(PICBase, OutContext))); - + EmitToStreamer(*OutStreamer, + MCInstBuilder(PPC::BL) + // FIXME: We would like an efficient form for this, so we + // don't have to do a lot of extra uniquing. + .addExpr(MCSymbolRefExpr::create(PICBase, OutContext))); + // Emit the label. OutStreamer->EmitLabel(PICBase); return; @@ -840,13 +842,12 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { return; } case PPC::PPC32GOT: { - MCSymbol *GOTSymbol = OutContext.getOrCreateSymbol(StringRef("_GLOBAL_OFFSET_TABLE_")); - const MCExpr *SymGotTlsL = - MCSymbolRefExpr::create(GOTSymbol, MCSymbolRefExpr::VK_PPC_LO, - OutContext); - const MCExpr *SymGotTlsHA = - MCSymbolRefExpr::create(GOTSymbol, MCSymbolRefExpr::VK_PPC_HA, - OutContext); + MCSymbol *GOTSymbol = + OutContext.getOrCreateSymbol(StringRef("_GLOBAL_OFFSET_TABLE_")); + const MCExpr *SymGotTlsL = MCSymbolRefExpr::create( + GOTSymbol, MCSymbolRefExpr::VK_PPC_LO, OutContext); + const MCExpr *SymGotTlsHA = MCSymbolRefExpr::create( + GOTSymbol, MCSymbolRefExpr::VK_PPC_HA, OutContext); EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::LI) .addReg(MI->getOperand(0).getReg()) .addExpr(SymGotTlsL)); @@ -1293,8 +1294,8 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) { // Prime text sections so they are adjacent. This reduces the likelihood a // large data or debug section causes a branch to exceed 16M limit. 
- const TargetLoweringObjectFileMachO &TLOFMacho = - static_cast(getObjFileLowering()); + const TargetLoweringObjectFileMachO &TLOFMacho = + static_cast(getObjFileLowering()); OutStreamer->SwitchSection(TLOFMacho.getTextCoalSection()); if (TM.getRelocationModel() == Reloc::PIC_) { OutStreamer->SwitchSection( @@ -1338,8 +1339,8 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { S.EmitInstruction(Inst, *STI); }; - const TargetLoweringObjectFileMachO &TLOFMacho = - static_cast(getObjFileLowering()); + const TargetLoweringObjectFileMachO &TLOFMacho = + static_cast(getObjFileLowering()); // .lazy_symbol_pointer MCSection *LSPSection = TLOFMacho.getLazySymbolPointerSection(); @@ -1472,11 +1473,11 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) { bool isPPC64 = getDataLayout().getPointerSizeInBits() == 64; // Darwin/PPC always uses mach-o. - const TargetLoweringObjectFileMachO &TLOFMacho = - static_cast(getObjFileLowering()); + const TargetLoweringObjectFileMachO &TLOFMacho = + static_cast(getObjFileLowering()); MachineModuleInfoMachO &MMIMacho = - MMI->getObjFileInfo(); - + MMI->getObjFileInfo(); + MachineModuleInfoMachO::SymbolListTy Stubs = MMIMacho.GetFnStubList(); if (!Stubs.empty()) EmitFunctionStubs(Stubs); diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index d43aa1a87b0..fbefcf900ca 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -546,10 +546,9 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) { if (!ExitCount->getType()->isPointerTy() && ExitCount->getType() != CountType) ExitCount = SE->getZeroExtendExpr(ExitCount, CountType); - ExitCount = SE->getAddExpr(ExitCount, - SE->getConstant(CountType, 1)); - Value *ECValue = SCEVE.expandCodeFor(ExitCount, CountType, - Preheader->getTerminator()); + ExitCount = SE->getAddExpr(ExitCount, SE->getConstant(CountType, 1)); + Value *ECValue = + SCEVE.expandCodeFor(ExitCount, CountType, Preheader->getTerminator()); IRBuilder<> CountBuilder(Preheader->getTerminator()); Module *M = Preheader->getParent()->getParent(); diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 61731f03ebd..92975e1d732 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -107,8 +107,8 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, AddPromotedToType (ISD::SINT_TO_FP, MVT::i1, isPPC64 ? MVT::i64 : MVT::i32); setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); - AddPromotedToType (ISD::UINT_TO_FP, MVT::i1, - isPPC64 ? MVT::i64 : MVT::i32); + AddPromotedToType(ISD::UINT_TO_FP, MVT::i1, + isPPC64 ? MVT::i64 : MVT::i32); } else { setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom); @@ -403,9 +403,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, // will selectively turn on ones that can be effectively codegen'd. for (MVT VT : MVT::vector_valuetypes()) { // add/sub are legal for all supported vector VT's. 
- setOperationAction(ISD::ADD , VT, Legal); - setOperationAction(ISD::SUB , VT, Legal); - + setOperationAction(ISD::ADD, VT, Legal); + setOperationAction(ISD::SUB, VT, Legal); + // Vector instructions introduced in P8 if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) { setOperationAction(ISD::CTPOP, VT, Legal); @@ -519,8 +519,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); } - - if (Subtarget.hasP8Altivec()) + if (Subtarget.hasP8Altivec()) setOperationAction(ISD::MUL, MVT::v4i32, Legal); else setOperationAction(ISD::MUL, MVT::v4i32, Custom); @@ -5844,10 +5843,7 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, false, false, 0); } - - -SDValue -PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { +SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool isPPC64 = Subtarget.isPPC64(); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); @@ -6314,9 +6310,9 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64); - FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, - FPHalfs, FPHalfs, FPHalfs, FPHalfs); - + FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, FPHalfs, FPHalfs, + FPHalfs, FPHalfs); + Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); if (Op.getValueType() != MVT::v4f64) @@ -7350,10 +7346,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; case Intrinsic::ppc_altivec_vcmpequd_p: if (Subtarget.hasP8Altivec()) { - CompareOpc = 199; - isDot = 1; - } - else + CompareOpc = 199; + isDot = 1; + } else return false; break; @@ -7364,10 +7359,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; case Intrinsic::ppc_altivec_vcmpgtsd_p: if (Subtarget.hasP8Altivec()) { - CompareOpc = 967; - isDot = 1; - } - else + CompareOpc = 967; + isDot = 1; + } else return false; break; @@ -7376,10 +7370,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; case Intrinsic::ppc_altivec_vcmpgtud_p: if (Subtarget.hasP8Altivec()) { - CompareOpc = 711; - isDot = 1; - } - else + CompareOpc = 711; + isDot = 1; + } else return false; break; @@ -7392,10 +7385,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; case Intrinsic::ppc_altivec_vcmpequd: if (Subtarget.hasP8Altivec()) { - CompareOpc = 199; - isDot = 0; - } - else + CompareOpc = 199; + isDot = 0; + } else return false; break; @@ -7406,10 +7398,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; case Intrinsic::ppc_altivec_vcmpgtsd: if (Subtarget.hasP8Altivec()) { - CompareOpc = 967; - isDot = 0; - } - else + CompareOpc = 967; + isDot = 0; + } else return false; break; @@ -7418,10 +7409,9 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; case Intrinsic::ppc_altivec_vcmpgtud: if (Subtarget.hasP8Altivec()) { - CompareOpc = 711; - isDot = 
0; - } - else + CompareOpc = 711; + isDot = 0; + } else return false; break; @@ -7827,11 +7817,10 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); - Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx, - SN->getPointerInfo().getWithOffset(i), - MVT::i8 /* memory type */, - SN->isNonTemporal(), SN->isVolatile(), - 1 /* alignment */, SN->getAAInfo())); + Stores.push_back(DAG.getTruncStore( + StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), + MVT::i8 /* memory type */, SN->isNonTemporal(), SN->isVolatile(), + 1 /* alignment */, SN->getAAInfo())); } StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); @@ -9225,9 +9214,9 @@ static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, int64_t Offset1 = 0, Offset2 = 0; getBaseWithConstantOffset(Loc, Base1, Offset1, DAG); getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG); - if (Base1 == Base2 && Offset1 == (Offset2 + Dist*Bytes)) - return true; - + if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes)) + return true; + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); const GlobalValue *GV1 = nullptr; const GlobalValue *GV2 = nullptr; @@ -9960,10 +9949,11 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N, "Invalid extension type"); EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout()); SDValue ShiftCst = - DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); - return DAG.getNode(ISD::SRA, dl, N->getValueType(0), - DAG.getNode(ISD::SHL, dl, N->getValueType(0), - N->getOperand(0), ShiftCst), ShiftCst); + DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy); + return DAG.getNode( + ISD::SRA, dl, N->getValueType(0), + DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst), + ShiftCst); } SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N, diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index 6f364bc0577..137e91f72fc 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -459,8 +459,8 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, // We need to store the CR in the low 4-bits of the saved value. First, issue // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg. BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg) - .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); - + .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); + // If the saved register wasn't CR0, shift the bits left so that they are in // CR0's slot. if (SrcReg != PPC::CR0) { @@ -549,8 +549,8 @@ void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II, .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg) - .addReg(getCRFromCRBit(SrcReg)); - + .addReg(getCRFromCRBit(SrcReg)); + // If the saved register wasn't CR0LT, shift the bits left so that the bit to // store is the first one. Mask all but that bit. unsigned Reg1 = Reg; @@ -602,10 +602,12 @@ void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II, unsigned ShiftBits = getEncodingValue(DestReg); // rlwimi r11, r10, 32-ShiftBits, ..., ... BuildMI(MBB, II, dl, TII.get(LP64 ? 
PPC::RLWIMI8 : PPC::RLWIMI), RegO) - .addReg(RegO, RegState::Kill).addReg(Reg, RegState::Kill) - .addImm(ShiftBits ? 32-ShiftBits : 0) - .addImm(ShiftBits).addImm(ShiftBits); - + .addReg(RegO, RegState::Kill) + .addReg(Reg, RegState::Kill) + .addImm(ShiftBits ? 32 - ShiftBits : 0) + .addImm(ShiftBits) + .addImm(ShiftBits); + BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), getCRFromCRBit(DestReg)) .addReg(RegO, RegState::Kill) @@ -634,11 +636,11 @@ void PPCRegisterInfo::lowerVRSAVESpilling(MachineBasicBlock::iterator II, unsigned SrcReg = MI.getOperand(0).getReg(); BuildMI(MBB, II, dl, TII.get(PPC::MFVRSAVEv), Reg) - .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); - - addFrameReference(BuildMI(MBB, II, dl, TII.get(PPC::STW)) - .addReg(Reg, RegState::Kill), - FrameIndex); + .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill())); + + addFrameReference( + BuildMI(MBB, II, dl, TII.get(PPC::STW)).addReg(Reg, RegState::Kill), + FrameIndex); // Discard the pseudo instruction. MBB.erase(II); diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp index 8ceba0dd858..6e4bc92581a 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -123,7 +123,7 @@ static std::string computeFSAdditions(StringRef FS, CodeGenOpt::Level OL, } if (OL != CodeGenOpt::None) { - if (!FullFS.empty()) + if (!FullFS.empty()) FullFS = "+invariant-function-descriptors," + FullFS; else FullFS = "+invariant-function-descriptors"; diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp index 2be3a4aa20c..692dd525152 100644 --- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp +++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp @@ -360,7 +360,6 @@ INITIALIZE_PASS_END(PPCVSXFMAMutate, DEBUG_TYPE, char &llvm::PPCVSXFMAMutateID = PPCVSXFMAMutate::ID; char PPCVSXFMAMutate::ID = 0; -FunctionPass* -llvm::createPPCVSXFMAMutatePass() { return new PPCVSXFMAMutate(); } - - +FunctionPass *llvm::createPPCVSXFMAMutatePass() { + return new PPCVSXFMAMutate(); +} diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index 7f8c92a7aa3..dca5db48cdf 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -2252,9 +2252,8 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // Append default arguments to "ins[bwld]" if (Name.startswith("ins") && Operands.size() == 1 && - (Name == "insb" || Name == "insw" || Name == "insl" || - Name == "insd" )) { - AddDefaultSrcDestOperands(Operands, + (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd")) { + AddDefaultSrcDestOperands(Operands, X86Operand::CreateReg(X86::DX, NameLoc, NameLoc), DefaultMemDIOperand(NameLoc)); } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 55f24cafcc8..a9590aafa97 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2094,11 +2094,9 @@ bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, #include "X86GenCallingConv.inc" -bool -X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, - MachineFunction &MF, bool isVarArg, - const SmallVectorImpl &Outs, - LLVMContext &Context) const { +bool X86TargetLowering::CanLowerReturn( + CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, + const SmallVectorImpl &Outs, LLVMContext &Context) const { SmallVector RVLocs; CCState CCInfo(CallConv, isVarArg, MF, RVLocs, 
Context); return CCInfo.CheckReturn(Outs, RetCC_X86); @@ -2531,15 +2529,10 @@ static ArrayRef get64BitArgumentXMMs(MachineFunction &MF, return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit)); } -SDValue -X86TargetLowering::LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, - bool isVarArg, - const SmallVectorImpl &Ins, - SDLoc dl, - SelectionDAG &DAG, - SmallVectorImpl &InVals) - const { +SDValue X86TargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Ins, SDLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo(); const TargetFrameLowering &TFI = *Subtarget->getFrameLowering(); @@ -3550,17 +3543,12 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, /// Check whether the call is eligible for tail call optimization. Targets /// that want to do tail call optimization should implement this function. -bool -X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, - CallingConv::ID CalleeCC, - bool isVarArg, - bool isCalleeStructRet, - bool isCallerStructRet, - Type *RetTy, - const SmallVectorImpl &Outs, - const SmallVectorImpl &OutVals, - const SmallVectorImpl &Ins, - SelectionDAG &DAG) const { +bool X86TargetLowering::IsEligibleForTailCallOptimization( + SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, + bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, SelectionDAG &DAG) const { if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) return false; @@ -22180,26 +22168,28 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root, // See if we can recurse into the operand to combine more things. switch (Op.getOpcode()) { - case X86ISD::PSHUFB: - HasPSHUFB = true; - case X86ISD::PSHUFD: - case X86ISD::PSHUFHW: - case X86ISD::PSHUFLW: - if (Op.getOperand(0).hasOneUse() && - combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1, - HasPSHUFB, DAG, DCI, Subtarget)) - return true; - break; + case X86ISD::PSHUFB: + HasPSHUFB = true; + case X86ISD::PSHUFD: + case X86ISD::PSHUFHW: + case X86ISD::PSHUFLW: + if (Op.getOperand(0).hasOneUse() && + combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1, + HasPSHUFB, DAG, DCI, Subtarget)) + return true; + break; - case X86ISD::UNPCKL: - case X86ISD::UNPCKH: - assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!"); - // We can't check for single use, we have to check that this shuffle is the only user. - if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) && - combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1, - HasPSHUFB, DAG, DCI, Subtarget)) - return true; - break; + case X86ISD::UNPCKL: + case X86ISD::UNPCKH: + assert(Op.getOperand(0) == Op.getOperand(1) && + "We only combine unary shuffles!"); + // We can't check for single use, we have to check that this shuffle is the + // only user. 
+    if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
+        combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
+                                      HasPSHUFB, DAG, DCI, Subtarget))
+      return true;
+    break;
   }
 
   // Minor canonicalization of the accumulated shuffle mask to make it easier
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index 57cdf81cc03..52b56e02890 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -538,7 +538,4 @@ bool Float2Int::runOnFunction(Function &F) {
   return Modified;
 }
 
-FunctionPass *llvm::createFloat2IntPass() {
-  return new Float2Int();
-}
-
+FunctionPass *llvm::createFloat2IntPass() { return new Float2Int(); }