From: NAKAMURA Takumi
Date: Tue, 22 Sep 2015 11:14:12 +0000 (+0000)
Subject: Reformat comment lines.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;ds=sidebyside;h=6902c8db2603287335a7e46257b4530674983116;p=oota-llvm.git

Reformat comment lines.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@248262 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/IR/Argument.h b/include/llvm/IR/Argument.h
index fc04fe71cbf..97eedf9c4cd 100644
--- a/include/llvm/IR/Argument.h
+++ b/include/llvm/IR/Argument.h
@@ -64,8 +64,8 @@ public:
   /// containing function, return the number of bytes known to be
   /// dereferenceable. Otherwise, zero is returned.
   uint64_t getDereferenceableBytes() const;
-  
-  /// \brief If this argument has the dereferenceable_or_null attribute on 
+
+  /// \brief If this argument has the dereferenceable_or_null attribute on
   /// it in its containing function, return the number of bytes known to be
   /// dereferenceable. Otherwise, zero is returned.
   uint64_t getDereferenceableOrNullBytes() const;
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index 7c478c4b8c2..048a3a2854d 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -511,8 +511,8 @@ public:
   /// \brief Retrieve the stack alignment attribute, if it exists.
   uint64_t getStackAlignment() const { return StackAlignment; }

-  /// \brief Retrieve the number of dereferenceable bytes, if the dereferenceable
-  /// attribute exists (zero is returned otherwise).
+  /// \brief Retrieve the number of dereferenceable bytes, if the
+  /// dereferenceable attribute exists (zero is returned otherwise).
   uint64_t getDereferenceableBytes() const { return DerefBytes; }

   /// \brief Retrieve the number of dereferenceable_or_null bytes, if the
diff --git a/include/llvm/IR/Constant.h b/include/llvm/IR/Constant.h
index f33abbf62da..c17e5926aa9 100644
--- a/include/llvm/IR/Constant.h
+++ b/include/llvm/IR/Constant.h
@@ -24,18 +24,18 @@ namespace llvm {
 /// This is an important base class in LLVM. It provides the common facilities
 /// of all constant values in an LLVM program. A constant is a value that is
 /// immutable at runtime. Functions are constants because their address is
-/// immutable. Same with global variables. 
-/// 
+/// immutable. Same with global variables.
+///
 /// All constants share the capabilities provided in this class. All constants
 /// can have a null value. They can have an operand list. Constants can be
 /// simple (integer and floating point values), complex (arrays and structures),
-/// or expression based (computations yielding a constant value composed of 
+/// or expression based (computations yielding a constant value composed of
 /// only certain operators and other constant values).
-/// 
-/// Note that Constants are immutable (once created they never change) 
-/// and are fully shared by structural equivalence. This means that two 
-/// structurally equivalent constants will always have the same address. 
-/// Constants are created on demand as needed and never deleted: thus clients 
+///
+/// Note that Constants are immutable (once created they never change)
+/// and are fully shared by structural equivalence. This means that two
+/// structurally equivalent constants will always have the same address.
+/// Constants are created on demand as needed and never deleted: thus clients
 /// don't have to worry about the lifetime of the objects.
 /// @brief LLVM Constant Representation
 class Constant : public User {
@@ -59,7 +59,7 @@ public:
   /// getAllOnesValue.
   bool isAllOnesValue() const;

-  /// isNegativeZeroValue - Return true if the value is what would be returned 
+  /// isNegativeZeroValue - Return true if the value is what would be returned
   /// by getZeroValueForNegation.
   bool isNegativeZeroValue() const;

@@ -96,7 +96,7 @@ public:
   /// whether or not it may generate a relocation entry. This must be
   /// conservative, so if it might codegen to a relocatable entry, it should say
   /// so. The return values are:
-  /// 
+  ///
   /// NoRelocation: This constant pool entry is guaranteed to never have a
   /// relocation applied to it (because it holds a simple constant like
   /// '4').
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index 694354ee3b2..6f3ed8aca32 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -174,7 +174,7 @@ public:
 /// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
 /// always have a body when created. You can get one of these by using one of
 /// the StructType::get() forms.
-/// 
+///
 /// Identified structs (e.g. %foo or %42) may optionally have a name and are not
 /// uniqued. The names for identified structs are managed at the LLVMContext
 /// level, so there can only be a single identified struct with a given name in
@@ -205,7 +205,7 @@ class StructType : public CompositeType {
   /// pointer to the symbol table entry (maintained by LLVMContext) for the
   /// struct. This is null if the type is an literal struct or if it is
   /// a identified type that has an empty name.
-  /// 
+  ///
   void *SymbolTableEntry;

 public:
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index aa0eccf666d..ba34513aac9 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -89,7 +89,7 @@ public:
   ///
   static bool Verify(FunctionType *Ty, StringRef Constraints);

-  // Constraint String Parsing 
+  // Constraint String Parsing
   enum ConstraintPrefix {
     isInput,            // 'x'
     isOutput,           // '=x'
@@ -157,8 +157,8 @@ public:

     /// The currently selected alternative constraint index.
     unsigned currentAlternativeIndex;
-    
-    ///Default constructor.
+
+    /// Default constructor.
     ConstraintInfo();

     /// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
@@ -175,8 +175,8 @@ public:
   /// constraints and their prefixes. If this returns an empty vector, and if
   /// the constraint string itself isn't empty, there was an error parsing.
   static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);
-  
-  /// ParseConstraints - Parse the constraints of this inlineasm object, 
+
+  /// ParseConstraints - Parse the constraints of this inlineasm object,
   /// returning them the same way that ParseConstraints(str) does.
   ConstraintInfoVector ParseConstraints() const {
     return ParseConstraints(Constraints);
   }
@@ -273,7 +273,7 @@ public:
   }

   /// getFlagWordForMatchingOp - Augment an existing flag word returned by
-  /// getFlagWord with information indicating that this input operand is tied 
+  /// getFlagWord with information indicating that this input operand is tied
   /// to a previous output operand.
   static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
                                            unsigned MatchedOperandNo) {
diff --git a/include/llvm/IR/Instruction.def b/include/llvm/IR/Instruction.def
index 72263f06ef4..ee4956bdf27 100644
--- a/include/llvm/IR/Instruction.def
+++ b/include/llvm/IR/Instruction.def
@@ -1,21 +1,21 @@
 //===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
-// 
+//
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file contains descriptions of the various LLVM instructions. This is
-// used as a central place for enumerating the different instructions and 
+// used as a central place for enumerating the different instructions and
 // should eventually be the place to put comments about the instructions.
 //
 //===----------------------------------------------------------------------===//

 // NOTE: NO INCLUDE GUARD DESIRED!

-// Provide definitions of macros so that users of this file do not have to 
+// Provide definitions of macros so that users of this file do not have to
 // define everything to use it...
 //
 #ifndef FIRST_TERM_INST
@@ -145,7 +145,7 @@ HANDLE_MEMORY_INST(38, AtomicRMW , AtomicRMWInst )
 LAST_MEMORY_INST(38)

 // Cast operators ...
-// NOTE: The order matters here because CastInst::isEliminableCastPair 
+// NOTE: The order matters here because CastInst::isEliminableCastPair
 // NOTE: (see Instructions.cpp) encodes a table based on this ordering.
 FIRST_CAST_INST(39)
 HANDLE_CAST_INST(39, Trunc , TruncInst )  // Truncate integers
diff --git a/include/llvm/IR/LegacyPassManagers.h b/include/llvm/IR/LegacyPassManagers.h
index ab2e2280f9a..ee054a860cd 100644
--- a/include/llvm/IR/LegacyPassManagers.h
+++ b/include/llvm/IR/LegacyPassManagers.h
@@ -440,7 +440,7 @@ public:

   /// doFinalization - Overrides ModulePass doFinalization for global
   /// finalization tasks
-  /// 
+  ///
   using ModulePass::doFinalization;

   /// doFinalization - Run all of the finalizers for the function passes.
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index 8979f1dbd14..18086c96202 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -38,10 +38,10 @@ template struct GraphTraits;
 /// they are never changed. Also note that only one instance of a particular
 /// type is ever created. Thus seeing if two types are equal is a matter of
 /// doing a trivial pointer comparison. To enforce that no two equal instances
-/// are created, Type instances can only be created via static factory methods 
+/// are created, Type instances can only be created via static factory methods
 /// in class Type and in derived classes. Once allocated, Types are never
 /// free'd.
-/// 
+///
 class Type {
 public:
   //===--------------------------------------------------------------------===//
@@ -219,9 +219,9 @@ public:
   ///
   bool isVectorTy() const { return getTypeID() == VectorTyID; }

-  /// canLosslesslyBitCastTo - Return true if this type could be converted 
-  /// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts 
-  /// are valid for types of the same size only where no re-interpretation of 
+  /// canLosslesslyBitCastTo - Return true if this type could be converted
+  /// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts
+  /// are valid for types of the same size only where no re-interpretation of
   /// the bits is done.
   /// @brief Determine if this type could be losslessly bitcast to Ty
   bool canLosslesslyBitCastTo(Type *Ty) const;
diff --git a/include/llvm/IR/TypeFinder.h b/include/llvm/IR/TypeFinder.h
index 73a63ad0349..5f3854377c1 100644
--- a/include/llvm/IR/TypeFinder.h
+++ b/include/llvm/IR/TypeFinder.h
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file contains the declaration of the TypeFinder class. 
+// This file contains the declaration of the TypeFinder class.
 //
 //===----------------------------------------------------------------------===//

diff --git a/include/llvm/IR/ValueSymbolTable.h b/include/llvm/IR/ValueSymbolTable.h
index bf1fade1cce..605c8a84535 100644
--- a/include/llvm/IR/ValueSymbolTable.h
+++ b/include/llvm/IR/ValueSymbolTable.h
@@ -65,7 +65,7 @@ public:
 public:

   /// This method finds the value with the given \p Name in the
-  /// the symbol table. 
+  /// the symbol table.
   /// @returns the value associated with the \p Name
   /// @brief Lookup a named Value.
   Value *lookup(StringRef Name) const { return vmap.lookup(Name); }
@@ -97,13 +97,13 @@ public:

   /// @brief Get a const_iterator to the end of the symbol table.
   inline const_iterator end() const { return vmap.end(); }
-  
-/// @}
-/// @name Mutators
-/// @{
+
+  /// @}
+  /// @name Mutators
+  /// @{
 private:
   /// This method adds the provided value \p N to the symbol table. The Value
-  /// must have a name which is used to place the value in the symbol table. 
+  /// must have a name which is used to place the value in the symbol table.
   /// If the inserted name conflicts, this renames the value.
   /// @brief Add a named value to the symbol table
   void reinsertValue(Value *V);
@@ -117,10 +117,10 @@ private:
   /// ValueName attached to the value, but it is no longer inserted in the
   /// symtab.
   void removeValueName(ValueName *V);
-  
-/// @}
-/// @name Internal Data
-/// @{
+
+  /// @}
+  /// @name Internal Data
+  /// @{
 private:
   ValueMap vmap;                    ///< The map that holds the symbol table.
   mutable uint32_t LastUnique;      ///< Counter for tracking unique names
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index 7ca188145c0..a8e36dd12e0 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -1,4 +1,4 @@
-//===---- DemandedBits.cpp - Determine demanded bits -----------------------===//
+//===---- DemandedBits.cpp - Determine demanded bits ----------------------===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -315,7 +315,7 @@ bool DemandedBits::runOnFunction(Function& F) {
             !isAlwaysLive(UserI)) {
           AB = APInt(BitWidth, 0);
         } else {
-          // If all bits of the output are dead, then all bits of the input 
+          // If all bits of the output are dead, then all bits of the input
           // Bits of each operand that are used to compute alive bits of the
           // output are alive, all others are dead.
           determineLiveOperandBits(UserI, I, OI.getOperandNo(), AOut, AB,
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index f3bc2636993..fa3c8be81ba 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1212,8 +1212,8 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
   // use this variable to check later. Because it might be better.
   // For example, we can just use `leal (%rsi,%rdi), %eax` and `ret`
   // instead of the following code.
-  // addl %esi, %edi 
-  // movl %edi, %eax 
+  // addl %esi, %edi
+  // movl %edi, %eax
   // ret

   bool Commuted = false;
@@ -1661,8 +1661,8 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
         unsigned DstReg = mi->getOperand(DstIdx).getReg();
         if (SrcReg != DstReg &&
             tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist, false)) {
-          // The tied operands have been eliminated or shifted further down the 
-          // block to ease elimination. Continue processing with 'nmi'.
+          // The tied operands have been eliminated or shifted further down
+          // the block to ease elimination. Continue processing with 'nmi'.
           TiedOperands.clear();
           mi = nmi;
           continue;
diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp
index c053313d293..ed2eb09327f 100644
--- a/lib/Object/MachOObjectFile.cpp
+++ b/lib/Object/MachOObjectFile.cpp
@@ -1222,7 +1222,7 @@ void ExportEntry::moveToEnd() {
 }

 bool ExportEntry::operator==(const ExportEntry &Other) const {
-  // Common case, one at end, other iterating from begin. 
+  // Common case, one at end, other iterating from begin.
   if (Done || Other.Done)
     return (Done == Other.Done);
   // Not equal if different stack sizes.
@@ -1334,7 +1334,7 @@ void ExportEntry::pushDownUntilBottom() {
 //
 // There is one "export" node for each exported symbol. But because some
 // symbols may be a prefix of another symbol (e.g. _dup and _dup2), an export
-// node may have child nodes too. 
+// node may have child nodes too.
 //
 // The algorithm for moveNext() is to keep moving down the leftmost unvisited
 // child until hitting a node with no children (which is an export node or
diff --git a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 64f4484f022..db8df5c2f01 100644
--- a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -512,8 +512,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
       return SelectCode(NewValue.getNode());
     }

-    // getNode() may fold the bitcast if its input was another bitcast. If that
-    // happens we should only select the new store.
+    // getNode() may fold the bitcast if its input was another bitcast. If
+    // that happens we should only select the new store.
     N = NewStore.getNode();
   }

@@ -857,7 +857,8 @@ SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
   unsigned Opc
     = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

-  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
+  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
+  // omod
   SDValue Ops[8];

   SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 120966e92db..66fdeb57f3b 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -569,7 +569,7 @@ SDValue SITargetLowering::LowerFormalArguments(
   }

   // The pointer to the list of arguments is stored in SGPR0, SGPR1
-  // The pointer to the scratch buffer is stored in SGPR2, SGPR3 
+  // The pointer to the scratch buffer is stored in SGPR2, SGPR3
   if (Info->getShaderType() == ShaderType::COMPUTE) {
     if (Subtarget->isAmdHsaOS())
       Info->NumUserSGPRs = 2;  // FIXME: Need to support scratch buffers.
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index e5531b8dc81..9589fee7db6 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -407,9 +407,9 @@ void PPCAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
     // If we're on ELFv1, then we need to load the actual function pointer
     // from the function descriptor.
     if (!Subtarget->isELFv2ABI()) {
-      // Load the new TOC pointer and the function address, but not r11 
-      // (needing this is rare, and loading it here would prevent passing it 
-      // via a 'nest' parameter. 
+      // Load the new TOC pointer and the function address, but not r11
+      // (needing this is rare, and loading it here would prevent passing it
+      // via a 'nest' parameter.
       EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LD)
                                       .addReg(PPC::X2)
                                       .addImm(8)
@@ -534,7 +534,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case PPC::MovePCtoLR:
   case PPC::MovePCtoLR8: {
     // Transform %LR = MovePCtoLR
-    // Into this, where the label is the PIC base: 
+    // Into this, where the label is the PIC base:
     //     bl L1$pb
     // L1$pb:
     MCSymbol *PICBase = MF->getPICBaseSymbol();
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 92975e1d732..34a470f5c5a 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1244,7 +1244,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,

 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
-/// The ShuffleKind distinguishes between big-endian merges with two 
+/// The ShuffleKind distinguishes between big-endian merges with two
 /// different inputs (0), either-endian merges with two identical inputs (1),
 /// and little-endian merges with two different inputs (2). For the latter,
 /// the input operands are swapped (see PPCInstrAltivec.td).
@@ -1269,7 +1269,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,

 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
-/// The ShuffleKind distinguishes between big-endian merges with two 
+/// The ShuffleKind distinguishes between big-endian merges with two
 /// different inputs (0), either-endian merges with two identical inputs (1),
 /// and little-endian merges with two different inputs (2). For the latter,
 /// the input operands are swapped (see PPCInstrAltivec.td).
@@ -1361,7 +1361,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
  * - 2 = little-endian merge with two different inputs (inputs are swapped for
  *   little-endian merges).
  * \param[in] DAG The current SelectionDAG
- * \return true iff this shuffle mask 
+ * \return true iff this shuffle mask
  */
 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                               unsigned ShuffleKind, SelectionDAG &DAG) {
@@ -1388,7 +1388,7 @@ bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,

 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
 /// amount, otherwise return -1.
-/// The ShuffleKind distinguishes between big-endian operations with two 
+/// The ShuffleKind distinguishes between big-endian operations with two
 /// different inputs (0), either-endian operations with two identical inputs
 /// (1), and little-endian operations with two different inputs (2). For the
 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
@@ -2547,7 +2547,7 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
 #include "PPCGenCallingConv.inc"

-// Function whose sole purpose is to kill compiler warnings 
+// Function whose sole purpose is to kill compiler warnings
 // stemming from unused functions included from PPCGenCallingConv.inc.
 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const {
   return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
 }
@@ -4826,8 +4826,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
           continue;
         break;
       case MVT::v4f32:
-        // When using QPX, this is handled like a FP register, otherwise, it 
-        // is an Altivec register. 
+        // When using QPX, this is handled like a FP register, otherwise, it
+        // is an Altivec register.
         if (Subtarget.hasQPX()) {
           if (++NumFPRsUsed <= NumFPRs)
             continue;
@@ -9504,7 +9504,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
   }

   // Visit all inputs, collect all binary operations (and, or, xor and
-  // select) that are all fed by extensions. 
+  // select) that are all fed by extensions.
   while (!BinOps.empty()) {
     SDValue BinOp = BinOps.back();
     BinOps.pop_back();
@@ -9716,7 +9716,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
   SmallPtrSet Visited;

   // Visit all inputs, collect all binary operations (and, or, xor and
-  // select) that are all fed by truncations. 
+  // select) that are all fed by truncations.
   while (!BinOps.empty()) {
     SDValue BinOp = BinOps.back();
     BinOps.pop_back();
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 137e91f72fc..b52c8f1f7ca 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -347,9 +347,9 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {

   // Determine the previous frame's address. If FrameSize can't be
   // represented as 16 bits or we need special alignment, then we load the
-  // previous frame's address from 0(SP). Why not do an addis of the hi? 
-  // Because R0 is our only safe tmp register and addi/addis treat R0 as zero. 
-  // Constructing the constant and adding would take 3 instructions. 
+  // previous frame's address from 0(SP). Why not do an addis of the hi?
+  // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
+  // Constructing the constant and adding would take 3 instructions.
   // Fortunately, a frame greater than 32K is rare.
   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
@@ -802,8 +802,9 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // If we're not using a Frame Pointer that has been set to the value of the
   // SP before having the stack size subtracted from it, then add the stack size
   // to Offset to get the correct offset.
-  // Naked functions have stack size 0, although getStackSize may not reflect that
-  // because we didn't call all the pieces that compute it for naked functions.
+  // Naked functions have stack size 0, although getStackSize may not reflect
+  // that because we didn't call all the pieces that compute it for naked
+  // functions.
   if (!MF.getFunction()->hasFnAttribute(Attribute::Naked)) {
     if (!(hasBasePointer(MF) && FrameIndex < 0))
       Offset += MFI->getStackSize();
   }
@@ -842,7 +843,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
       .addImm(Offset);

   // Convert into indexed form of the instruction:
-  // 
+  //
   //   sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
   //   addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
   unsigned OperandBase;
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index 6e4bc92581a..7485d1ba978 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -165,9 +165,9 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
   return PPCTargetMachine::PPC_ABI_UNKNOWN;
 }

-// The FeatureString here is a little subtle. We are modifying the feature string
-// with what are (currently) non-function specific overrides as it goes into the
-// LLVMTargetMachine constructor and then using the stored value in the 
+// The FeatureString here is a little subtle. We are modifying the feature
+// string with what are (currently) non-function specific overrides as it goes
+// into the LLVMTargetMachine constructor and then using the stored value in the
 // Subtarget constructor below it.
 PPCTargetMachine::PPCTargetMachine(const Target &T, const Triple &TT,
                                    StringRef CPU, StringRef FS,
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index a9590aafa97..12b2f4ee8a1 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -10652,12 +10652,12 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                                 DL, VT, V1, V2, Mask, Subtarget, DAG))
       return Insertion;

-  // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
-  // check for those subtargets here and avoid much of the subtarget querying in
-  // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
-  // ability to manipulate a 256-bit vector with integer types. Since we'll use
-  // floating point types there eventually, just immediately cast everything to
-  // a float and operate entirely in that domain.
+  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
+  // can check for those subtargets here and avoid much of the subtarget
+  // querying in the per-vector-type lowering routines. With AVX1 we have
+  // essentially *zero* ability to manipulate a 256-bit vector with integer
+  // types. Since we'll use floating point types there eventually, just
+  // immediately cast everything to a float and operate entirely in that domain.
   if (VT.isInteger() && !Subtarget->hasAVX2()) {
     int ElementBits = VT.getScalarSizeInBits();
     if (ElementBits < 32)
@@ -16018,7 +16018,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
     SDValue Src2 = Op.getOperand(2);
     SDValue PassThru = Op.getOperand(3);
     SDValue Mask = Op.getOperand(4);
-    // We specify 2 possible modes for intrinsics, with/without rounding modes.
+    // We specify 2 possible modes for intrinsics, with/without rounding
+    // modes.
     // First, we check if the intrinsic have rounding mode (6 operands),
     // if not, we set rounding mode to "current".
     SDValue Rnd;
@@ -16048,7 +16049,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
     SDValue Imm = Op.getOperand(3);
     SDValue PassThru = Op.getOperand(4);
     SDValue Mask = Op.getOperand(5);
-    // We specify 2 possible modes for intrinsics, with/without rounding modes.
+    // We specify 2 possible modes for intrinsics, with/without rounding
+    // modes.
     // First, we check if the intrinsic have rounding mode (7 operands),
     // if not, we set rounding mode to "current".
     SDValue Rnd;
@@ -22382,7 +22384,8 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef Mask,
   return V;
 }

-/// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
+/// \brief Search for a combinable shuffle across a chain ending in pshuflw or
+/// pshufhw.
 ///
 /// We walk up the chain, skipping shuffles of the other half and looking
 /// through shuffles which switch halves trying to find a shuffle of the same
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index 52b56e02890..b79b12ac157 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
 // integer domain inputs, produce an integer output; fadd, for example.
 //
 // If a non-mappable instruction is seen, this entire def-use graph is marked
-// as non-transformable. If we see an instruction that converts from the 
+// as non-transformable. If we see an instruction that converts from the
 // integer domain to FP domain (uitofp,sitofp), we terminate our walk.

 /// The largest integer type worth dealing with.
@@ -181,7 +181,7 @@ ConstantRange Float2Int::validateRange(ConstantRange R) {
 // - walkForwards: Iterate over SeenInsts in reverse order, so we visit
 //   defs before their uses. Calculate the real range info.

-// Breadth-first walk of the use-def graph; determine the set of nodes 
+// Breadth-first walk of the use-def graph; determine the set of nodes
 // we care about and eagerly determine if some of them are poisonous.
 void Float2Int::walkBackwards(const SmallPtrSetImpl &Roots) {
   std::deque Worklist(Roots.begin(), Roots.end());
@@ -319,7 +319,7 @@ void Float2Int::walkForwards() {
       APFloat F = CF->getValueAPF();

       // First, weed out obviously incorrect values. Non-finite numbers
-      // can't be represented and neither can negative zero, unless 
+      // can't be represented and neither can negative zero, unless
       // we're in fast math mode.
       if (!F.isFinite() ||
           (F.isZero() && F.isNegative() && isa(I) &&