From 5e261ee7b00d5e22acf7635e2044186c67dc3692 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer
Date: Mon, 9 Mar 2015 20:20:16 +0000
Subject: [PATCH] Remove the remaining uses of abs64 and nuke it.

std::abs works just fine and we're already using it in many places. NFC
intended.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231696 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Support/MathExtras.h                  | 7 -------
 lib/Target/ARM/ARMISelLowering.cpp                 | 6 +++---
 lib/Target/Hexagon/HexagonHardwareLoops.cpp        | 2 +-
 lib/Target/PowerPC/PPCLoopDataPrefetch.cpp         | 2 +-
 lib/Transforms/Scalar/AlignmentFromAssumptions.cpp | 2 +-
 lib/Transforms/Scalar/LoopStrengthReduce.cpp       | 4 ++--
 lib/Transforms/Vectorize/BBVectorize.cpp           | 8 ++++----
 7 files changed, 12 insertions(+), 19 deletions(-)

diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h
index 2459bfeeaf4..388d82ceba5 100644
--- a/include/llvm/Support/MathExtras.h
+++ b/include/llvm/Support/MathExtras.h
@@ -604,13 +604,6 @@ inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
   return RoundUpToAlignment(Value, Align) - Value;
 }
 
-/// abs64 - absolute value of a 64-bit int. Not all environments support
-/// "abs" on whatever their name for the 64-bit int type is. The absolute
-/// value of the largest negative number is undefined, as with "abs".
-inline int64_t abs64(int64_t x) {
-  return (x < 0) ? -x : x;
-}
-
 /// SignExtend32 - Sign extend B-bit number x to 32-bit int.
 /// Usage int32_t r = SignExtend32<5>(x);
 template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 6e9b758abe9..53765846d6f 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -10310,9 +10310,9 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
   // Thumb2 and ARM modes can use cmn for negative immediates.
   if (!Subtarget->isThumb())
-    return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1;
+    return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
   if (Subtarget->isThumb2())
-    return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1;
+    return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
   // Thumb1 doesn't have cmn, and only 8-bit immediates.
   return Imm >= 0 && Imm <= 255;
 }
@@ -10323,7 +10323,7 @@ bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
 /// immediate into a register.
 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
   // Same encoding for add/sub, just flip the sign.
-  int64_t AbsImm = llvm::abs64(Imm);
+  int64_t AbsImm = std::abs(Imm);
   if (!Subtarget->isThumb())
     return ARM_AM::getSOImmVal(AbsImm) != -1;
   if (Subtarget->isThumb2())
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 1577c33f4f6..c47ee9c8e2e 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -690,7 +690,7 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop,
 
   // If the induction variable bump is not a power of 2, quit.
   // Othwerise we'd need a general integer division.
-  if (!isPowerOf2_64(abs64(IVBump)))
+  if (!isPowerOf2_64(std::abs(IVBump)))
     return nullptr;
 
   MachineBasicBlock *PH = Loop->getLoopPreheader();
diff --git a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
index c1f204fcfcd..afa83cfd2b1 100644
--- a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
+++ b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
@@ -192,7 +192,7 @@ bool PPCLoopDataPrefetch::runOnLoop(Loop *L) {
 
       const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, K->second);
       if (const SCEVConstant *ConstPtrDiff = dyn_cast<SCEVConstant>(PtrDiff)) {
-        int64_t PD = abs64(ConstPtrDiff->getValue()->getSExtValue());
+        int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
         if (PD < (int64_t) CacheLineSize) {
           DupPref = true;
           break;
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 64e2c2b6281..9b74648ea46 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -124,7 +124,7 @@ static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
 
     // If the displacement is not an exact multiple, but the remainder is a
     // constant, then return this remainder (but only if it is a power of 2).
-    uint64_t DiffUnitsAbs = abs64(DiffUnits);
+    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
     if (isPowerOf2_64(DiffUnitsAbs))
       return (unsigned) DiffUnitsAbs;
   }
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 318065e0dc1..2c0769a91e4 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3825,7 +3825,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
         if (C->getValue()->isNegative() !=
               (NewF.BaseOffset < 0) &&
             (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale))
-              .ule(abs64(NewF.BaseOffset)))
+              .ule(std::abs(NewF.BaseOffset)))
           continue;
 
         // OK, looks good.
@@ -3856,7 +3856,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
              J != JE; ++J)
           if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
             if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt(
-                  abs64(NewF.BaseOffset)) &&
+                  std::abs(NewF.BaseOffset)) &&
                 (C->getValue()->getValue() +
                  NewF.BaseOffset).countTrailingZeros() >=
                 countTrailingZeros(NewF.BaseOffset))
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index ec10bc36401..c16e4e089d7 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -649,11 +649,11 @@ namespace {
       if (VTy != VTy2 && Offset < 0) {
         int64_t VTy2TSS = (int64_t) DL->getTypeStoreSize(VTy2);
         OffsetInElmts = Offset/VTy2TSS;
-        return (abs64(Offset) % VTy2TSS) == 0;
+        return (std::abs(Offset) % VTy2TSS) == 0;
       }
 
       OffsetInElmts = Offset/VTyTSS;
-      return (abs64(Offset) % VTyTSS) == 0;
+      return (std::abs(Offset) % VTyTSS) == 0;
     }
 
     return false;
@@ -984,8 +984,8 @@ namespace {
       unsigned IAlignment, JAlignment, IAddressSpace, JAddressSpace;
       int64_t OffsetInElmts = 0;
      if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
-                        IAddressSpace, JAddressSpace,
-                        OffsetInElmts) && abs64(OffsetInElmts) == 1) {
+                        IAddressSpace, JAddressSpace, OffsetInElmts) &&
+          std::abs(OffsetInElmts) == 1) {
        FixedOrder = (int) OffsetInElmts;
        unsigned BottomAlignment = IAlignment;
        if (OffsetInElmts < 0) BottomAlignment = JAlignment;
-- 
2.34.1
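
Note, not part of the patch: the deleted helper only existed because, before C++11, the standard library did not guarantee an abs overload for the 64-bit integer type (as the removed doc comment says). Below is a minimal standalone sketch of the equivalence the commit message relies on; it assumes C++11 and uses only the <cstdlib> overloads std::abs(long)/std::abs(long long), which cover int64_t. The local abs64 copy and the sample values are illustrative only.

    // Sketch: std::abs from <cstdlib> matches the removed llvm::abs64 for every
    // defined input. INT64_MIN is excluded because its absolute value is not
    // representable, which is undefined behavior for both functions.
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Local copy of the removed helper, kept here only for comparison.
    static int64_t abs64(int64_t x) { return (x < 0) ? -x : x; }

    int main() {
      const int64_t Samples[] = {0, 1, -1, 255, -255, INT64_MAX, INT64_MIN + 1};
      for (int64_t S : Samples)
        assert(abs64(S) == std::abs(S)); // same result, no truncation to int
      return 0;
    }

Since the arguments at the call sites in this diff are int64_t values, overload resolution picks the wide std::abs overload rather than the int-only C abs.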