From 0973b7ddb8f8267132147c8b24dae7b2dfa1fd02 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer <benny.kra@googlemail.com>
Date: Fri, 10 Apr 2015 11:24:51 +0000
Subject: [PATCH] Reduce dyn_cast<> to isa<> or cast<> where possible.

No functional change intended.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@234586 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Analysis/ValueTracking.cpp                |  5 ++--
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp      | 14 +++++-----
 .../SelectionDAG/StatepointLowering.cpp       |  2 +-
 lib/ExecutionEngine/Interpreter/Execution.cpp | 16 +++++------
 lib/TableGen/Record.cpp                       |  2 +-
 lib/Target/AArch64/AArch64ISelDAGToDAG.cpp    |  2 +-
 lib/Target/BPF/BPFISelDAGToDAG.cpp            |  2 +-
 lib/Target/Hexagon/HexagonISelLowering.cpp    |  2 +-
 lib/Target/Mips/MipsFastISel.cpp              |  2 +-
 lib/Target/NVPTX/NVPTXAsmPrinter.cpp          |  5 ++--
 lib/Target/NVPTX/NVPTXISelLowering.cpp        |  2 +-
 lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp     |  6 ++--
 lib/Target/R600/AMDGPUISelDAGToDAG.cpp        |  2 +-
 lib/Target/R600/AMDGPUInstructions.td         |  4 +--
 lib/Target/R600/R600ISelLowering.cpp          |  4 +--
 .../R600/R600TextureIntrinsicsReplacer.cpp    |  4 +--
 lib/Target/X86/X86ISelLowering.cpp            | 12 ++++----
 lib/Target/X86/X86InstrFragmentsSIMD.td       | 28 +++++++++----------
 .../Instrumentation/AddressSanitizer.cpp      |  2 +-
 .../Instrumentation/SanitizerCoverage.cpp     |  2 +-
 lib/Transforms/Vectorize/SLPVectorizer.cpp    |  4 +--
 utils/yaml-bench/YAMLBench.cpp                |  2 +-
 22 files changed, 60 insertions(+), 64 deletions(-)

diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index f329e3a5084..a6b975b7b0d 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -694,10 +694,9 @@ static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
   // We're running this loop for once for each value queried resulting in a
   // runtime of ~O(#assumes * #values).
 
-  assert(isa<IntrinsicInst>(I) &&
-         dyn_cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::assume &&
+  assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
          "must be an assume intrinsic");
-
+
   Value *Arg = I->getArgOperand(0);
 
   if (Arg == V && isValidAssumeForContext(I, Q)) {
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2b169b755fe..37263ff78a8 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1984,12 +1984,12 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
     N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
     N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
   } else {
-    N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
-    ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
-                            : APInt();
-    N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
-    ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
-                            : APInt();
+    N0IsConst = isa<ConstantSDNode>(N0);
+    if (N0IsConst)
+      ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
+    N1IsConst = isa<ConstantSDNode>(N1);
+    if (N1IsConst)
+      ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
   }
 
   // fold (mul c1, c2) -> c1*c2
@@ -11662,7 +11662,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
     // type.
     if (V->getOperand(0).getValueType() != NVT)
       return SDValue();
-    unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+    unsigned Idx = N->getConstantOperandVal(1);
     unsigned NumElems = NVT.getVectorNumElements();
     assert((Idx % NumElems) == 0 &&
            "IDX in concat is not a multiple of the result vector length.");
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 150dac34f05..a9ffa728228 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -626,7 +626,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
   // Add a leading constant argument with the Flags and the calling convention
   // masked together
   CallingConv::ID CallConv = CS.getCallingConv();
-  int Flags = dyn_cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
+  int Flags = cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
   assert(Flags == 0 && "not expected to be used");
   Ops.push_back(DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
   Ops.push_back(
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 2e8eb166c67..a26740b0da8 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -316,7 +316,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
 
 #define IMPLEMENT_VECTOR_FCMP(OP)                                   \
   case Type::VectorTyID:                                            \
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {   \
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {      \
       IMPLEMENT_VECTOR_FCMP_T(OP, Float);                           \
     } else {                                                        \
       IMPLEMENT_VECTOR_FCMP_T(OP, Double);                          \
@@ -363,7 +363,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
 
 #define MASK_VECTOR_NANS(TY, X,Y, FLAG)                             \
   if (TY->isVectorTy()) {                                           \
-    if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) {  \
+    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) {      \
       MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                         \
     } else {                                                        \
       MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                        \
@@ -536,7 +536,7 @@ static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
   if(Ty->isVectorTy()) {
     assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
     Dest.AggregateVal.resize( Src1.AggregateVal.size() );
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
       for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
         Dest.AggregateVal[_i].IntVal = APInt(1,
         ( (Src1.AggregateVal[_i].FloatVal ==
@@ -567,7 +567,7 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
   if(Ty->isVectorTy()) {
     assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
     Dest.AggregateVal.resize( Src1.AggregateVal.size() );
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
       for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
         Dest.AggregateVal[_i].IntVal = APInt(1,
         ( (Src1.AggregateVal[_i].FloatVal !=
@@ -712,10 +712,10 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
   }
 
 #define FLOAT_VECTOR_OP(OP) {                                         \
-  if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())        \
+  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())            \
     FLOAT_VECTOR_FUNCTION(OP, FloatVal)                               \
   else {                                                              \
-    if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())     \
+    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())         \
       FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                            \
     else {                                                            \
       dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
@@ -745,12 +745,12 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
     case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
     case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
-      if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
+      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
-        if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
         for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
           R.AggregateVal[i].DoubleVal =
           fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
diff --git a/lib/TableGen/Record.cpp b/lib/TableGen/Record.cpp
index 4ae99037099..8a8f0ee3a08 100644
--- a/lib/TableGen/Record.cpp
+++ b/lib/TableGen/Record.cpp
@@ -2040,7 +2040,7 @@ RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
 /// to CurRec's name.
 Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
                         Init *Name, const std::string &Scoper) {
-  RecTy *Type = dyn_cast<TypedInit>(Name)->getType();
+  RecTy *Type = cast<TypedInit>(Name)->getType();
 
   BinOpInit *NewName =
     BinOpInit::get(BinOpInit::STRCONCAT,
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 0a47dcb725f..f75700d6d33 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -848,7 +848,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
   //  MOV  X0, WideImmediate
   //  LDR  X2, [BaseReg, X0]
   if (isa<ConstantSDNode>(RHS)) {
-    int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
+    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
     unsigned Scale = Log2_32(Size);
     // Skip the immediate can be seleced by load/store addressing mode.
     // Also skip the immediate can be encoded by a single ADD (SUB is also
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index b91b0e1390d..b2599fe9637 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -132,7 +132,7 @@ SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
   }
 
   case ISD::FrameIndex: {
-    int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
+    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
     EVT VT = Node->getValueType(0);
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
     unsigned Opc = BPF::MOV_rr;
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index a2209ab187e..51b79cdef9b 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2106,7 +2106,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
       // is Big Endian.
       unsigned OpIdx = NElts - i - 1;
       SDValue Operand = BVN->getOperand(OpIdx);
-      if (dyn_cast<ConstantSDNode>(Operand))
+      if (isa<ConstantSDNode>(Operand))
         // This operand is already in ConstVal.
         continue;
diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp
index 7de00818337..e8e3d3d4b92 100644
--- a/lib/Target/Mips/MipsFastISel.cpp
+++ b/lib/Target/Mips/MipsFastISel.cpp
@@ -440,7 +440,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
 
 bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
   const GlobalValue *GV = dyn_cast<GlobalValue>(V);
-  if (GV && isa<Function>(GV) && dyn_cast<Function>(GV)->isIntrinsic())
+  if (GV && isa<Function>(GV) && cast<Function>(GV)->isIntrinsic())
     return false;
   if (!GV)
     return false;
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 477d3c7a546..22178f635b1 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1765,12 +1765,11 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
   case Type::IntegerTyID: {
     const Type *ETy = CPV->getType();
     if (ETy == Type::getInt8Ty(CPV->getContext())) {
-      unsigned char c =
-          (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
       ptr = &c;
       aggBuffer->addBytes(ptr, 1, Bytes);
     } else if (ETy == Type::getInt16Ty(CPV->getContext())) {
-      short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
       ptr = (unsigned char *)&int16;
       aggBuffer->addBytes(ptr, 2, Bytes);
     } else if (ETy == Type::getInt32Ty(CPV->getContext())) {
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index ff74e6e8ff3..8b0665708b9 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3893,7 +3893,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
     const SDNode *left = N0.getOperand(0).getNode();
     const SDNode *right = N0.getOperand(1).getNode();
 
-    if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))
+    if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
       opIsLive = true;
 
     if (!opIsLive)
diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 32646ee1133..6ab0fadf9a3 100644
--- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -70,8 +70,8 @@ static void convertTransferToLoop(
 
   // srcAddr and dstAddr are expected to be pointer types,
   // so no check is made here.
-  unsigned srcAS = dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace();
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned srcAS = cast<PointerType>(srcAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointers to (char *)
   srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS));
@@ -108,7 +108,7 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
   origBB->getTerminator()->setSuccessor(0, loopBB);
   IRBuilder<> builder(origBB, origBB->getTerminator());
 
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointer to the type of value getting stored
   dstAddr =
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 7341cd97e61..def252a47b2 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -345,7 +345,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     unsigned NOps = N->getNumOperands();
     for (unsigned i = 0; i < NOps; i++) {
       // XXX: Why is this here?
-      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+      if (isa<RegisterSDNode>(N->getOperand(i))) {
         IsRegSeq = false;
         break;
       }
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index 4d08201d55e..eeb7f3fcde5 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -358,7 +358,7 @@ def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
 
 def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                             (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
 }]>;
 
 
@@ -389,7 +389,7 @@ def flat_store : PatFrag<(ops node:$val, node:$ptr),
 
 def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
 }]>;
 
 class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index a34e2dc8f5c..b6b7067f7e1 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -1811,7 +1811,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
 
   BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
@@ -1819,7 +1819,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
   SwizzleRemap.clear();
   BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
diff --git a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
index 419ec8b3d7e..2fc7b02f673 100644
--- a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
+++ b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
@@ -162,7 +162,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(2);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
@@ -186,7 +186,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(5);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 2101724588b..4a031919395 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -11970,7 +11970,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
   // Now we have only mask extension
   assert(InVT.getVectorElementType() == MVT::i1);
   SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -12046,7 +12046,7 @@ SDValue
X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
   }
 
   SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -15287,10 +15287,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
   }
   case PREFETCH: {
     SDValue Hint = Op.getOperand(6);
-    unsigned HintVal;
-    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
-        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
-      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+    unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
+    assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
     unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
     SDValue Chain = Op.getOperand(0);
     SDValue Mask = Op.getOperand(2);
@@ -24242,7 +24240,7 @@ TargetLowering::ConstraintWeight
       break;
     case 'G':
     case 'C':
-      if (dyn_cast<ConstantFP>(CallOperandVal)) {
+      if (isa<ConstantFP>(CallOperandVal)) {
         weight = CW_Constant;
       }
       break;
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 0bdabdf30d0..b75a9f4b2d0 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -631,53 +631,53 @@ def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
 def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedLoadSDNode>(N) != 0);
+  return isa<MaskedLoadSDNode>(N);
 }]>;
 
 def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedStoreSDNode>(N) != 0);
+  return isa<MaskedStoreSDNode>(N);
 }]>;
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index b506eb03fb5..8d6d3ce3e61 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1053,7 +1053,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
     // path is rarely taken. This seems to be the case for SPEC benchmarks.
     TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
         Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
-    assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
     IRB.SetInsertPoint(CheckTerm);
     Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 5e6dc89132d..662513d508d 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -308,7 +308,7 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls(
     IRBuilder<> IRB(I);
     CallSite CS(I);
     Value *Callee = CS.getCalledValue();
-    if (dyn_cast<InlineAsm>(Callee)) continue;
+    if (isa<InlineAsm>(Callee)) continue;
     GlobalVariable *CalleeCache = new GlobalVariable(
         *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
         Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 8fc4cc1466a..5eae4e278c5 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1183,7 +1183,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
     case Instruction::ICmp:
     case Instruction::FCmp: {
       // Check that all of the compares have the same predicate.
-      CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
       Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
       for (unsigned i = 1, e = VL.size(); i < e; ++i) {
         CmpInst *Cmp = cast<CmpInst>(VL[i]);
@@ -2202,7 +2202,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       if (Value *V = alreadyVectorized(E->Scalars))
         return V;
 
-      CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
       Value *V;
       if (Opcode == Instruction::FCmp)
         V = Builder.CreateFCmp(P0, L, R);
diff --git a/utils/yaml-bench/YAMLBench.cpp b/utils/yaml-bench/YAMLBench.cpp
index 8bd1ea17ea8..872f586ef7e 100644
--- a/utils/yaml-bench/YAMLBench.cpp
+++ b/utils/yaml-bench/YAMLBench.cpp
@@ -117,7 +117,7 @@ static void dumpNode( yaml::Node *n
     outs() << indent(Indent) << "}";
   } else if (yaml::AliasNode *an = dyn_cast<yaml::AliasNode>(n)){
     outs() << "*" << an->getName();
-  } else if (dyn_cast<yaml::NullNode>(n)) {
+  } else if (isa<yaml::NullNode>(n)) {
     outs() << prettyTag(n) << " null";
   }
 }
-- 
2.34.1
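
Editor's note (not part of the commit): the patch applies LLVM's casting idiom
consistently. isa<T> is a pure type test; cast<T> converts a value whose type
is already established, asserting on mismatch rather than returning null, so
its result should never be null-checked; dyn_cast<T> tests and converts in one
step and returns null on failure. A minimal sketch of the idiom against the
LLVM 3.x-era casting API follows; the helper checkAlign and its logic are
hypothetical, written only to contrast the three forms:

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;

  static unsigned checkAlign(Value *V) {
    // isa<T>: a bare predicate; use it when no converted pointer is needed.
    if (isa<Constant>(V))
      return 0;
    // cast<T>: the type is already proven (here by the isa<> guard), so
    // cast<> is correct and a dyn_cast<> would only hide a potential bug.
    if (isa<LoadInst>(V))
      return cast<LoadInst>(V)->getAlignment();
    // dyn_cast<T>: test and convert together; binding the result once
    // avoids the repeated dyn_cast<>/cast<> queries this patch cleans up.
    if (auto *SI = dyn_cast<StoreInst>(V))
      return SI->getAlignment();
    return 0;
  }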