#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
}
// Otherwise, if the instruction is in the entry block, and is not an invoke,
- // then it obviously dominates all phi nodes.
+ // and is not a catchpad, then it obviously dominates all phi nodes.
if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
- !isa<InvokeInst>(I))
+ !isa<InvokeInst>(I) && !isa<CatchPadInst>(I))
return true;
return false;
// Evaluate the BinOp on the incoming phi values.
Value *CommonValue = nullptr;
- for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
- Value *Incoming = PI->getIncomingValue(i);
+ for (Value *Incoming : PI->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = PI == LHS ?
// Evaluate the BinOp on the incoming phi values.
Value *CommonValue = nullptr;
- for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
- Value *Incoming = PI->getIncomingValue(i);
+ for (Value *Incoming : PI->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
return X;
}
- // fsub nnan ninf x, x ==> 0.0
- if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
+ // fsub nnan x, x ==> 0.0
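+ // (inf - inf is NaN, so nnan alone justifies this fold; ninf is not needed.)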
+ if (FMF.noNaNs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
return nullptr;
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
return Op0;
+ if (FMF.noNaNs()) {
+ // X / X -> 1.0 is legal when NaNs are ignored.
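+ // e.g. (illustrative): %r = fdiv nnan float %x, %x ; -> 1.0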
+ if (Op0 == Op1)
+ return ConstantFP::get(Op0->getType(), 1.0);
+
+ // -X / X -> -1.0 and
+ // X / -X -> -1.0 are legal when NaNs are ignored.
+ // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
+ if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op0) == Op1) ||
+ (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op1) == Op0))
+ return ConstantFP::get(Op0->getType(), -1.0);
+ }
+
return nullptr;
}
} else if (match(LHS, m_And(m_Value(), m_ConstantInt(CI2)))) {
// 'and x, CI2' produces [0, CI2].
Upper = CI2->getValue() + 1;
+ } else if (match(LHS, m_NUWAdd(m_Value(), m_ConstantInt(CI2)))) {
+ // 'add nuw x, CI2' produces [CI2, UINT_MAX].
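+ // e.g. (illustrative): 'add nuw i8 %x, 5' is known to lie in [5, 255].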
+ Lower = CI2->getValue();
}
if (Lower != Upper) {
ConstantRange LHS_CR = ConstantRange(Lower, Upper);
// what constant folding can make out of it.
Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
- Constant *NewLHS = ConstantExpr::getGetElementPtr(Null, IndicesLHS);
+ Constant *NewLHS = ConstantExpr::getGetElementPtr(
+ GLHS->getSourceElementType(), Null, IndicesLHS);
SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
- Constant *NewRHS = ConstantExpr::getGetElementPtr(Null, IndicesRHS);
+ Constant *NewRHS = ConstantExpr::getGetElementPtr(
+ GLHS->getSourceElementType(), Null, IndicesRHS);
return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
}
}
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const Query &Q, unsigned MaxRecurse) {
+ FastMathFlags FMF, const Query &Q,
+ unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
if (Pred == FCmpInst::FCMP_TRUE)
return ConstantInt::get(GetCompareTy(LHS), 1);
+ // UNO/ORD predicates can be trivially folded if NaNs are ignored.
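+ // e.g. (illustrative): 'fcmp nnan uno float %x, %y' -> false, and
+ // 'fcmp nnan ord float %x, %y' -> true.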
+ if (FMF.noNaNs()) {
+ if (Pred == FCmpInst::FCMP_UNO)
+ return ConstantInt::get(GetCompareTy(LHS), 0);
+ if (Pred == FCmpInst::FCMP_ORD)
+ return ConstantInt::get(GetCompareTy(LHS), 1);
+ }
+
// fcmp pred x, undef and fcmp pred undef, x
// fold to true if unordered, false if ordered
if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout &DL,
+ FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
- RecursionLimit);
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF,
+ Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
+}
+
+/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
+/// replaced with RepOp.
+static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
+ const Query &Q,
+ unsigned MaxRecurse) {
+ // Trivial replacement.
+ if (V == Op)
+ return RepOp;
+
+ auto *I = dyn_cast<Instruction>(V);
+ if (!I)
+ return nullptr;
+
+ // If this is a binary operator, try to simplify it with the replaced op.
+ if (auto *B = dyn_cast<BinaryOperator>(I)) {
+ // Consider:
+ // %cmp = icmp eq i32 %x, 2147483647
+ // %add = add nsw i32 %x, 1
+ // %sel = select i1 %cmp, i32 -2147483648, i32 %add
+ //
+ // We can't replace %sel with %add unless we strip away the flags.
+ if (isa<OverflowingBinaryOperator>(B))
+ if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
+ return nullptr;
+ if (isa<PossiblyExactOperator>(B))
+ if (B->isExact())
+ return nullptr;
+
+ if (MaxRecurse) {
+ if (B->getOperand(0) == Op)
+ return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
+ MaxRecurse - 1);
+ if (B->getOperand(1) == Op)
+ return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
+ MaxRecurse - 1);
+ }
+ }
+
+ // Same for CmpInsts.
+ if (CmpInst *C = dyn_cast<CmpInst>(I)) {
+ if (MaxRecurse) {
+ if (C->getOperand(0) == Op)
+ return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
+ MaxRecurse - 1);
+ if (C->getOperand(1) == Op)
+ return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
+ MaxRecurse - 1);
+ }
+ }
+
+ // TODO: We could hand off more cases to instsimplify here.
+
+ // If all operands are constant after substituting Op for RepOp then we can
+ // constant fold the instruction.
+ if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
+ // Build a list of all constant operands.
+ SmallVector<Constant *, 8> ConstOps;
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+ if (I->getOperand(i) == Op)
+ ConstOps.push_back(CRepOp);
+ else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
+ ConstOps.push_back(COp);
+ else
+ break;
+ }
+
+ // All operands were constants, fold it.
+ if (ConstOps.size() == I->getNumOperands()) {
+ if (CmpInst *C = dyn_cast<CmpInst>(I))
+ return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
+ ConstOps[1], Q.DL, Q.TLI);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ if (!LI->isVolatile())
+ return ConstantFoldLoadFromConstPtr(ConstOps[0], Q.DL);
+
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), ConstOps,
+ Q.DL, Q.TLI);
+ }
+ }
+
+ return nullptr;
}
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
return TrueVal;
- const auto *ICI = dyn_cast<ICmpInst>(CondVal);
- unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
- if (ICI && BitWidth) {
+ if (const auto *ICI = dyn_cast<ICmpInst>(CondVal)) {
+ unsigned BitWidth = Q.DL.getTypeSizeInBits(TrueVal->getType());
ICmpInst::Predicate Pred = ICI->getPredicate();
+ Value *CmpLHS = ICI->getOperand(0);
+ Value *CmpRHS = ICI->getOperand(1);
APInt MinSignedValue = APInt::getSignBit(BitWidth);
Value *X;
const APInt *Y;
bool TrueWhenUnset;
bool IsBitTest = false;
if (ICmpInst::isEquality(Pred) &&
- match(ICI->getOperand(0), m_And(m_Value(X), m_APInt(Y))) &&
- match(ICI->getOperand(1), m_Zero())) {
+ match(CmpLHS, m_And(m_Value(X), m_APInt(Y))) &&
+ match(CmpRHS, m_Zero())) {
IsBitTest = true;
TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
- } else if (Pred == ICmpInst::ICMP_SLT &&
- match(ICI->getOperand(1), m_Zero())) {
- X = ICI->getOperand(0);
+ } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
+ X = CmpLHS;
Y = &MinSignedValue;
IsBitTest = true;
TrueWhenUnset = false;
- } else if (Pred == ICmpInst::ICMP_SGT &&
- match(ICI->getOperand(1), m_AllOnes())) {
- X = ICI->getOperand(0);
+ } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
+ X = CmpLHS;
Y = &MinSignedValue;
IsBitTest = true;
TrueWhenUnset = true;
return TrueWhenUnset ? TrueVal : FalseVal;
}
}
+ if (ICI->hasOneUse()) {
+ const APInt *C;
+ if (match(CmpRHS, m_APInt(C))) {
+ // X < SMIN ? T : F --> F
+ if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue())
+ return FalseVal;
+ // X < UMIN ? T : F --> F
+ if (Pred == ICmpInst::ICMP_ULT && C->isMinValue())
+ return FalseVal;
+ // X > SMAX ? T : F --> F
+ if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue())
+ return FalseVal;
+ // X > UMAX ? T : F --> F
+ if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue())
+ return FalseVal;
+ }
+ }
+
+ // If we have an equality comparison then we know the value in one of the
+ // arms of the select. See if substituting this value into the arm and
+ // simplifying the result yields the same value as the other arm.
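+ // For example (illustrative):
+ //   %cmp = icmp eq i32 %x, 0
+ //   %and = and i32 %x, %y
+ //   %sel = select i1 %cmp, i32 0, i32 %and
+ // Substituting 0 for %x simplifies %and to 0, matching the true arm, so
+ // %sel can be replaced by %and.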
+ if (Pred == ICmpInst::ICMP_EQ) {
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
+ TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
+ TrueVal)
+ return FalseVal;
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
+ FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
+ FalseVal)
+ return FalseVal;
+ } else if (Pred == ICmpInst::ICMP_NE) {
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
+ FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
+ FalseVal)
+ return TrueVal;
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
+ TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
+ TrueVal)
+ return TrueVal;
+ }
}
return nullptr;
/// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
+static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
+ const Query &Q, unsigned) {
// The type of the GEP pointer operand.
- PointerType *PtrTy = cast<PointerType>(Ops[0]->getType()->getScalarType());
- unsigned AS = PtrTy->getAddressSpace();
+ unsigned AS =
+ cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
// getelementptr P -> P.
if (Ops.size() == 1)
return Ops[0];
// Compute the (pointer) type returned by the GEP instruction.
- Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, Ops.slice(1));
+ Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
Type *GEPTy = PointerType::get(LastType, AS);
if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
GEPTy = VectorType::get(GEPTy, VT->getNumElements());
if (match(Ops[1], m_Zero()))
return Ops[0];
- Type *Ty = PtrTy->getElementType();
+ Type *Ty = SrcTy;
if (Ty->isSized()) {
Value *P;
uint64_t C;
if (!isa<Constant>(Ops[i]))
return nullptr;
- return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
+ return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
+ Ops.slice(1));
}
Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyGEPInst(Ops, Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
+ return ::SimplifyGEPInst(
+ cast<PointerType>(Ops[0]->getType()->getScalarType())->getElementType(),
+ Ops, Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
RecursionLimit);
}
+/// SimplifyExtractValueInst - Given operands for an ExtractValueInst, see if
+/// we can fold the result. If not, this returns null.
+static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+ const Query &, unsigned) {
+ if (auto *CAgg = dyn_cast<Constant>(Agg))
+ return ConstantFoldExtractValueInstruction(CAgg, Idxs);
+
+ // extractvalue (insertvalue y, elt, n), n -> elt
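+ // For example (illustrative):
+ //   %agg = insertvalue {i32, i32} %y, i32 %elt, 1
+ //   %v = extractvalue {i32, i32} %agg, 1 ; -> %elt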
+ unsigned NumIdxs = Idxs.size();
+ for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
+ IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
+ ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
+ unsigned NumInsertValueIdxs = InsertValueIdxs.size();
+ unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
+ if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
+ Idxs.slice(0, NumCommonIdxs)) {
+ if (NumIdxs == NumInsertValueIdxs)
+ return IVI->getInsertedValueOperand();
+ break;
+ }
+ }
+
+ return nullptr;
+}
+
+Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT,
+ AssumptionCache *AC,
+ const Instruction *CxtI) {
+ return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI),
+ RecursionLimit);
+}
+
+/// SimplifyExtractElementInst - Given operands for an ExtractElementInst, see
+/// if we can fold the result. If not, this returns null.
+static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &,
+ unsigned) {
+ if (auto *CVec = dyn_cast<Constant>(Vec)) {
+ if (auto *CIdx = dyn_cast<Constant>(Idx))
+ return ConstantFoldExtractElementInstruction(CVec, CIdx);
+
+ // The index is not relevant if our vector is a splat.
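+ // e.g. (illustrative): extractelement <2 x i8> <i8 7, i8 7>, i32 %i -> i8 7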
+ if (auto *Splat = CVec->getSplatValue())
+ return Splat;
+
+ if (isa<UndefValue>(Vec))
+ return UndefValue::get(Vec->getType()->getVectorElementType());
+ }
+
+ // If extracting a specified index from the vector, see if we can recursively
+ // find a previously computed scalar that was inserted into the vector.
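+ // e.g. (illustrative):
+ //   %v = insertelement <4 x float> %vec, float %f, i32 2
+ //   %e = extractelement <4 x float> %v, i32 2 ; -> %f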
+ if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
+ if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
+ return Elt;
+
+ return nullptr;
+}
+
+Value *llvm::SimplifyExtractElementInst(
+ Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
+ return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI),
+ RecursionLimit);
+}
+
/// SimplifyPHINode - See if we can fold the given phi. If not, returns null.
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
Value *CommonValue = nullptr;
bool HasUndefInput = false;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- Value *Incoming = PN->getIncomingValue(i);
+ for (Value *Incoming : PN->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PN) continue;
if (isa<UndefValue>(Incoming)) {
const Query &Q, unsigned MaxRecurse) {
if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
- return SimplifyFCmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
+ return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
template <typename IterTy>
-static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin, IterTy ArgEnd,
+static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
const Query &Q, unsigned MaxRecurse) {
+ Intrinsic::ID IID = F->getIntrinsicID();
+ unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
+ Type *ReturnType = F->getReturnType();
+
+ // Binary Ops
+ if (NumOperands == 2) {
+ Value *LHS = *ArgBegin;
+ Value *RHS = *(ArgBegin + 1);
+ if (IID == Intrinsic::usub_with_overflow ||
+ IID == Intrinsic::ssub_with_overflow) {
+ // X - X -> { 0, false }
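+ // (getNullValue of the {iN, i1} result type is exactly {0, false}.)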
+ if (LHS == RHS)
+ return Constant::getNullValue(ReturnType);
+
+ // X - undef -> undef
+ // undef - X -> undef
+ if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
+ return UndefValue::get(ReturnType);
+ }
+
+ if (IID == Intrinsic::uadd_with_overflow ||
+ IID == Intrinsic::sadd_with_overflow) {
+ // X + undef -> undef
+ if (isa<UndefValue>(RHS))
+ return UndefValue::get(ReturnType);
+ }
+
+ if (IID == Intrinsic::umul_with_overflow ||
+ IID == Intrinsic::smul_with_overflow) {
+ // X * 0 -> { 0, false }
+ if (match(RHS, m_Zero()))
+ return Constant::getNullValue(ReturnType);
+
+ // X * undef -> { 0, false }
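+ // (undef may be chosen to be 0, and X * 0 never overflows.)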
+ if (match(RHS, m_Undef()))
+ return Constant::getNullValue(ReturnType);
+ }
+ }
+
// Perform idempotent optimizations
if (!IsIdempotent(IID))
return nullptr;
// Unary Ops
- if (std::distance(ArgBegin, ArgEnd) == 1)
+ if (NumOperands == 1)
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
if (II->getIntrinsicID() == IID)
return II;
if (!F)
return nullptr;
- if (unsigned IID = F->getIntrinsicID())
- if (Value *Ret =
- SimplifyIntrinsic((Intrinsic::ID) IID, ArgBegin, ArgEnd, Q, MaxRecurse))
+ if (F->isIntrinsic())
+ if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
return Ret;
if (!canConstantFoldCallTo(F))
I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::FCmp:
- Result =
- SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
- I->getOperand(1), DL, TLI, DT, AC, I);
+ Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
+ I->getOperand(0), I->getOperand(1),
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
IV->getIndices(), DL, TLI, DT, AC, I);
break;
}
+ case Instruction::ExtractValue: {
+ auto *EVI = cast<ExtractValueInst>(I);
+ Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
+ EVI->getIndices(), DL, TLI, DT, AC, I);
+ break;
+ }
+ case Instruction::ExtractElement: {
+ auto *EEI = cast<ExtractElementInst>(I);
+ Result = SimplifyExtractElementInst(
+ EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I);
+ break;
+ }
case Instruction::PHI:
Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I));
break;