//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "instcombine"

using namespace llvm::PatternMatch;
// As a default, let's assume that we want to be aggressive,
// and attempt to traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing
// a fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AssumptionCache;
class BlockFrequencyInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstCombineWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                   const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE,
                     BFI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();
  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //
  //    null        - No change was made
  //    I           - Change was made, I is still valid, I may be dead though
  //    otherwise   - Change was made, replace I with returned instruction
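  //
  // Illustrative note (assumption, not part of the original header): a visit
  // method that folds "add X, 0" would typically end with
  // "return replaceInstUsesWith(I, X);", which returns &I, whereas a method
  // that builds a brand-new replacement simply returns that new instruction.
  //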
  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(
      Value *LHS, Value *RHS, Type *Ty, bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
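  // Illustrative example (assumption, not from the original header): two
  // same-direction shifts such as "shl (shl %x, C0), C1" can be reassociated
  // into a single "shl %x, (C0 + C1)" when the combined amount stays within
  // the bit width.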
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeDominatedUses(FreezeInst &FI);
  Instruction *visitFreeze(FreezeInst &I);
  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");
  void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);
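  // Illustrative case (assumption, not from the original header): when A and
  // B are i8 values, "and (zext A to i32), (zext B to i32)" can become
  // "zext (and A, B) to i32", leaving a single cast instead of two.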
  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);
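  // Illustrative example (assumption, not from the original header): for
  // "llvm.uadd.with.overflow.i32(%x, 0)" the operation result simplifies to
  // %x and the overflow flag to "false", so the surrounding extractvalues
  // can be folded away.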
  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);
  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate the icmp.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  /// would be transformed. Pass true to actually perform the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);
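  // Illustrative fold (assumption, not quoted from the original header):
  //   zext (icmp ne (and %x, 1), 0) to i32  -->  and %x, 1
  // i.e. the single-bit compare is rewritten as bitwise ops on %x.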
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }
  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(SelectInst &MinMax1);

  void freelyInvertAllUsersOf(Value *V);
  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
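  // Illustrative example (assumption, not from the original header): the pair
  // "zext i8 -> i16" followed by "zext i16 -> i32" is replaceable by a single
  // "zext i8 -> i32", whereas "trunc i32 -> i16" followed by "zext i16 -> i32"
  // is not expressible as one cast, so the helper returns 0.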
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &And);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Or);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       Instruction *CxtI, bool IsAnd,
                                       bool IsLogical = false);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);
  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI  / select i1 Op, i1 true, i1 SI  (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.push(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }
  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }
  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Worklist.addValue(I.getOperand(OpNum));
    I.setOperand(OpNum, V);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Worklist.addValue(U);
    U = NewValue;
  }
  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  UndefValue::get(Type::getInt1PtrTy(Ctx)),
                  InsertAt);
  }
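  // Explanatory note (assumption, not from the original header): the emitted
  // "store i1 true, i1* undef" is immediate undefined behavior, which later
  // CFG-simplifying passes recognize and collapse into a real unreachable.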
  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    for (Use &Operand : I.operands())
      if (auto *Inst = dyn_cast<Instruction>(Operand))
        Worklist.add(Inst);

    Worklist.remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with FI
  }
  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }
  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(
      Instruction::BinaryOps BinaryOp, bool IsSigned,
      Value *LHS, Value *RHS, Instruction *CxtI) const;
  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);
  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);
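  // Illustrative three-way compare idiom (assumption, not from the original
  // header):
  //   %lt = icmp slt i32 %a, %b
  //   %eq = icmp eq  i32 %a, %b
  //   %s1 = select i1 %eq, i32 0, i32 1
  //   %s2 = select i1 %lt, i32 -1, i32 %s1
  // yields -1 / 0 / 1 for a < b, a == b and a > b respectively.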
  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction
  /// sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false) override;
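  // Illustrative demanded-bits fold (assumption, not from the original
  // header): in "trunc i32 (or i32 %x, 65280) to i8" only the low 8 bits of
  // the 'or' are demanded, so the 'or' operand can be replaced by %x.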
  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
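  // Illustrative example (assumption, not from the original header):
  //   add (select i1 %c, i32 1, i32 2), 40  -->  select i1 %c, i32 41, i32 42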
  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);
  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
  /// insert a new pointer typed PHI and replace the original one.
  Instruction *foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);
  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldUnsignedMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                               const APInt &C);
  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);
  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);
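  // Illustrative bswap idiom (assumption, not from the original header):
  //   %hi = shl i16 %x, 8
  //   %lo = lshr i16 %x, 8
  //   %r  = or i16 %hi, %lo
  // is equivalent to "call i16 @llvm.bswap.i16(i16 %x)".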
  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};
class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DataLayout &DL;
  AssumptionCache &AC;
  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, AssumptionCache &AC,
          const DominatorTree &DT, bool IsTrulyNegation);
#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  LLVM_NODISCARD Value *visitImpl(Value *V, unsigned Depth);

  LLVM_NODISCARD Value *negate(Value *V, unsigned Depth);
  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  LLVM_NODISCARD Optional<Result> run(Value *Root);
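  // Illustrative example of negation sinking (assumption, not from the
  // original header): asked to negate "%d = sub i32 %a, %b", the negator
  // produces "%neg = sub i32 %b, %a", letting "sub i32 0, %d" be replaced
  // by %neg.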
  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be performed,
  /// otherwise returns the negated value.
  LLVM_NODISCARD static Value *Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC);
};

} // end namespace llvm
#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H