//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm::PatternMatch;

// As a default, let's assume that we want to be aggressive,
// and attempt to traverse with no limits in an attempt to sink negation.
static constexpr unsigned NegatorDefaultMaxDepth = ~0U;

// Let's guesstimate that most often we will end up visiting/producing
// a fairly small number of new instructions.
static constexpr unsigned NegatorMaxNodesSSO = 16;

namespace llvm {

class AAResults;
class APInt;
class AssumptionCache;
class BlockFrequencyInfo;
class DataLayout;
class DominatorTree;
class GEPOperator;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class User;

class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
    : public InstCombiner,
      public InstVisitor<InstCombinerImpl, Instruction *> {
public:
  InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder,
                   bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
                   TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
                   DominatorTree &DT, OptimizationRemarkEmitter &ORE,
                   BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                   const DataLayout &DL, LoopInfo *LI)
      : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE,
                     BFI, PSI, DL, LI) {}

  virtual ~InstCombinerImpl() = default;

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //
  //    null        - No change was made
  //    I           - Change was made, I is still valid, I may be dead though
  //    otherwise   - Change was made, replace I with returned instruction
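  //
  // For example (an illustrative case, not part of the contract): visiting
  // "add i32 %x, %x" may return a new "shl i32 %x, 1" instruction, and the
  // driver then replaces the add with that shl.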

  Instruction *visitFNeg(UnaryOperator &I);
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty,
                                   bool isNUW);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
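  /// An illustrative instance of this reassociation (a sketch, assuming the
  /// combined shift amount is known to stay in range):
  ///   "(X l>> C1) l>> C2"  -->  "X l>> (C1 + C2)"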
  Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
      BinaryOperator *Sh0, const SimplifyQuery &SQ,
      bool AnalyzeForSignBitExtraction = false);
  Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
      BinaryOperator &I);
  Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
      BinaryOperator &OldAShr);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  CmpInst *canonicalizeICmpPredicate(CmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *foldItoFPtoI(CastInst &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitCallBrInst(CallBrInst &CBI);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
  Instruction *visitGEPOfBitcast(BitCastInst *BCI, GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI, Value *FreedOp);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
  Instruction *visitUnconditionalBranchInst(BranchInst &BI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitUnreachableInst(UnreachableInst &I);
  Instruction *
  foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAEndInst(VAEndInst &I);
  Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
  bool freezeOtherUses(FreezeInst &FI);
  Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
  Instruction *visitFreeze(FreezeInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                 const Twine &Suffix = "");

private:
  bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
  bool isDesirableIntType(unsigned BitWidth) const;
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
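  ///
  /// A sketch of the shape involved (illustrative IR, not tied to one fold):
  ///   %a = trunc i32 %x to i8
  ///   %b = trunc i32 %y to i8
  ///   %r = and i8 %a, %b
  /// may be better off as
  ///   %w = and i32 %x, %y
  ///   %r = trunc i32 %w to i8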
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
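  ///
  /// For example (an illustrative sketch of the two shapes):
  ///   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  /// or
  ///   %s  = add i32 %a, %b
  ///   %ov = icmp ult i32 %s, %a   ; unsigned-add overflow check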
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
                             Value *LHS, Value *RHS, Instruction &CtxI,
                             Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallBase(CallBase &Call);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallBase &Call);
  Instruction *transformCallThroughTrampoline(CallBase &Call,
                                              IntrinsicInst &Tramp);

  Value *simplifyMaskedLoad(IntrinsicInst &II);
  Instruction *simplifyMaskedStore(IntrinsicInst &II);
  Instruction *simplifyMaskedGather(IntrinsicInst &II);
  Instruction *simplifyMaskedScatter(IntrinsicInst &II);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned.
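  ///
  /// One concrete member of this family of folds (illustrative):
  ///   %m = and i32 %x, 1
  ///   %c = icmp ne i32 %m, 0
  ///   %r = zext i1 %c to i32
  /// simplifies to just "%m".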
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI);

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowFunnelShift(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
  Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
  Instruction *foldNot(BinaryOperator &I);

  void freelyInvertAllUsersOf(Value *V);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
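  ///
  /// For instance (an illustrative pairing): a "zext i8 -> i32" followed by a
  /// "trunc i32 -> i16" can be replaced by a single "zext i8 -> i16".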
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);
  Value *simplifyIntToPtrRoundTripCast(Value *Val);

  Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
                          bool IsAnd, bool IsLogical = false);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);

  Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);

  Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
                                     bool IsAnd);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
                          bool IsLogicalSelect = false);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       Instruction *CxtI, bool IsAnd,
                                       bool IsLogical = false);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

  Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
  Instruction *foldFPSignBitOps(BinaryOperator &I);

  // Optimize one of these forms:
  //   and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
  //   or i1 Op, SI  / select i1 Op, i1 true, i1 SI  (if IsAnd = false)
  // into a simpler select instruction using isImpliedCondition.
  Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
                                                 bool IsAnd);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = PoisonValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Worklist.addValue(I.getOperand(OpNum));
    I.setOperand(OpNum, V);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Worklist.addValue(U);
    U = NewValue;
  }

  /// Create and insert the idiom we use to indicate a block is unreachable
  /// without having to rewrite the CFG from within InstCombine.
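  /// The idiom is a store of "true" through a poison pointer
  /// ("store i1 true, i1* poison"): immediate undefined behavior that later
  /// passes may lower to a real unreachable.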
  void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
    auto &Ctx = InsertAt->getContext();
    new StoreInst(ConstantInt::getTrue(Ctx),
                  PoisonValue::get(Type::getInt1PtrTy(Ctx)),
                  InsertAt);
  }

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) override {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    for (Use &Operand : I.operands())
      if (auto *Inst = dyn_cast<Instruction>(Operand))
        Worklist.add(Inst);

    Worklist.remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything with FI
  }

  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflow(
      Instruction::BinaryOps BinaryOp, bool IsSigned,
      Value *LHS, Value *RHS, Instruction *CxtI) const;

  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
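  ///
  /// A quick numeric check of the identity (illustrative, X=23, C0=4, C1=3):
  /// 23 % 4 = 3, (23 / 4) % 3 = 5 % 3 = 2, so 3 + 2 * 4 = 11, which matches
  /// 23 % (4 * 3) = 23 % 12 = 11.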
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
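  ///
  /// A sketch of the kind of chain being matched (illustrative IR):
  ///   %lt  = icmp slt i32 %lhs, %rhs
  ///   %eq  = icmp eq  i32 %lhs, %rhs
  ///   %mid = select i1 %eq, i32 %Equal, i32 %Greater
  ///   %res = select i1 %lt, i32 %Less, i32 %mid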
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0) override;

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify the
  /// demanded bits for the instruction sequence
  /// "r1 = shr x, c1; r2 = shl r1, c2".
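  ///
  /// For instance, when c1 == c2 == c, the pair just masks off the low bits,
  /// so "shl (lshr x, c), c" can become "x & (-1 << c)" (illustrative).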
  Value *simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                    Instruction *Shl, const APInt &ShlOp1,
                                    const APInt &DemandedMask,
                                    KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0,
                                    bool AllowMultipleUsers = false) override;

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);
  Instruction *foldVectorSelect(SelectInst &Sel);
  Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);

  /// Given a binary operator, cast instruction, or select which has a PHI node
  /// as operand #0, see if we can fold the instruction into the PHI (which is
  /// only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// For a binary operator with 2 phi operands, try to hoist the binary
  /// operation before the phi. This can result in fewer instructions in
  /// patterns where at least one set of phi operands simplifies.
  /// Example:
  /// BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
  /// -->
  /// BB1: BO = binop X, Y
  /// BB3: phi [BO, BB1], [(binop C1, C2), BB2]
  Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select arguments.
  /// This also works for Cast instructions, which obviously do not have a
  /// second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                bool FoldWithMultiUse = false);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
  Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
  Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
  /// insert a new pointer typed PHI and replace the original one.
  Instruction *foldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI,
                              Value *RHS, const ICmpInst &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca);
  Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
                                            GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
  Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
                                                  const APInt &C);
  Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
  Instruction *foldSignBitTest(ICmpInst &I);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);

  Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
                                         const APInt &C);
  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem,
                                    const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
                                     const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                             const APInt &C);
  Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
                                               const APInt &C);
  Instruction *foldICmpBitCast(ICmpInst &Cmp);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an initial instruction, check to see if it is the root of a
  /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
  /// intrinsic.
  Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
                                      bool MatchBitReversals);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
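  ///
  /// For instance (by the contract above, illustrative): if Val is
  /// "mul i32 %x, 8" and Scale is 8, the returned X is "%x".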
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

class Negator final {
  /// Top-to-bottom, def-to-use negated instruction tree we produced.
  SmallVector<Instruction *, NegatorMaxNodesSSO> NewInstructions;

  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy Builder;

  const DataLayout &DL;
  AssumptionCache &AC;
  const DominatorTree &DT;

  const bool IsTrulyNegation;

  SmallDenseMap<Value *, Value *> NegationsCache;

  Negator(LLVMContext &C, const DataLayout &DL, AssumptionCache &AC,
          const DominatorTree &DT, bool IsTrulyNegation);

#if LLVM_ENABLE_STATS
  unsigned NumValuesVisitedInThisNegator = 0;
  ~Negator();
#endif

  using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
                           Value * /*NegatedRoot*/>;

  std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);

  LLVM_NODISCARD Value *visitImpl(Value *V, unsigned Depth);

  LLVM_NODISCARD Value *negate(Value *V, unsigned Depth);

  /// Recurse depth-first and attempt to sink the negation.
  /// FIXME: use worklist?
  LLVM_NODISCARD Optional<Result> run(Value *Root);

  Negator(const Negator &) = delete;
  Negator(Negator &&) = delete;
  Negator &operator=(const Negator &) = delete;
  Negator &operator=(Negator &&) = delete;

public:
  /// Attempt to negate \p Root. Returns nullptr if negation can't be
  /// performed, otherwise returns the negated value.
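  ///
  /// For example, negating "sub i32 %a, %b" simply yields "sub i32 %b, %a",
  /// since -(a - b) == b - a (one of the cheapest cases handled here).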
  LLVM_NODISCARD static Value *Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H