1 //===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
13 //===----------------------------------------------------------------------===//
16 #include "AMDGPUSubtarget.h"
17 #include "AMDGPUTargetMachine.h"
18 #include "llvm/ADT/FloatingPointMode.h"
19 #include "llvm/ADT/StringRef.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/ConstantFolding.h"
22 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
23 #include "llvm/Analysis/Loads.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/CodeGen/Passes.h"
26 #include "llvm/CodeGen/TargetPassConfig.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Dominators.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/InstVisitor.h"
35 #include "llvm/IR/InstrTypes.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Operator.h"
42 #include "llvm/IR/Type.h"
43 #include "llvm/IR/Value.h"
44 #include "llvm/InitializePasses.h"
45 #include "llvm/Pass.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Transforms/Utils/IntegerDivision.h"
51 #define DEBUG_TYPE "amdgpu-codegenprepare"
57 static cl::opt<bool> WidenLoads(
58 "amdgpu-codegenprepare-widen-constant-loads",
59 cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
63 static cl::opt<bool> UseMul24Intrin(
64 "amdgpu-codegenprepare-mul24",
65 cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
69 // Legalize 64-bit division by using the generic IR expansion.
70 static cl::opt<bool> ExpandDiv64InIR(
71 "amdgpu-codegenprepare-expand-div64",
72 cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
76 // Leave all division operations as they are. This supersedes ExpandDiv64InIR
77 // and is used for testing the legalizer.
78 static cl::opt<bool> DisableIDivExpand(
79 "amdgpu-codegenprepare-disable-idiv-expansion",
80 cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
84 class AMDGPUCodeGenPrepare : public FunctionPass,
85 public InstVisitor<AMDGPUCodeGenPrepare, bool> {
86 const GCNSubtarget *ST = nullptr;
87 AssumptionCache *AC = nullptr;
88 DominatorTree *DT = nullptr;
89 LegacyDivergenceAnalysis *DA = nullptr;
90 Module *Mod = nullptr;
91 const DataLayout *DL = nullptr;
92 bool HasUnsafeFPMath = false;
93 bool HasFP32Denormals = false;
95 /// Copies exact/nsw/nuw flags (if any) from binary operation \p I to
96 /// binary operation \p V.
98 /// \returns Binary operation \p V.
99 /// \returns \p T's base element bit width.
100 unsigned getBaseElementBitWidth(const Type *T) const;
102 /// \returns Equivalent 32 bit integer type for given type \p T. For example,
103 /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
105 Type *getI32Ty(IRBuilder<> &B, const Type *T) const;
107 /// \returns True if binary operation \p I is a signed binary operation, false
109 bool isSigned(const BinaryOperator &I) const;
111 /// \returns True if the condition of 'select' operation \p I comes from a
112 /// signed 'icmp' operation, false otherwise.
113 bool isSigned(const SelectInst &I) const;
115 /// \returns True if type \p T needs to be promoted to 32 bit integer type,
117 bool needsPromotionToI32(const Type *T) const;
119 /// Promotes uniform binary operation \p I to equivalent 32 bit binary
122 /// \details \p I's base element bit width must be greater than 1 and less
/// than or equal to 16. Promotion is done by sign or zero extending operands to
124 /// 32 bits, replacing \p I with equivalent 32 bit binary operation, and
125 /// truncating the result of 32 bit binary operation back to \p I's original
126 /// type. Division operation is not promoted.
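///
/// For illustration (a sketch, not taken from any particular test), a uniform
/// 16-bit add
///   %r = add i16 %a, %b
/// is rewritten roughly as
///   %a32 = zext i16 %a to i32
///   %b32 = zext i16 %b to i32
///   %r32 = add i32 %a32, %b32
///   %r   = trunc i32 %r32 to i16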
128 /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
130 bool promoteUniformOpToI32(BinaryOperator &I) const;
132 /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
134 /// \details \p I's base element bit width must be greater than 1 and less
/// than or equal to 16. Promotion is done by sign or zero extending operands to
136 /// 32 bits, and replacing \p I with 32 bit 'icmp' operation.
139 bool promoteUniformOpToI32(ICmpInst &I) const;
141 /// Promotes uniform 'select' operation \p I to 32 bit 'select'
144 /// \details \p I's base element bit width must be greater than 1 and less
/// than or equal to 16. Promotion is done by sign or zero extending operands to
146 /// 32 bits, replacing \p I with 32 bit 'select' operation, and truncating the
147 /// result of 32 bit 'select' operation back to \p I's original type.
150 bool promoteUniformOpToI32(SelectInst &I) const;
152 /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
155 /// \details \p I's base element bit width must be greater than 1 and less
/// than or equal to 16. Promotion is done by zero extending the operand to 32
157 /// bits, replacing \p I with 32 bit 'bitreverse' intrinsic, shifting the
158 /// result of 32 bit 'bitreverse' intrinsic to the right with zero fill (the
159 /// shift amount is 32 minus \p I's base element bit width), and truncating
160 /// the result of the shift operation back to \p I's original type.
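///
/// For illustration (a sketch), a uniform i16 bitreverse
///   %r = call i16 @llvm.bitreverse.i16(i16 %a)
/// is rewritten roughly as
///   %a32 = zext i16 %a to i32
///   %r32 = call i32 @llvm.bitreverse.i32(i32 %a32)
///   %s   = lshr i32 %r32, 16   ; 32 minus the 16-bit element width
///   %r   = trunc i32 %s to i16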
163 bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
166 unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
167 unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
168 bool isI24(Value *V, unsigned ScalarSize) const;
169 bool isU24(Value *V, unsigned ScalarSize) const;
/// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.i24
/// when both operands are known to fit in 24 bits (unsigned or signed,
/// respectively).
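///
/// For illustration (a sketch assuming both operands are known to fit in 24
/// unsigned bits), a non-uniform
///   %r = mul i32 %a, %b
/// becomes roughly
///   %r = call i32 @llvm.amdgcn.mul.u24(i32 %a, i32 %b)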
173 bool replaceMulWithMul24(BinaryOperator &I) const;
/// Perform the same fold as the equivalently named function in the
/// DAGCombiner. Since we expand some divisions here, we need to perform this
/// before the expansion obscures the select.
177 bool foldBinOpIntoSelect(BinaryOperator &I) const;
179 bool divHasSpecialOptimization(BinaryOperator &I,
180 Value *Num, Value *Den) const;
181 int getDivNumBits(BinaryOperator &I,
182 Value *Num, Value *Den,
183 unsigned AtLeast, bool Signed) const;
185 /// Expands 24 bit div or rem.
186 Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
187 Value *Num, Value *Den,
188 bool IsDiv, bool IsSigned) const;
190 Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
191 Value *Num, Value *Den, unsigned NumBits,
192 bool IsDiv, bool IsSigned) const;
194 /// Expands 32 bit div or rem.
195 Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
196 Value *Num, Value *Den) const;
198 Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
199 Value *Num, Value *Den) const;
200 void expandDivRem64(BinaryOperator &I) const;
202 /// Widen a scalar load.
/// \details Widen a uniform, small-type load from constant address space
/// memory to a full 32 bits and then truncate the result, so that a scalar
/// load can be used instead of a vector load.
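///
/// For illustration (a sketch), a uniform
///   %v = load i8, i8 addrspace(4)* %p, align 4
/// is widened to
///   %w = load i32, i32 addrspace(4)* %p.cast, align 4
///   %v = trunc i32 %w to i8
/// where %p.cast is a bitcast of %p to an i32 pointer.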
210 bool canWidenScalarExtLoad(LoadInst &I) const;
public:
static char ID;

AMDGPUCodeGenPrepare() : FunctionPass(ID) {}
217 bool visitFDiv(BinaryOperator &I);
219 bool visitInstruction(Instruction &I) { return false; }
220 bool visitBinaryOperator(BinaryOperator &I);
221 bool visitLoadInst(LoadInst &I);
222 bool visitICmpInst(ICmpInst &I);
223 bool visitSelectInst(SelectInst &I);
225 bool visitIntrinsicInst(IntrinsicInst &I);
226 bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
228 bool doInitialization(Module &M) override;
229 bool runOnFunction(Function &F) override;
231 StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
233 void getAnalysisUsage(AnalysisUsage &AU) const override {
234 AU.addRequired<AssumptionCacheTracker>();
235 AU.addRequired<LegacyDivergenceAnalysis>();
237 // FIXME: Division expansion needs to preserve the dominator tree.
238 if (!ExpandDiv64InIR)
239 AU.setPreservesAll();
243 } // end anonymous namespace
245 unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
246 assert(needsPromotionToI32(T) && "T does not need promotion to i32");
248 if (T->isIntegerTy())
249 return T->getIntegerBitWidth();
250 return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
253 Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
254 assert(needsPromotionToI32(T) && "T does not need promotion to i32");
256 if (T->isIntegerTy())
257 return B.getInt32Ty();
258 return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
261 bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
262 return I.getOpcode() == Instruction::AShr ||
263 I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
266 bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
267 return isa<ICmpInst>(I.getOperand(0)) ?
268 cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
271 bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
272 const IntegerType *IntTy = dyn_cast<IntegerType>(T);
273 if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
276 if (const VectorType *VT = dyn_cast<VectorType>(T)) {
277 // TODO: The set of packed operations is more limited, so may want to
278 // promote some anyway.
279 if (ST->hasVOP3PInsts())
282 return needsPromotionToI32(VT->getElementType());
288 // Return true if the op promoted to i32 should have nsw set.
289 static bool promotedOpIsNSW(const Instruction &I) {
290 switch (I.getOpcode()) {
291 case Instruction::Shl:
292 case Instruction::Add:
293 case Instruction::Sub:
295 case Instruction::Mul:
296 return I.hasNoUnsignedWrap();
302 // Return true if the op promoted to i32 should have nuw set.
303 static bool promotedOpIsNUW(const Instruction &I) {
304 switch (I.getOpcode()) {
305 case Instruction::Shl:
306 case Instruction::Add:
307 case Instruction::Mul:
309 case Instruction::Sub:
310 return I.hasNoUnsignedWrap();
316 bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
317 Type *Ty = I.getType();
318 const DataLayout &DL = Mod->getDataLayout();
319 int TySize = DL.getTypeSizeInBits(Ty);
320 Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);
322 return I.isSimple() && TySize < 32 && Alignment >= 4 && DA->isUniform(&I);
325 bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
326 assert(needsPromotionToI32(I.getType()) &&
327 "I does not need promotion to i32");
329 if (I.getOpcode() == Instruction::SDiv ||
330 I.getOpcode() == Instruction::UDiv ||
331 I.getOpcode() == Instruction::SRem ||
332 I.getOpcode() == Instruction::URem)
335 IRBuilder<> Builder(&I);
336 Builder.SetCurrentDebugLocation(I.getDebugLoc());
338 Type *I32Ty = getI32Ty(Builder, I.getType());
339 Value *ExtOp0 = nullptr;
340 Value *ExtOp1 = nullptr;
341 Value *ExtRes = nullptr;
342 Value *TruncRes = nullptr;
345 ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
346 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
348 ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
349 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
352 ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
353 if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
354 if (promotedOpIsNSW(cast<Instruction>(I)))
355 Inst->setHasNoSignedWrap();
357 if (promotedOpIsNUW(cast<Instruction>(I)))
358 Inst->setHasNoUnsignedWrap();
360 if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
361 Inst->setIsExact(ExactOp->isExact());
364 TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
366 I.replaceAllUsesWith(TruncRes);
372 bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
373 assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
374 "I does not need promotion to i32");
376 IRBuilder<> Builder(&I);
377 Builder.SetCurrentDebugLocation(I.getDebugLoc());
379 Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
380 Value *ExtOp0 = nullptr;
381 Value *ExtOp1 = nullptr;
382 Value *NewICmp = nullptr;
385 ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
386 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
388 ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
389 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
391 NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);
393 I.replaceAllUsesWith(NewICmp);
399 bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
400 assert(needsPromotionToI32(I.getType()) &&
401 "I does not need promotion to i32");
403 IRBuilder<> Builder(&I);
404 Builder.SetCurrentDebugLocation(I.getDebugLoc());
406 Type *I32Ty = getI32Ty(Builder, I.getType());
407 Value *ExtOp1 = nullptr;
408 Value *ExtOp2 = nullptr;
409 Value *ExtRes = nullptr;
410 Value *TruncRes = nullptr;
413 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
414 ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
416 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
417 ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
419 ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
420 TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
422 I.replaceAllUsesWith(TruncRes);
428 bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
429 IntrinsicInst &I) const {
430 assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
431 "I must be bitreverse intrinsic");
432 assert(needsPromotionToI32(I.getType()) &&
433 "I does not need promotion to i32");
435 IRBuilder<> Builder(&I);
436 Builder.SetCurrentDebugLocation(I.getDebugLoc());
438 Type *I32Ty = getI32Ty(Builder, I.getType());
Function *I32 =
    Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
441 Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
442 Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
Value *LShrOp =
    Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
Value *TruncRes =
    Builder.CreateTrunc(LShrOp, I.getType());
448 I.replaceAllUsesWith(TruncRes);
454 unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
455 unsigned ScalarSize) const {
456 KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
457 return ScalarSize - Known.countMinLeadingZeros();
460 unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
461 unsigned ScalarSize) const {
// In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
464 return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
467 bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
468 return ScalarSize >= 24 && // Types less than 24-bit should be treated
469 // as unsigned 24-bit values.
470 numBitsSigned(V, ScalarSize) < 24;
473 bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
474 return numBitsUnsigned(V, ScalarSize) <= 24;
477 static void extractValues(IRBuilder<> &Builder,
478 SmallVectorImpl<Value *> &Values, Value *V) {
479 auto *VT = dyn_cast<FixedVectorType>(V->getType());
485 for (int I = 0, E = VT->getNumElements(); I != E; ++I)
486 Values.push_back(Builder.CreateExtractElement(V, I));
489 static Value *insertValues(IRBuilder<> &Builder,
491 SmallVectorImpl<Value *> &Values) {
492 if (Values.size() == 1)
495 Value *NewVal = UndefValue::get(Ty);
496 for (int I = 0, E = Values.size(); I != E; ++I)
497 NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);
502 bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
503 if (I.getOpcode() != Instruction::Mul)
506 Type *Ty = I.getType();
507 unsigned Size = Ty->getScalarSizeInBits();
508 if (Size <= 16 && ST->has16BitInsts())
511 // Prefer scalar if this could be s_mul_i32
512 if (DA->isUniform(&I))
515 Value *LHS = I.getOperand(0);
516 Value *RHS = I.getOperand(1);
517 IRBuilder<> Builder(&I);
518 Builder.SetCurrentDebugLocation(I.getDebugLoc());
520 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
522 // TODO: Should this try to match mulhi24?
523 if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
524 IntrID = Intrinsic::amdgcn_mul_u24;
525 } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
526 IntrID = Intrinsic::amdgcn_mul_i24;
530 SmallVector<Value *, 4> LHSVals;
531 SmallVector<Value *, 4> RHSVals;
532 SmallVector<Value *, 4> ResultVals;
533 extractValues(Builder, LHSVals, LHS);
534 extractValues(Builder, RHSVals, RHS);
537 IntegerType *I32Ty = Builder.getInt32Ty();
538 FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
539 for (int I = 0, E = LHSVals.size(); I != E; ++I) {
541 if (IntrID == Intrinsic::amdgcn_mul_u24) {
542 LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
543 RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
545 LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
546 RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
549 Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});
551 if (IntrID == Intrinsic::amdgcn_mul_u24) {
552 ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
553 LHSVals[I]->getType()));
555 ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
556 LHSVals[I]->getType()));
560 Value *NewVal = insertValues(Builder, Ty, ResultVals);
561 NewVal->takeName(&I);
562 I.replaceAllUsesWith(NewVal);
// Find a select instruction, which may have been cast. This is mostly to deal
569 // with cases where i16 selects were promoted here to i32.
570 static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
572 if (SelectInst *Sel = dyn_cast<SelectInst>(V))
575 if ((Cast = dyn_cast<CastInst>(V))) {
576 if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
583 bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
584 // Don't do this unless the old select is going away. We want to eliminate the
585 // binary operator, not replace a binop with a select.
590 // TODO: Should probably try to handle some cases with multiple
591 // users. Duplicating the select may be profitable for division.
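// For illustration (a sketch): given a single-use select with constant arms
// and a constant other operand,
//   %s = select i1 %c, i32 4, i32 8
//   %d = udiv i32 64, %s
// folds to
//   %d = select i1 %c, i32 16, i32 8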
592 SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
593 if (!Sel || !Sel->hasOneUse()) {
595 Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
598 if (!Sel || !Sel->hasOneUse())
601 Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
602 Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
603 Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
604 if (!CBO || !CT || !CF)
608 if (!CastOp->hasOneUse())
610 CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
611 CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
614 // TODO: Handle special 0/-1 cases DAG combine does, although we only really
615 // need to handle divisions here.
616 Constant *FoldedT = SelOpNo ?
617 ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
618 ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
619 if (isa<ConstantExpr>(FoldedT))
622 Constant *FoldedF = SelOpNo ?
623 ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
624 ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
625 if (isa<ConstantExpr>(FoldedF))
628 IRBuilder<> Builder(&BO);
629 Builder.SetCurrentDebugLocation(BO.getDebugLoc());
630 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
631 Builder.setFastMathFlags(FPOp->getFastMathFlags());
Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                        FoldedT, FoldedF);
635 NewSelect->takeName(&BO);
636 BO.replaceAllUsesWith(NewSelect);
637 BO.eraseFromParent();
639 CastOp->eraseFromParent();
640 Sel->eraseFromParent();
644 // Optimize fdiv with rcp:
646 // 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
647 // allowed with unsafe-fp-math or afn.
649 // a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
650 static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
651 bool RcpIsAccurate, IRBuilder<> &Builder,
654 if (!AllowInaccurateRcp && !RcpIsAccurate)
657 Type *Ty = Den->getType();
658 if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
659 if (AllowInaccurateRcp || RcpIsAccurate) {
660 if (CLHS->isExactlyValue(1.0)) {
661 Function *Decl = Intrinsic::getDeclaration(
662 Mod, Intrinsic::amdgcn_rcp, Ty);
// v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
// the CI documentation they have a worst-case error of 1 ulp.
666 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
667 // use it as long as we aren't trying to use denormals.
669 // v_rcp_f16 and v_rsq_f16 DO support denormals.
671 // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
672 // insert rsq intrinsic here.
675 return Builder.CreateCall(Decl, { Den });
678 // Same as for 1.0, but expand the sign out of the constant.
679 if (CLHS->isExactlyValue(-1.0)) {
680 Function *Decl = Intrinsic::getDeclaration(
681 Mod, Intrinsic::amdgcn_rcp, Ty);
683 // -1.0 / x -> rcp (fneg x)
684 Value *FNeg = Builder.CreateFNeg(Den);
685 return Builder.CreateCall(Decl, { FNeg });
690 if (AllowInaccurateRcp) {
691 Function *Decl = Intrinsic::getDeclaration(
692 Mod, Intrinsic::amdgcn_rcp, Ty);
694 // Turn into multiply by the reciprocal.
695 // x / y -> x * (1.0 / y)
696 Value *Recip = Builder.CreateCall(Decl, { Den });
697 return Builder.CreateFMul(Num, Recip);
702 // optimize with fdiv.fast:
704 // a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
706 // 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
708 // NOTE: optimizeWithRcp should be tried first because rcp is the preference.
709 static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
710 bool HasDenormals, IRBuilder<> &Builder,
712 // fdiv.fast can achieve 2.5 ULP accuracy.
713 if (ReqdAccuracy < 2.5f)
716 // Only have fdiv.fast for f32.
717 Type *Ty = Den->getType();
718 if (!Ty->isFloatTy())
721 bool NumIsOne = false;
722 if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
723 if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
// fdiv.fast does not support denormals, but using it for 1.0/x is always fine.
728 if (HasDenormals && !NumIsOne)
731 Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
732 return Builder.CreateCall(Decl, { Num, Den });
// Optimization is performed based on fpmath, fast-math flags, and the denormal
// mode, rewriting fdiv with either rcp or fdiv.fast:
739 // 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
740 // allowed with unsafe-fp-math or afn.
742 // a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
745 // a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
747 // 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
749 // NOTE: rcp is the preference in cases that both are legal.
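//
// For illustration (a sketch): with 'afn' (or unsafe-fp-math) and no accuracy
// requirement,
//   %r = fdiv afn float %a, %b
// becomes roughly
//   %rcp = call float @llvm.amdgcn.rcp.f32(float %b)
//   %r   = fmul afn float %a, %rcp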
750 bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
752 Type *Ty = FDiv.getType()->getScalarType();
754 // No intrinsic for fdiv16 if target does not support f16.
755 if (Ty->isHalfTy() && !ST->has16BitInsts())
758 const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
759 const float ReqdAccuracy = FPOp->getFPAccuracy();
761 // Inaccurate rcp is allowed with unsafe-fp-math or afn.
762 FastMathFlags FMF = FPOp->getFastMathFlags();
763 const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();
765 // rcp_f16 is accurate for !fpmath >= 1.0ulp.
766 // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
767 // rcp_f64 is never accurate.
768 const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
769 (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);
771 IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
772 Builder.setFastMathFlags(FMF);
773 Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());
775 Value *Num = FDiv.getOperand(0);
776 Value *Den = FDiv.getOperand(1);
778 Value *NewFDiv = nullptr;
779 if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
780 NewFDiv = UndefValue::get(VT);
782 // FIXME: Doesn't do the right thing for cases where the vector is partially
783 // constant. This works when the scalarizer pass is run first.
784 for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
785 Value *NumEltI = Builder.CreateExtractElement(Num, I);
786 Value *DenEltI = Builder.CreateExtractElement(Den, I);
788 Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
789 RcpIsAccurate, Builder, Mod);
790 if (!NewElt) // Try fdiv.fast.
791 NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
792 HasFP32Denormals, Builder, Mod);
793 if (!NewElt) // Keep the original.
794 NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
796 NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
798 } else { // Scalar FDiv.
NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                          Builder, Mod);
802 if (!NewFDiv) { // Try fdiv.fast.
NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy, HasFP32Denormals,
                               Builder, Mod);
809 FDiv.replaceAllUsesWith(NewFDiv);
810 NewFDiv->takeName(&FDiv);
811 FDiv.eraseFromParent();
817 static bool hasUnsafeFPMath(const Function &F) {
818 Attribute Attr = F.getFnAttribute("unsafe-fp-math");
819 return Attr.getValueAsString() == "true";
822 static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
823 Value *LHS, Value *RHS) {
824 Type *I32Ty = Builder.getInt32Ty();
825 Type *I64Ty = Builder.getInt64Ty();
827 Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
828 Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
829 Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
830 Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
831 Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
832 Hi = Builder.CreateTrunc(Hi, I32Ty);
833 return std::make_pair(Lo, Hi);
836 static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
837 return getMul64(Builder, LHS, RHS).second;
/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
843 int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
844 Value *Num, Value *Den,
845 unsigned AtLeast, bool IsSigned) const {
846 const DataLayout &DL = Mod->getDataLayout();
847 unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
848 if (LHSSignBits < AtLeast)
851 unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
852 if (RHSSignBits < AtLeast)
855 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
856 unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
862 // The fractional part of a float is enough to accurately represent up to
863 // a 24-bit signed integer.
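// (A float carries a 24-bit significand, so every integer of magnitude up to
// 2^24 is exactly representable.)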
864 Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
866 Value *Num, Value *Den,
867 bool IsDiv, bool IsSigned) const {
868 int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
871 return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
874 Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
876 Value *Num, Value *Den,
878 bool IsDiv, bool IsSigned) const {
879 Type *I32Ty = Builder.getInt32Ty();
880 Num = Builder.CreateTrunc(Num, I32Ty);
881 Den = Builder.CreateTrunc(Den, I32Ty);
883 Type *F32Ty = Builder.getFloatTy();
884 ConstantInt *One = Builder.getInt32(1);
888 // char|short jq = ia ^ ib;
889 JQ = Builder.CreateXor(Num, Den);
891 // jq = jq >> (bitsize - 2)
892 JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));
895 JQ = Builder.CreateOr(JQ, One);
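// jq is now +1 when ia and ib have the same sign and -1 otherwise; it is the
// correction conditionally added to the truncated quotient below.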
898 // int ia = (int)LHS;
904 // float fa = (float)ia;
905 Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
906 : Builder.CreateUIToFP(IA, F32Ty);
908 // float fb = (float)ib;
909 Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
910 : Builder.CreateUIToFP(IB,F32Ty);
912 Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
913 Builder.getFloatTy());
914 Value *RCP = Builder.CreateCall(RcpDecl, { FB });
915 Value *FQM = Builder.CreateFMul(FA, RCP);
918 CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
919 FQ->copyFastMathFlags(Builder.getFastMathFlags());
921 // float fqneg = -fq;
922 Value *FQNeg = Builder.CreateFNeg(FQ);
924 // float fr = mad(fqneg, fb, fa);
auto FMAD = !ST->hasMadMacF32Insts()
                ? (Intrinsic::ID)Intrinsic::fma
                : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
928 Value *FR = Builder.CreateIntrinsic(FMAD,
929 {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
932 Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
933 : Builder.CreateFPToUI(FQ, I32Ty);
936 FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);
939 FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);
941 // int cv = fr >= fb;
942 Value *CV = Builder.CreateFCmpOGE(FR, FB);
944 // jq = (cv ? jq : 0);
945 JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));
948 Value *Div = Builder.CreateAdd(IQ, JQ);
952 // Rem needs compensation, it's easier to recompute it
953 Value *Rem = Builder.CreateMul(Div, Den);
954 Res = Builder.CreateSub(Num, Rem);
957 if (DivBits != 0 && DivBits < 32) {
958 // Extend in register from the number of bits this divide really is.
960 int InRegBits = 32 - DivBits;
962 Res = Builder.CreateShl(Res, InRegBits);
963 Res = Builder.CreateAShr(Res, InRegBits);
965 ConstantInt *TruncMask
966 = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
967 Res = Builder.CreateAnd(Res, TruncMask);
// Try to recognize special cases for which the DAG will emit better expansions
// than the general expansion we do here.
977 // TODO: It would be better to just directly handle those optimizations here.
978 bool AMDGPUCodeGenPrepare::divHasSpecialOptimization(
979 BinaryOperator &I, Value *Num, Value *Den) const {
980 if (Constant *C = dyn_cast<Constant>(Den)) {
// Arbitrary constants get a better expansion as long as a wider mulhi is
// legal.
983 if (C->getType()->getScalarSizeInBits() <= 32)
986 // TODO: Sdiv check for not exact for some reason.
// If there's no wider mulhi, there's only a better expansion for powers of
// two.
990 // TODO: Should really know for each vector element.
991 if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
997 if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
998 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
999 if (BinOpDen->getOpcode() == Instruction::Shl &&
1000 isa<Constant>(BinOpDen->getOperand(0)) &&
1001 isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
1010 static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
1011 // Check whether the sign can be determined statically.
1012 KnownBits Known = computeKnownBits(V, *DL);
1013 if (Known.isNegative())
1014 return Constant::getAllOnesValue(V->getType());
1015 if (Known.isNonNegative())
1016 return Constant::getNullValue(V->getType());
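// Otherwise materialize the sign: an arithmetic shift right by 31 gives 0 for
// non-negative values and -1 for negative values.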
1017 return Builder.CreateAShr(V, Builder.getInt32(31));
1020 Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
1021 BinaryOperator &I, Value *X,
1023 Instruction::BinaryOps Opc = I.getOpcode();
1024 assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
1025 Opc == Instruction::SRem || Opc == Instruction::SDiv);
1029 Builder.setFastMathFlags(FMF);
1031 if (divHasSpecialOptimization(I, X, Y))
1032 return nullptr; // Keep it for later optimization.
1034 bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
1035 bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
1037 Type *Ty = X->getType();
1038 Type *I32Ty = Builder.getInt32Ty();
1039 Type *F32Ty = Builder.getFloatTy();
1041 if (Ty->getScalarSizeInBits() < 32) {
1043 X = Builder.CreateSExt(X, I32Ty);
1044 Y = Builder.CreateSExt(Y, I32Ty);
1046 X = Builder.CreateZExt(X, I32Ty);
1047 Y = Builder.CreateZExt(Y, I32Ty);
1051 if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
1052 return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
1053 Builder.CreateZExtOrTrunc(Res, Ty);
1056 ConstantInt *Zero = Builder.getInt32(0);
1057 ConstantInt *One = Builder.getInt32(1);
1059 Value *Sign = nullptr;
1061 Value *SignX = getSign32(X, Builder, DL);
1062 Value *SignY = getSign32(Y, Builder, DL);
1063 // Remainder sign is the same as LHS
1064 Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
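// Take absolute values: (v + sign) ^ sign is v when sign == 0 and the
// two's-complement negation of v when sign == -1.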
1066 X = Builder.CreateAdd(X, SignX);
1067 Y = Builder.CreateAdd(Y, SignY);
1069 X = Builder.CreateXor(X, SignX);
1070 Y = Builder.CreateXor(Y, SignY);
1073 // The algorithm here is based on ideas from "Software Integer Division", Tom
1074 // Rodeheffer, August 2008.
1076 // unsigned udiv(unsigned x, unsigned y) {
1077 // // Initial estimate of inv(y). The constant is less than 2^32 to ensure
// // that this is a lower bound on inv(y), even if some of the
// // calculations round up.
1080 // unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
1082 // // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
// // Empirically this is guaranteed to give a "two-y" lower bound on
// // inv(y).
1085 // z += umulh(z, -y * z);
1087 // // Quotient/remainder estimate.
1088 // unsigned q = umulh(x, z);
1089 // unsigned r = x - q * y;
1091 // // Two rounds of quotient/remainder refinement.
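// if (r >= y) { q += 1; r -= y; }
// if (r >= y) { q += 1; r -= y; }
//
// return q;   // (or r, when computing the remainder)
// // }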
1104 // Initial estimate of inv(y).
1105 Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
1106 Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
1107 Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
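// 0x4F7FFFFE is 4294966784.0 == 2^32 - 512, the (4294967296.0 - 512.0)
// constant from the sketch above.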
1108 Constant *Scale = ConstantFP::get(F32Ty, BitsToFloat(0x4F7FFFFE));
1109 Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
1110 Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);
1112 // One round of UNR.
1113 Value *NegY = Builder.CreateSub(Zero, Y);
1114 Value *NegYZ = Builder.CreateMul(NegY, Z);
1115 Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));
1117 // Quotient/remainder estimate.
1118 Value *Q = getMulHu(Builder, X, Z);
1119 Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));
1121 // First quotient/remainder refinement.
1122 Value *Cond = Builder.CreateICmpUGE(R, Y);
1124 Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1125 R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1127 // Second quotient/remainder refinement.
1128 Cond = Builder.CreateICmpUGE(R, Y);
1131 Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1133 Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
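// For the signed case, restore the sign: (res ^ sign) - sign is a no-op when
// sign == 0 and negates res when sign == -1.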
1136 Res = Builder.CreateXor(Res, Sign);
1137 Res = Builder.CreateSub(Res, Sign);
1140 Res = Builder.CreateTrunc(Res, Ty);
1145 Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
1147 Value *Num, Value *Den) const {
1148 if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
1149 return nullptr; // Keep it for later optimization.
1151 Instruction::BinaryOps Opc = I.getOpcode();
1153 bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
1154 bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
1156 int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1157 if (NumDivBits == -1)
1160 Value *Narrowed = nullptr;
1161 if (NumDivBits <= 24) {
1162 Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
1164 } else if (NumDivBits <= 32) {
1165 Narrowed = expandDivRem32(Builder, I, Num, Den);
1169 return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
1170 Builder.CreateZExt(Narrowed, Num->getType());
1176 void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
1177 Instruction::BinaryOps Opc = I.getOpcode();
1178 // Do the general expansion.
1179 if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
1180 expandDivisionUpTo64Bits(&I);
1184 if (Opc == Instruction::URem || Opc == Instruction::SRem) {
1185 expandRemainderUpTo64Bits(&I);
1189 llvm_unreachable("not a division");
1192 bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
1193 if (foldBinOpIntoSelect(I))
1196 if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1197 DA->isUniform(&I) && promoteUniformOpToI32(I))
1200 if (UseMul24Intrin && replaceMulWithMul24(I))
1203 bool Changed = false;
1204 Instruction::BinaryOps Opc = I.getOpcode();
1205 Type *Ty = I.getType();
1206 Value *NewDiv = nullptr;
1207 unsigned ScalarSize = Ty->getScalarSizeInBits();
1209 SmallVector<BinaryOperator *, 8> Div64ToExpand;
1211 if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
1212 Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
1214 !DisableIDivExpand) {
1215 Value *Num = I.getOperand(0);
1216 Value *Den = I.getOperand(1);
1217 IRBuilder<> Builder(&I);
1218 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1220 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
1221 NewDiv = UndefValue::get(VT);
1223 for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
1224 Value *NumEltN = Builder.CreateExtractElement(Num, N);
1225 Value *DenEltN = Builder.CreateExtractElement(Den, N);
1228 if (ScalarSize <= 32) {
1229 NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
1231 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1233 // See if this 64-bit division can be shrunk to 32/24-bits before
1234 // producing the general expansion.
1235 NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
1237 // The general 64-bit expansion introduces control flow and doesn't
// return the new value. Just insert a scalar copy and defer expanding it.
1240 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1241 Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
1245 NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
1248 if (ScalarSize <= 32)
1249 NewDiv = expandDivRem32(Builder, I, Num, Den);
1251 NewDiv = shrinkDivRem64(Builder, I, Num, Den);
1253 Div64ToExpand.push_back(&I);
1258 I.replaceAllUsesWith(NewDiv);
1259 I.eraseFromParent();
1264 if (ExpandDiv64InIR) {
1265 // TODO: We get much worse code in specially handled constant cases.
1266 for (BinaryOperator *Div : Div64ToExpand) {
1267 expandDivRem64(*Div);
1275 bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
1279 if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
1280 I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1281 canWidenScalarExtLoad(I)) {
1282 IRBuilder<> Builder(&I);
1283 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1285 Type *I32Ty = Builder.getInt32Ty();
1286 Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
1287 Value *BitCast= Builder.CreateBitCast(I.getPointerOperand(), PT);
1288 LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
1289 WidenLoad->copyMetadata(I);
1291 // If we have range metadata, we need to convert the type, and not make
1292 // assumptions about the high bits.
1293 if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
1294 ConstantInt *Lower =
1295 mdconst::extract<ConstantInt>(Range->getOperand(0));
1297 if (Lower->getValue().isNullValue()) {
1298 WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
1300 Metadata *LowAndHigh[] = {
1301 ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
1302 // Don't make assumptions about the high bits.
1303 ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
1306 WidenLoad->setMetadata(LLVMContext::MD_range,
1307 MDNode::get(Mod->getContext(), LowAndHigh));
1311 int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
1312 Type *IntNTy = Builder.getIntNTy(TySize);
1313 Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1314 Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1315 I.replaceAllUsesWith(ValOrig);
1316 I.eraseFromParent();
1323 bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
1324 bool Changed = false;
1326 if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
1328 Changed |= promoteUniformOpToI32(I);
1333 bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
1334 bool Changed = false;
1336 if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1338 Changed |= promoteUniformOpToI32(I);
1343 bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
1344 switch (I.getIntrinsicID()) {
1345 case Intrinsic::bitreverse:
1346 return visitBitreverseIntrinsicInst(I);
1352 bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
1353 bool Changed = false;
1355 if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1357 Changed |= promoteUniformBitreverseToI32(I);
1362 bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
1364 DL = &Mod->getDataLayout();
1368 bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
1369 if (skipFunction(F))
1372 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
1376 const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
1377 ST = &TM.getSubtarget<GCNSubtarget>(F);
1378 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1379 DA = &getAnalysis<LegacyDivergenceAnalysis>();
1381 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1382 DT = DTWP ? &DTWP->getDomTree() : nullptr;
1384 HasUnsafeFPMath = hasUnsafeFPMath(F);
1386 AMDGPU::SIModeRegisterDefaults Mode(F);
1387 HasFP32Denormals = Mode.allFP32Denormals();
1389 bool MadeChange = false;
1391 Function::iterator NextBB;
1392 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
1393 BasicBlock *BB = &*FI;
1394 NextBB = std::next(FI);
1396 BasicBlock::iterator Next;
1397 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; I = Next) {
1398 Next = std::next(I);
1400 MadeChange |= visit(*I);
1402 if (Next != E) { // Control flow changed
1403 BasicBlock *NextInstBB = Next->getParent();
1404 if (NextInstBB != BB) {
1416 INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
1417 "AMDGPU IR optimizations", false, false)
1418 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1419 INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)
1423 char AMDGPUCodeGenPrepare::ID = 0;
1425 FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
return new AMDGPUCodeGenPrepare();
}