//===- AMDGPULibCalls.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file does AMD library function optimizations.
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPULibFunc.h"
#include "AMDGPUSubtarget.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>

#define DEBUG_TYPE "amdgpu-simplifylib"

using namespace llvm;
static cl::opt<bool> EnablePreLink("amdgpu-prelink",
  cl::desc("Enable pre-link mode optimizations"),
  cl::init(false),
  cl::Hidden);
static cl::list<std::string> UseNative("amdgpu-use-native",
  cl::desc("Comma separated list of functions to replace with native, or all"),
  cl::CommaSeparated, cl::ValueOptional,
  cl::Hidden);
#define MATH_PI      numbers::pi
#define MATH_E       numbers::e
#define MATH_SQRT2   numbers::sqrt2
#define MATH_SQRT1_2 numbers::inv_sqrt2
namespace llvm {

class AMDGPULibCalls {
private:

  typedef llvm::AMDGPULibFunc FuncInfo;

  const TargetMachine *TM;

  // -fuse-native.
  bool AllNative = false;

  bool useNativeFunc(const StringRef F) const;
  // Return a pointer (pointer expr) to the function if a function definition
  // with "FuncName" exists. It may create a new function prototype in
  // pre-link mode.
  FunctionCallee getFunction(Module *M, const FuncInfo &fInfo);
  // Replace a normal function with its native version.
  bool replaceWithNative(CallInst *CI, const FuncInfo &FInfo);

  bool parseFunctionName(const StringRef &FMangledName,
                         FuncInfo *FInfo = nullptr /*out*/);
  bool TDOFold(CallInst *CI, const FuncInfo &FInfo);

  /* Specialized optimizations */

  // recip (half or native)
  bool fold_recip(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // divide (half or native)
  bool fold_divide(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // pow/powr/pown
  bool fold_pow(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // rootn
  bool fold_rootn(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // fma/mad
  bool fold_fma_mad(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // -fuse-native for sincos
  bool sincosUseNative(CallInst *aCI, const FuncInfo &FInfo);
  // Evaluate calls when their arguments are constants.
  bool evaluateScalarMathFunc(FuncInfo &FInfo, double &Res0, double &Res1,
                              Constant *copr0, Constant *copr1,
                              Constant *copr2);
  bool evaluateCall(CallInst *aCI, FuncInfo &FInfo);

  // exp
  bool fold_exp(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // exp2
  bool fold_exp2(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // exp10
  bool fold_exp10(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // log
  bool fold_log(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // log2
  bool fold_log2(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // log10
  bool fold_log10(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // sqrt
  bool fold_sqrt(CallInst *CI, IRBuilder<> &B, const FuncInfo &FInfo);

  // sin/cos
  bool fold_sincos(CallInst *CI, IRBuilder<> &B, AliasAnalysis *AA);

  // __read_pipe/__write_pipe
  bool fold_read_write_pipe(CallInst *CI, IRBuilder<> &B, FuncInfo &FInfo);

  // llvm.amdgcn.wavefrontsize
  bool fold_wavefrontsize(CallInst *CI, IRBuilder<> &B);

  // Get insertion point at entry.
  BasicBlock::iterator getEntryIns(CallInst *UI);
  // Insert an alloca instruction.
  AllocaInst *insertAlloca(CallInst *UI, IRBuilder<> &B, const char *prefix);
  // Get a scalar native builtin single-argument FP function.
  FunctionCallee getNativeFunction(Module *M, const FuncInfo &FInfo);
protected:
  CallInst *CI;

  bool isUnsafeMath(const CallInst *CI) const;

  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
public:
  AMDGPULibCalls(const TargetMachine *TM_ = nullptr) : TM(TM_) {}

  bool fold(CallInst *CI, AliasAnalysis *AA = nullptr);

  void initNativeFuncs();

  // Replace a normal math function call with its native version.
  bool useNative(CallInst *CI);
};

} // end llvm namespace
namespace {

class AMDGPUSimplifyLibCalls : public FunctionPass {

  AMDGPULibCalls Simplifier;

public:
  static char ID; // Pass identification

  AMDGPUSimplifyLibCalls(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), Simplifier(TM) {
    initializeAMDGPUSimplifyLibCallsPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};
class AMDGPUUseNativeCalls : public FunctionPass {

  AMDGPULibCalls Simplifier;

public:
  static char ID; // Pass identification

  AMDGPUUseNativeCalls() : FunctionPass(ID) {
    initializeAMDGPUUseNativeCallsPass(*PassRegistry::getPassRegistry());
    Simplifier.initNativeFuncs();
  }

  bool runOnFunction(Function &F) override;
};
} // end anonymous namespace.

char AMDGPUSimplifyLibCalls::ID = 0;
char AMDGPUUseNativeCalls::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUSimplifyLibCalls, "amdgpu-simplifylib",
                      "Simplify well-known AMD library calls", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(AMDGPUSimplifyLibCalls, "amdgpu-simplifylib",
                    "Simplify well-known AMD library calls", false, false)
INITIALIZE_PASS(AMDGPUUseNativeCalls, "amdgpu-usenative",
                "Replace builtin math calls with their native versions.",
                false, false)
template <typename IRB>
static CallInst *CreateCallEx(IRB &B, FunctionCallee Callee, Value *Arg,
                              const Twine &Name = "") {
  CallInst *R = B.CreateCall(Callee, Arg, Name);
  if (Function *F = dyn_cast<Function>(Callee.getCallee()))
    R->setCallingConv(F->getCallingConv());
  return R;
}
template <typename IRB>
static CallInst *CreateCallEx2(IRB &B, FunctionCallee Callee, Value *Arg1,
                               Value *Arg2, const Twine &Name = "") {
  CallInst *R = B.CreateCall(Callee, {Arg1, Arg2}, Name);
  if (Function *F = dyn_cast<Function>(Callee.getCallee()))
    R->setCallingConv(F->getCallingConv());
  return R;
}
// Data structures for table-driven optimizations.
// FuncTbl works for both f32 and f64 functions with 1 input argument.
struct TableEntry {
  double result;
  double input;
};
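// For example, tbl_asin below maps the input 1.0 to MATH_PI / 2.0, so
// TDOFold() can rewrite asin(1.0f) into the constant pi/2 instead of
// emitting a library call.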
/* a list of {result, input} */
static const TableEntry tbl_acos[] = {
  {MATH_PI / 2.0, 0.0},
  {MATH_PI / 2.0, -0.0},
static const TableEntry tbl_acosh[] = {
static const TableEntry tbl_acospi[] = {
static const TableEntry tbl_asin[] = {
  {MATH_PI / 2.0, 1.0},
  {-MATH_PI / 2.0, -1.0}
static const TableEntry tbl_asinh[] = {
static const TableEntry tbl_asinpi[] = {
static const TableEntry tbl_atan[] = {
  {MATH_PI / 4.0, 1.0},
  {-MATH_PI / 4.0, -1.0}
static const TableEntry tbl_atanh[] = {
static const TableEntry tbl_atanpi[] = {
static const TableEntry tbl_cbrt[] = {
static const TableEntry tbl_cos[] = {
static const TableEntry tbl_cosh[] = {
static const TableEntry tbl_cospi[] = {
static const TableEntry tbl_erfc[] = {
static const TableEntry tbl_erf[] = {
static const TableEntry tbl_exp[] = {
static const TableEntry tbl_exp2[] = {
static const TableEntry tbl_exp10[] = {
static const TableEntry tbl_expm1[] = {
static const TableEntry tbl_log[] = {
static const TableEntry tbl_log2[] = {
static const TableEntry tbl_log10[] = {
static const TableEntry tbl_rsqrt[] = {
static const TableEntry tbl_sin[] = {
static const TableEntry tbl_sinh[] = {
static const TableEntry tbl_sinpi[] = {
static const TableEntry tbl_sqrt[] = {
static const TableEntry tbl_tan[] = {
static const TableEntry tbl_tanh[] = {
static const TableEntry tbl_tanpi[] = {
static const TableEntry tbl_tgamma[] = {
static bool HasNative(AMDGPULibFunc::EFuncId id) {
  switch (id) {
  case AMDGPULibFunc::EI_DIVIDE:
  case AMDGPULibFunc::EI_COS:
  case AMDGPULibFunc::EI_EXP:
  case AMDGPULibFunc::EI_EXP2:
  case AMDGPULibFunc::EI_EXP10:
  case AMDGPULibFunc::EI_LOG:
  case AMDGPULibFunc::EI_LOG2:
  case AMDGPULibFunc::EI_LOG10:
  case AMDGPULibFunc::EI_POWR:
  case AMDGPULibFunc::EI_RECIP:
  case AMDGPULibFunc::EI_RSQRT:
  case AMDGPULibFunc::EI_SIN:
  case AMDGPULibFunc::EI_SINCOS:
  case AMDGPULibFunc::EI_SQRT:
  case AMDGPULibFunc::EI_TAN:
    return true;
  default:;
  }
  return false;
}
struct TableRef {
  size_t size;
  const TableEntry *table; // variable size: from 0 to (size - 1)

  TableRef() : size(0), table(nullptr) {}

  template <size_t N>
  TableRef(const TableEntry (&tbl)[N]) : size(N), table(&tbl[0]) {}
};
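// Note: the templated constructor deduces the array length N, so
// TableRef(tbl_acos) records both the table pointer and its entry count
// without a separate size argument.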
static TableRef getOptTable(AMDGPULibFunc::EFuncId id) {
  switch (id) {
  case AMDGPULibFunc::EI_ACOS:   return TableRef(tbl_acos);
  case AMDGPULibFunc::EI_ACOSH:  return TableRef(tbl_acosh);
  case AMDGPULibFunc::EI_ACOSPI: return TableRef(tbl_acospi);
  case AMDGPULibFunc::EI_ASIN:   return TableRef(tbl_asin);
  case AMDGPULibFunc::EI_ASINH:  return TableRef(tbl_asinh);
  case AMDGPULibFunc::EI_ASINPI: return TableRef(tbl_asinpi);
  case AMDGPULibFunc::EI_ATAN:   return TableRef(tbl_atan);
  case AMDGPULibFunc::EI_ATANH:  return TableRef(tbl_atanh);
  case AMDGPULibFunc::EI_ATANPI: return TableRef(tbl_atanpi);
  case AMDGPULibFunc::EI_CBRT:   return TableRef(tbl_cbrt);
  case AMDGPULibFunc::EI_NCOS:
  case AMDGPULibFunc::EI_COS:    return TableRef(tbl_cos);
  case AMDGPULibFunc::EI_COSH:   return TableRef(tbl_cosh);
  case AMDGPULibFunc::EI_COSPI:  return TableRef(tbl_cospi);
  case AMDGPULibFunc::EI_ERFC:   return TableRef(tbl_erfc);
  case AMDGPULibFunc::EI_ERF:    return TableRef(tbl_erf);
  case AMDGPULibFunc::EI_EXP:    return TableRef(tbl_exp);
  case AMDGPULibFunc::EI_NEXP2:
  case AMDGPULibFunc::EI_EXP2:   return TableRef(tbl_exp2);
  case AMDGPULibFunc::EI_EXP10:  return TableRef(tbl_exp10);
  case AMDGPULibFunc::EI_EXPM1:  return TableRef(tbl_expm1);
  case AMDGPULibFunc::EI_LOG:    return TableRef(tbl_log);
  case AMDGPULibFunc::EI_NLOG2:
  case AMDGPULibFunc::EI_LOG2:   return TableRef(tbl_log2);
  case AMDGPULibFunc::EI_LOG10:  return TableRef(tbl_log10);
  case AMDGPULibFunc::EI_NRSQRT:
  case AMDGPULibFunc::EI_RSQRT:  return TableRef(tbl_rsqrt);
  case AMDGPULibFunc::EI_NSIN:
  case AMDGPULibFunc::EI_SIN:    return TableRef(tbl_sin);
  case AMDGPULibFunc::EI_SINH:   return TableRef(tbl_sinh);
  case AMDGPULibFunc::EI_SINPI:  return TableRef(tbl_sinpi);
  case AMDGPULibFunc::EI_NSQRT:
  case AMDGPULibFunc::EI_SQRT:   return TableRef(tbl_sqrt);
  case AMDGPULibFunc::EI_TAN:    return TableRef(tbl_tan);
  case AMDGPULibFunc::EI_TANH:   return TableRef(tbl_tanh);
  case AMDGPULibFunc::EI_TANPI:  return TableRef(tbl_tanpi);
  case AMDGPULibFunc::EI_TGAMMA: return TableRef(tbl_tgamma);
  default:;
  }
  return TableRef();
}
static inline int getVecSize(const AMDGPULibFunc &FInfo) {
  return FInfo.getLeads()[0].VectorSize;
}

static inline AMDGPULibFunc::EType getArgType(const AMDGPULibFunc &FInfo) {
  return (AMDGPULibFunc::EType)FInfo.getLeads()[0].ArgType;
}
FunctionCallee AMDGPULibCalls::getFunction(Module *M, const FuncInfo &fInfo) {
  // If we are doing PreLinkOpt, the function is external. So it is safe to
  // use getOrInsertFunction() at this stage.

  return EnablePreLink ? AMDGPULibFunc::getOrInsertFunction(M, fInfo)
                       : AMDGPULibFunc::getFunction(M, fInfo);
}
bool AMDGPULibCalls::parseFunctionName(const StringRef &FMangledName,
                                       FuncInfo *FInfo) {
  return AMDGPULibFunc::parse(FMangledName, *FInfo);
}
bool AMDGPULibCalls::isUnsafeMath(const CallInst *CI) const {
  if (auto Op = dyn_cast<FPMathOperator>(CI))
    if (Op->isFast())
      return true;
  const Function *F = CI->getParent()->getParent();
  Attribute Attr = F->getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}
bool AMDGPULibCalls::useNativeFunc(const StringRef F) const {
  return AllNative ||
         std::find(UseNative.begin(), UseNative.end(), F) != UseNative.end();
}
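// AllNative is set either by "-amdgpu-use-native=all" or by a bare
// "-amdgpu-use-native" (a single empty value, permitted by cl::ValueOptional);
// both request the native variant for every supported function.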
void AMDGPULibCalls::initNativeFuncs() {
  AllNative = useNativeFunc("all") ||
              (UseNative.getNumOccurrences() && UseNative.size() == 1 &&
               UseNative.begin()->empty());
}
bool AMDGPULibCalls::sincosUseNative(CallInst *aCI, const FuncInfo &FInfo) {
  bool native_sin = useNativeFunc("sin");
  bool native_cos = useNativeFunc("cos");

  if (native_sin && native_cos) {
    Module *M = aCI->getModule();
    Value *opr0 = aCI->getArgOperand(0);

    AMDGPULibFunc nf; // native function
    nf.getLeads()[0].ArgType = FInfo.getLeads()[0].ArgType;
    nf.getLeads()[0].VectorSize = FInfo.getLeads()[0].VectorSize;

    nf.setPrefix(AMDGPULibFunc::NATIVE);
    nf.setId(AMDGPULibFunc::EI_SIN);
    FunctionCallee sinExpr = getFunction(M, nf);

    nf.setPrefix(AMDGPULibFunc::NATIVE);
    nf.setId(AMDGPULibFunc::EI_COS);
    FunctionCallee cosExpr = getFunction(M, nf);
    if (sinExpr && cosExpr) {
      Value *sinval = CallInst::Create(sinExpr, opr0, "splitsin", aCI);
      Value *cosval = CallInst::Create(cosExpr, opr0, "splitcos", aCI);
      new StoreInst(cosval, aCI->getArgOperand(1), aCI);

      DEBUG_WITH_TYPE("usenative", dbgs() << "<useNative> replace " << *aCI
                                          << " with native version of sin/cos");

      replaceCall(sinval);
      return true;
    }
  }
  return false;
}
bool AMDGPULibCalls::useNative(CallInst *aCI) {
  CI = aCI;
  Function *Callee = aCI->getCalledFunction();

  FuncInfo FInfo;
  if (!parseFunctionName(Callee->getName(), &FInfo) || !FInfo.isMangled() ||
      FInfo.getPrefix() != AMDGPULibFunc::NOPFX ||
      getArgType(FInfo) == AMDGPULibFunc::F64 || !HasNative(FInfo.getId()) ||
      !(AllNative || useNativeFunc(FInfo.getName()))) {
    return false;
  }

  if (FInfo.getId() == AMDGPULibFunc::EI_SINCOS)
    return sincosUseNative(aCI, FInfo);

  FInfo.setPrefix(AMDGPULibFunc::NATIVE);
  FunctionCallee F = getFunction(aCI->getModule(), FInfo);
  if (!F)
    return false;

  aCI->setCalledFunction(F);
  DEBUG_WITH_TYPE("usenative", dbgs() << "<useNative> replace " << *aCI
                                      << " with native version");
  return true;
}
// Clang emits a call to __read_pipe_2 or __read_pipe_4 for the OpenCL
// read_pipe builtin, with appended type size and alignment arguments, where 2
// or 4 indicates the original number of arguments. The library has optimized
// versions of __read_pipe_2/__read_pipe_4 when the type size and alignment
// are the same power-of-2 value. This function transforms __read_pipe_2 to
// __read_pipe_2_N for such cases, where N is the size in bytes of the type
// (N = 1, 2, 4, 8, ..., 128). The same applies to __read_pipe_4,
// __write_pipe_2, and __write_pipe_4.
bool AMDGPULibCalls::fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
                                          FuncInfo &FInfo) {
  auto *Callee = CI->getCalledFunction();
  if (!Callee->isDeclaration())
    return false;

  assert(Callee->hasName() && "Invalid read_pipe/write_pipe function");
  auto *M = Callee->getParent();
  auto &Ctx = M->getContext();
  std::string Name = std::string(Callee->getName());
  auto NumArg = CI->getNumArgOperands();
  if (NumArg != 4 && NumArg != 6)
    return false;

  auto *PacketSize = CI->getArgOperand(NumArg - 2);
  auto *PacketAlign = CI->getArgOperand(NumArg - 1);
  if (!isa<ConstantInt>(PacketSize) || !isa<ConstantInt>(PacketAlign))
    return false;

  unsigned Size = cast<ConstantInt>(PacketSize)->getZExtValue();
  Align Alignment = cast<ConstantInt>(PacketAlign)->getAlignValue();
  if (Alignment != Size)
    return false;

  Type *PtrElemTy;
  if (Size <= 8)
    PtrElemTy = Type::getIntNTy(Ctx, Size * 8);
  else
    PtrElemTy = FixedVectorType::get(Type::getInt64Ty(Ctx), Size / 8);
  unsigned PtrArgLoc = CI->getNumArgOperands() - 3;
  auto PtrArg = CI->getArgOperand(PtrArgLoc);
  unsigned PtrArgAS = PtrArg->getType()->getPointerAddressSpace();
  auto *PtrTy = llvm::PointerType::get(PtrElemTy, PtrArgAS);

  SmallVector<llvm::Type *, 6> ArgTys;
  for (unsigned I = 0; I != PtrArgLoc; ++I)
    ArgTys.push_back(CI->getArgOperand(I)->getType());
  ArgTys.push_back(PtrTy);

  Name = Name + "_" + std::to_string(Size);
  auto *FTy = FunctionType::get(Callee->getReturnType(),
                                ArrayRef<Type *>(ArgTys), false);
  AMDGPULibFunc NewLibFunc(Name, FTy);
  FunctionCallee F = AMDGPULibFunc::getOrInsertFunction(M, NewLibFunc);
  if (!F)
    return false;
  auto *BCast = B.CreatePointerCast(PtrArg, PtrTy);
  SmallVector<Value *, 6> Args;
  for (unsigned I = 0; I != PtrArgLoc; ++I)
    Args.push_back(CI->getArgOperand(I));
  Args.push_back(BCast);

  auto *NCI = B.CreateCall(F, Args);
  NCI->setAttributes(CI->getAttributes());
  CI->replaceAllUsesWith(NCI);
  CI->dropAllReferences();
  CI->eraseFromParent();

  return true;
}
// This function returns false if there is no change; otherwise it returns
// true.
bool AMDGPULibCalls::fold(CallInst *CI, AliasAnalysis *AA) {
  this->CI = CI;

  Function *Callee = CI->getCalledFunction();

  // Ignore indirect calls.
  if (Callee == nullptr)
    return false;

  BasicBlock *BB = CI->getParent();
  LLVMContext &Context = CI->getParent()->getContext();
  IRBuilder<> B(Context);

  // Set the builder to the instruction after the call.
  B.SetInsertPoint(BB, CI->getIterator());

  // Copy fast flags from the original call.
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(CI))
    B.setFastMathFlags(FPOp->getFastMathFlags());

  switch (Callee->getIntrinsicID()) {
  case Intrinsic::not_intrinsic:
    break;
  case Intrinsic::amdgcn_wavefrontsize:
    return !EnablePreLink && fold_wavefrontsize(CI, B);
  default:
    return false;
  }
  FuncInfo FInfo;
  if (!parseFunctionName(Callee->getName(), &FInfo))
    return false;

  // Further check the number of arguments to see if they match.
  if (CI->getNumArgOperands() != FInfo.getNumArgs())
    return false;

  if (TDOFold(CI, FInfo))
    return true;
  // Under unsafe-math, evaluate calls if possible.
  // According to Brian Sumner, we can do this for all f32 function calls
  // using host's double function calls.
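  // (The assumption here is that evaluating in the host's double precision
  // and rounding the result back to float is acceptable under unsafe-math.)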
  if (isUnsafeMath(CI) && evaluateCall(CI, FInfo))
    return true;
  // Specialized optimizations for each function call.
  switch (FInfo.getId()) {
  case AMDGPULibFunc::EI_RECIP:
    // skip vector function
    assert((FInfo.getPrefix() == AMDGPULibFunc::NATIVE ||
            FInfo.getPrefix() == AMDGPULibFunc::HALF) &&
           "recip must be either a native or half function");
    return (getVecSize(FInfo) != 1) ? false : fold_recip(CI, B, FInfo);

  case AMDGPULibFunc::EI_DIVIDE:
    // skip vector function
    assert((FInfo.getPrefix() == AMDGPULibFunc::NATIVE ||
            FInfo.getPrefix() == AMDGPULibFunc::HALF) &&
           "divide must be either a native or half function");
    return (getVecSize(FInfo) != 1) ? false : fold_divide(CI, B, FInfo);

  case AMDGPULibFunc::EI_POW:
  case AMDGPULibFunc::EI_POWR:
  case AMDGPULibFunc::EI_POWN:
    return fold_pow(CI, B, FInfo);

  case AMDGPULibFunc::EI_ROOTN:
    // skip vector function
    return (getVecSize(FInfo) != 1) ? false : fold_rootn(CI, B, FInfo);

  case AMDGPULibFunc::EI_FMA:
  case AMDGPULibFunc::EI_MAD:
  case AMDGPULibFunc::EI_NFMA:
    // skip vector function
    return (getVecSize(FInfo) != 1) ? false : fold_fma_mad(CI, B, FInfo);

  case AMDGPULibFunc::EI_SQRT:
    return isUnsafeMath(CI) && fold_sqrt(CI, B, FInfo);
  case AMDGPULibFunc::EI_COS:
  case AMDGPULibFunc::EI_SIN:
    if ((getArgType(FInfo) == AMDGPULibFunc::F32 ||
         getArgType(FInfo) == AMDGPULibFunc::F64) &&
        (FInfo.getPrefix() == AMDGPULibFunc::NOPFX))
      return fold_sincos(CI, B, AA);

    break;
  case AMDGPULibFunc::EI_READ_PIPE_2:
  case AMDGPULibFunc::EI_READ_PIPE_4:
  case AMDGPULibFunc::EI_WRITE_PIPE_2:
  case AMDGPULibFunc::EI_WRITE_PIPE_4:
    return fold_read_write_pipe(CI, B, FInfo);

  default:
    break;
  }

  return false;
}
bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
  // Table-driven optimization.
  const TableRef tr = getOptTable(FInfo.getId());
  if (tr.size == 0)
    return false;

  int const sz = (int)tr.size;
  const TableEntry *const ftbl = tr.table;
  Value *opr0 = CI->getArgOperand(0);
  if (getVecSize(FInfo) > 1) {
    if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(opr0)) {
      SmallVector<double, 0> DVal;
      for (int eltNo = 0; eltNo < getVecSize(FInfo); ++eltNo) {
        ConstantFP *eltval = dyn_cast<ConstantFP>(
            CV->getElementAsConstant((unsigned)eltNo));
        assert(eltval && "Non-FP arguments in math function!");
        bool found = false;
        for (int i = 0; i < sz; ++i) {
          if (eltval->isExactlyValue(ftbl[i].input)) {
            DVal.push_back(ftbl[i].result);
            found = true;
            break;
          }
        }
        if (!found) {
          // This vector constant is not handled yet.
          return false;
        }
      }
      LLVMContext &context = CI->getParent()->getParent()->getContext();
      Constant *nval = nullptr;
      if (getArgType(FInfo) == AMDGPULibFunc::F32) {
        SmallVector<float, 0> FVal;
        for (unsigned i = 0; i < DVal.size(); ++i) {
          FVal.push_back((float)DVal[i]);
        }
        ArrayRef<float> tmp(FVal);
        nval = ConstantDataVector::get(context, tmp);
      } else {
        ArrayRef<double> tmp(DVal);
        nval = ConstantDataVector::get(context, tmp);
      }
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
      replaceCall(nval);
      return true;
    }
  } else {
    // Scalar version.
    if (ConstantFP *CF = dyn_cast<ConstantFP>(opr0)) {
      for (int i = 0; i < sz; ++i) {
        if (CF->isExactlyValue(ftbl[i].input)) {
          Value *nval = ConstantFP::get(CF->getType(), ftbl[i].result);
          LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
          replaceCall(nval);
          return true;
        }
      }
    }
  }

  return false;
}
bool AMDGPULibCalls::replaceWithNative(CallInst *CI, const FuncInfo &FInfo) {
  Module *M = CI->getModule();
  if (getArgType(FInfo) != AMDGPULibFunc::F32 ||
      FInfo.getPrefix() != AMDGPULibFunc::NOPFX ||
      !HasNative(FInfo.getId()))
    return false;

  AMDGPULibFunc nf = FInfo;
  nf.setPrefix(AMDGPULibFunc::NATIVE);
  if (FunctionCallee FPExpr = getFunction(M, nf)) {
    LLVM_DEBUG(dbgs() << "AMDIC: " << *CI << " ---> ");

    CI->setCalledFunction(FPExpr);

    LLVM_DEBUG(dbgs() << *CI << '\n');

    return true;
  }
  return false;
}
//  [native_]half_recip(c) ==> 1.0/c
bool AMDGPULibCalls::fold_recip(CallInst *CI, IRBuilder<> &B,
                                const FuncInfo &FInfo) {
  Value *opr0 = CI->getArgOperand(0);
  if (ConstantFP *CF = dyn_cast<ConstantFP>(opr0)) {
    // Just create a normal div. Later, InstCombine will be able
    // to compute the divide into a constant (avoiding checks for float
    // infinity or subnormals at this point).
    Value *nval = B.CreateFDiv(ConstantFP::get(CF->getType(), 1.0), opr0,
                               "__recip2div");
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
    replaceCall(nval);
    return true;
  }
  return false;
}
//  [native_]half_divide(x, c) ==> x/c
bool AMDGPULibCalls::fold_divide(CallInst *CI, IRBuilder<> &B,
                                 const FuncInfo &FInfo) {
  Value *opr0 = CI->getArgOperand(0);
  Value *opr1 = CI->getArgOperand(1);
  ConstantFP *CF0 = dyn_cast<ConstantFP>(opr0);
  ConstantFP *CF1 = dyn_cast<ConstantFP>(opr1);

  if ((CF0 && CF1) || // both are constants
      (CF1 && (getArgType(FInfo) == AMDGPULibFunc::F32)))
      // CF1 is constant && f32 divide
  {
    Value *nval1 = B.CreateFDiv(ConstantFP::get(opr1->getType(), 1.0),
                                opr1, "__div2recip");
    Value *nval = B.CreateFMul(opr0, nval1, "__div2mul");
    replaceCall(nval);
    return true;
  }
  return false;
}
static double log2(double V) {
#if _XOPEN_SOURCE >= 600 || defined(_ISOC99_SOURCE) || _POSIX_C_SOURCE >= 200112L
  return ::log2(V);
#else
  return log(V) / numbers::ln2;
#endif
}
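// fold_pow() below uses this host-side log2 to constant-fold log2(|x|) when
// the base of pow/powr/pown is a compile-time constant.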
bool AMDGPULibCalls::fold_pow(CallInst *CI, IRBuilder<> &B,
                              const FuncInfo &FInfo) {
  assert((FInfo.getId() == AMDGPULibFunc::EI_POW ||
          FInfo.getId() == AMDGPULibFunc::EI_POWR ||
          FInfo.getId() == AMDGPULibFunc::EI_POWN) &&
         "fold_pow: encountered an unexpected function");
  Value *opr0, *opr1;
  ConstantFP *CF;
  ConstantInt *CINT;
  ConstantAggregateZero *CZero;
  Type *eltType;

  opr0 = CI->getArgOperand(0);
  opr1 = CI->getArgOperand(1);
  CZero = dyn_cast<ConstantAggregateZero>(opr1);
  if (getVecSize(FInfo) == 1) {
    eltType = opr0->getType();
    CF = dyn_cast<ConstantFP>(opr1);
    CINT = dyn_cast<ConstantInt>(opr1);
  } else {
    VectorType *VTy = dyn_cast<VectorType>(opr0->getType());
    assert(VTy && "Operand of a vector function should be of vector type");
    eltType = VTy->getElementType();
    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(opr1);

    // For now, only handle vector constants whose elements all have the
    // same value.
    CF = CDV ? dyn_cast_or_null<ConstantFP>(CDV->getSplatValue()) : nullptr;
    CINT = CDV ? dyn_cast_or_null<ConstantInt>(CDV->getSplatValue()) : nullptr;
  }
  // No unsafe math and no constant argument: do nothing.
  if (!isUnsafeMath(CI) && !CF && !CINT && !CZero)
    return false;

  // 0x1111111 means that we don't do anything for this call.
  int ci_opr1 = (CINT ? (int)CINT->getSExtValue() : 0x1111111);
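  // (Any sentinel with magnitude greater than 12 works here, since only
  // |c| <= 12 is expanded below; the 0x11111111 further down serves the same
  // purpose.)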
  if ((CF && CF->isZero()) || (CINT && ci_opr1 == 0) || CZero) {
    // pow/powr/pown(x, 0) == 1
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1\n");
    Constant *cnval = ConstantFP::get(eltType, 1.0);
    if (getVecSize(FInfo) > 1) {
      cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
    }
    replaceCall(cnval);
    return true;
  }
  if ((CF && CF->isExactlyValue(1.0)) || (CINT && ci_opr1 == 1)) {
    // pow/powr/pown(x, 1.0) = x
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << "\n");
    replaceCall(opr0);
    return true;
  }
  if ((CF && CF->isExactlyValue(2.0)) || (CINT && ci_opr1 == 2)) {
    // pow/powr/pown(x, 2.0) = x*x
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " * " << *opr0
                      << "\n");
    Value *nval = B.CreateFMul(opr0, opr0, "__pow2");
    replaceCall(nval);
    return true;
  }
  if ((CF && CF->isExactlyValue(-1.0)) || (CINT && ci_opr1 == -1)) {
    // pow/powr/pown(x, -1.0) = 1.0/x
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1 / " << *opr0 << "\n");
    Constant *cnval = ConstantFP::get(eltType, 1.0);
    if (getVecSize(FInfo) > 1) {
      cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
    }
    Value *nval = B.CreateFDiv(cnval, opr0, "__powrecip");
    replaceCall(nval);
    return true;
  }
  Module *M = CI->getModule();
  if (CF && (CF->isExactlyValue(0.5) || CF->isExactlyValue(-0.5))) {
    // pow[r](x, [-]0.5) = sqrt(x)
    bool issqrt = CF->isExactlyValue(0.5);
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(issqrt ? AMDGPULibFunc::EI_SQRT
                                                : AMDGPULibFunc::EI_RSQRT,
                                         FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
                        << FInfo.getName().c_str() << "(" << *opr0 << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0,
                                 issqrt ? "__pow2sqrt" : "__pow2rsqrt");
      replaceCall(nval);
      return true;
    }
  }
  if (!isUnsafeMath(CI))
    return false;

  // Unsafe math optimizations only beyond this point.
  // Remember that ci_opr1 is set if opr1 is integral.
  if (CF) {
    double dval = (getArgType(FInfo) == AMDGPULibFunc::F32)
                      ? (double)CF->getValueAPF().convertToFloat()
                      : CF->getValueAPF().convertToDouble();
    int ival = (int)dval;
    if ((double)ival == dval) {
      ci_opr1 = ival;
    } else
      ci_opr1 = 0x11111111;
  }
  // pow/powr/pown(x, c) = [1/](x*x*..x); where
  //   trunc(c) == c && the number of x == c && |c| <= 12
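  // The expansion below uses square-and-multiply; e.g. pown(x, 11) becomes
  // x * x^2 * x^8, costing five multiplies instead of ten.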
  unsigned abs_opr1 = (ci_opr1 < 0) ? -ci_opr1 : ci_opr1;
  if (abs_opr1 <= 12) {
    Constant *cnval;
    Value *nval;
    if (abs_opr1 == 0) {
      cnval = ConstantFP::get(eltType, 1.0);
      if (getVecSize(FInfo) > 1) {
        cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
      }
      nval = cnval;
    } else {
      Value *valx2 = nullptr;
      nval = nullptr;
      while (abs_opr1 > 0) {
        valx2 = valx2 ? B.CreateFMul(valx2, valx2, "__powx2") : opr0;
        if (abs_opr1 & 1)
          nval = nval ? B.CreateFMul(nval, valx2, "__powprod") : valx2;
        abs_opr1 >>= 1;
      }
    }
    if (ci_opr1 < 0) {
      cnval = ConstantFP::get(eltType, 1.0);
      if (getVecSize(FInfo) > 1) {
        cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
      }
      nval = B.CreateFDiv(cnval, nval, "__1powprod");
    }
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
                      << ((ci_opr1 < 0) ? "1/prod(" : "prod(") << *opr0
                      << ")\n");
    replaceCall(nval);
    return true;
  }
  // powr ---> exp2(y * log2(x))
  // pown/pow ---> powr(fabs(x), y) | (x & ((int)y << 31))
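  //
  // The copysign trick works because x^y is negative exactly when x < 0 and
  // y is odd: ((int)y << 31) has only the sign bit set when y is odd, ANDing
  // it with the bits of x keeps x's sign bit only in that case, and ORing
  // that bit into exp2(y * log2(|x|)) yields the correctly signed result.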
  FunctionCallee ExpExpr =
      getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_EXP2, FInfo));
  if (!ExpExpr)
    return false;
  bool needlog = false;
  bool needabs = false;
  bool needcopysign = false;
  Constant *cnval = nullptr;
  if (getVecSize(FInfo) == 1) {
    CF = dyn_cast<ConstantFP>(opr0);

    if (CF) {
      double V = (getArgType(FInfo) == AMDGPULibFunc::F32)
                     ? (double)CF->getValueAPF().convertToFloat()
                     : CF->getValueAPF().convertToDouble();

      V = log2(std::abs(V));
      cnval = ConstantFP::get(eltType, V);
      needcopysign = (FInfo.getId() != AMDGPULibFunc::EI_POWR) &&
                     CF->isNegative();
    } else {
      needlog = true;
      needcopysign = needabs = FInfo.getId() != AMDGPULibFunc::EI_POWR &&
                               (!CF || CF->isNegative());
    }
  } else {
    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(opr0);

    if (!CDV) {
      needlog = true;
      needcopysign = needabs = FInfo.getId() != AMDGPULibFunc::EI_POWR;
    } else {
      assert((int)CDV->getNumElements() == getVecSize(FInfo) &&
             "Wrong vector size detected");
      SmallVector<double, 0> DVal;
      for (int i = 0; i < getVecSize(FInfo); ++i) {
        double V = (getArgType(FInfo) == AMDGPULibFunc::F32)
                       ? (double)CDV->getElementAsFloat(i)
                       : CDV->getElementAsDouble(i);
        if (V < 0.0)
          needcopysign = true;
        V = log2(std::abs(V));
        DVal.push_back(V);
      }
      if (getArgType(FInfo) == AMDGPULibFunc::F32) {
        SmallVector<float, 0> FVal;
        for (unsigned i = 0; i < DVal.size(); ++i) {
          FVal.push_back((float)DVal[i]);
        }
        ArrayRef<float> tmp(FVal);
        cnval = ConstantDataVector::get(M->getContext(), tmp);
      } else {
        ArrayRef<double> tmp(DVal);
        cnval = ConstantDataVector::get(M->getContext(), tmp);
      }
    }
  }
  if (needcopysign && (FInfo.getId() == AMDGPULibFunc::EI_POW)) {
    // We cannot handle corner cases for a general pow() function, give up
    // unless y is a constant integral value. Then proceed as if it were pown.
    if (getVecSize(FInfo) == 1) {
      if (const ConstantFP *CF = dyn_cast<ConstantFP>(opr1)) {
        double y = (getArgType(FInfo) == AMDGPULibFunc::F32)
                       ? (double)CF->getValueAPF().convertToFloat()
                       : CF->getValueAPF().convertToDouble();
        if (y != (double)(int64_t)y)
          return false;
      } else
        return false;
    } else {
      if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(opr1)) {
        for (int i = 0; i < getVecSize(FInfo); ++i) {
          double y = (getArgType(FInfo) == AMDGPULibFunc::F32)
                         ? (double)CDV->getElementAsFloat(i)
                         : CDV->getElementAsDouble(i);
          if (y != (double)(int64_t)y)
            return false;
        }
      } else
        return false;
    }
  }
  Value *nval;
  if (needabs) {
    FunctionCallee AbsExpr =
        getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_FABS, FInfo));
    if (!AbsExpr)
      return false;
    nval = CreateCallEx(B, AbsExpr, opr0, "__fabs");
  } else {
    nval = cnval ? cnval : opr0;
  }
  if (needlog) {
    FunctionCallee LogExpr =
        getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_LOG2, FInfo));
    if (!LogExpr)
      return false;
    nval = CreateCallEx(B, LogExpr, nval, "__log2");
  }
  if (FInfo.getId() == AMDGPULibFunc::EI_POWN) {
    // convert int(32) to fp(f32 or f64)
    opr1 = B.CreateSIToFP(opr1, nval->getType(), "pownI2F");
  }
  nval = B.CreateFMul(opr1, nval, "__ylogx");
  nval = CreateCallEx(B, ExpExpr, nval, "__exp2");
  if (needcopysign) {
    Value *opr_n;
    Type *rTy = opr0->getType();
    Type *nTyS = eltType->isDoubleTy() ? B.getInt64Ty() : B.getInt32Ty();
    Type *nTy = nTyS;
    if (const auto *vTy = dyn_cast<FixedVectorType>(rTy))
      nTy = FixedVectorType::get(nTyS, vTy);
    unsigned size = nTy->getScalarSizeInBits();
    opr_n = CI->getArgOperand(1);
    if (opr_n->getType()->isIntegerTy())
      opr_n = B.CreateZExtOrBitCast(opr_n, nTy, "__ytou");
    else
      opr_n = B.CreateFPToSI(opr1, nTy, "__ytou");

    Value *sign = B.CreateShl(opr_n, size - 1, "__yeven");
    sign = B.CreateAnd(B.CreateBitCast(opr0, nTy), sign, "__pow_sign");
    nval = B.CreateOr(B.CreateBitCast(nval, nTy), sign);
    nval = B.CreateBitCast(nval, opr0->getType());
  }
  LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
                    << "exp2(" << *opr1 << " * log2(" << *opr0 << "))\n");
  replaceCall(nval);

  return true;
}
bool AMDGPULibCalls::fold_rootn(CallInst *CI, IRBuilder<> &B,
                                const FuncInfo &FInfo) {
  Value *opr0 = CI->getArgOperand(0);
  Value *opr1 = CI->getArgOperand(1);

  ConstantInt *CINT = dyn_cast<ConstantInt>(opr1);
  if (!CINT)
    return false;

  int ci_opr1 = (int)CINT->getSExtValue();
  if (ci_opr1 == 1) { // rootn(x, 1) = x
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << "\n");
    replaceCall(opr0);
    return true;
  }
  if (ci_opr1 == 2) { // rootn(x, 2) = sqrt(x)
    Module *M = CI->getModule();
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_SQRT, FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> sqrt(" << *opr0 << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0, "__rootn2sqrt");
      replaceCall(nval);
      return true;
    }
  } else if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
    Module *M = CI->getModule();
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_CBRT, FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> cbrt(" << *opr0 << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0, "__rootn2cbrt");
      replaceCall(nval);
      return true;
    }
  } else if (ci_opr1 == -1) { // rootn(x, -1) = 1.0/x
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> 1.0 / " << *opr0 << "\n");
    Value *nval = B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0), opr0,
                               "__rootn2div");
    replaceCall(nval);
    return true;
  } else if (ci_opr1 == -2) { // rootn(x, -2) = rsqrt(x)
    Module *M = CI->getModule();
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_RSQRT, FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> rsqrt(" << *opr0
                        << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0, "__rootn2rsqrt");
      replaceCall(nval);
      return true;
    }
  }
  return false;
}
bool AMDGPULibCalls::fold_fma_mad(CallInst *CI, IRBuilder<> &B,
                                  const FuncInfo &FInfo) {
  Value *opr0 = CI->getArgOperand(0);
  Value *opr1 = CI->getArgOperand(1);
  Value *opr2 = CI->getArgOperand(2);

  ConstantFP *CF0 = dyn_cast<ConstantFP>(opr0);
  ConstantFP *CF1 = dyn_cast<ConstantFP>(opr1);
  if ((CF0 && CF0->isZero()) || (CF1 && CF1->isZero())) {
    // fma/mad(a, b, c) = c if a=0 || b=0
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr2 << "\n");
    replaceCall(opr2);
    return true;
  }
  if (CF0 && CF0->isExactlyValue(1.0f)) {
    // fma/mad(a, b, c) = b+c if a=1
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr1 << " + " << *opr2
                      << "\n");
    Value *nval = B.CreateFAdd(opr1, opr2, "fmaadd");
    replaceCall(nval);
    return true;
  }
  if (CF1 && CF1->isExactlyValue(1.0f)) {
    // fma/mad(a, b, c) = a+c if b=1
    LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " + " << *opr2
                      << "\n");
    Value *nval = B.CreateFAdd(opr0, opr2, "fmaadd");
    replaceCall(nval);
    return true;
  }
  if (ConstantFP *CF = dyn_cast<ConstantFP>(opr2)) {
    if (CF->isZero()) {
      // fma/mad(a, b, c) = a*b if c=0
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *opr0 << " * "
                        << *opr1 << "\n");
      Value *nval = B.CreateFMul(opr0, opr1, "fmamul");
      replaceCall(nval);
      return true;
    }
  }

  return false;
}
// Get a scalar native builtin single-argument FP function.
FunctionCallee AMDGPULibCalls::getNativeFunction(Module *M,
                                                 const FuncInfo &FInfo) {
  if (getArgType(FInfo) == AMDGPULibFunc::F64 || !HasNative(FInfo.getId()))
    return nullptr;
  FuncInfo nf = FInfo;
  nf.setPrefix(AMDGPULibFunc::NATIVE);
  return getFunction(M, nf);
}
// fold sqrt -> native_sqrt(x)
bool AMDGPULibCalls::fold_sqrt(CallInst *CI, IRBuilder<> &B,
                               const FuncInfo &FInfo) {
  if (getArgType(FInfo) == AMDGPULibFunc::F32 && (getVecSize(FInfo) == 1) &&
      (FInfo.getPrefix() != AMDGPULibFunc::NATIVE)) {
    if (FunctionCallee FPExpr = getNativeFunction(
            CI->getModule(), AMDGPULibFunc(AMDGPULibFunc::EI_SQRT, FInfo))) {
      Value *opr0 = CI->getArgOperand(0);
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> "
                        << "sqrt(" << *opr0 << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0, "__sqrt");
      replaceCall(nval);
      return true;
    }
  }
  return false;
}
// fold sin, cos -> sincos.
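//
// If sin(x) and cos(x) of the same argument both appear in one basic block
// (within MaxScan instructions of each other), the pair is replaced with a
// single sincos(x, &cosval) call.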
bool AMDGPULibCalls::fold_sincos(CallInst *CI, IRBuilder<> &B,
                                 AliasAnalysis *AA) {
  AMDGPULibFunc fInfo;
  if (!AMDGPULibFunc::parse(CI->getCalledFunction()->getName(), fInfo))
    return false;

  assert(fInfo.getId() == AMDGPULibFunc::EI_SIN ||
         fInfo.getId() == AMDGPULibFunc::EI_COS);
  bool const isSin = fInfo.getId() == AMDGPULibFunc::EI_SIN;

  Value *CArgVal = CI->getArgOperand(0);
  BasicBlock *const CBB = CI->getParent();

  int const MaxScan = 30;

  { // fold in load value.
    LoadInst *LI = dyn_cast<LoadInst>(CArgVal);
    if (LI && LI->getParent() == CBB) {
      BasicBlock::iterator BBI = LI->getIterator();
      Value *AvailableVal = FindAvailableLoadedValue(LI, CBB, BBI, MaxScan, AA);
      if (AvailableVal) {
        CArgVal->replaceAllUsesWith(AvailableVal);
        if (CArgVal->getNumUses() == 0)
          LI->eraseFromParent();
        CArgVal = CI->getArgOperand(0);
      }
    }
  }
  Module *M = CI->getModule();
  fInfo.setId(isSin ? AMDGPULibFunc::EI_COS : AMDGPULibFunc::EI_SIN);
  std::string const PairName = fInfo.mangle();

  CallInst *UI = nullptr;
  for (User *U : CArgVal->users()) {
    CallInst *XI = dyn_cast_or_null<CallInst>(U);
    if (!XI || XI == CI || XI->getParent() != CBB)
      continue;

    Function *UCallee = XI->getCalledFunction();
    if (!UCallee || !UCallee->getName().equals(PairName))
      continue;

    BasicBlock::iterator BBI = CI->getIterator();
    if (BBI == CI->getParent()->begin())
      break;
    --BBI;
    for (int I = MaxScan; I > 0 && BBI != CBB->begin(); --BBI, --I) {
      if (cast<Instruction>(BBI) == XI) {
        UI = XI;
        break;
      }
    }
    if (UI)
      break;
  }

  if (!UI)
    return false;
  // Merge the sin and cos.

  // For OpenCL 2.0, we have only a generic implementation of sincos.
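  // (AMDGPUAS::FLAT_ADDRESS below corresponds to the OpenCL generic address
  // space, so the mangled name matches the library's generic sincos variant.)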
  AMDGPULibFunc nf(AMDGPULibFunc::EI_SINCOS, fInfo);
  nf.getLeads()[0].PtrKind =
      AMDGPULibFunc::getEPtrKindFromAddrSpace(AMDGPUAS::FLAT_ADDRESS);
  FunctionCallee Fsincos = getFunction(M, nf);
  if (!Fsincos)
    return false;
  BasicBlock::iterator ItOld = B.GetInsertPoint();
  AllocaInst *Alloc = insertAlloca(UI, B, "__sincos_");
  B.SetInsertPoint(UI);

  Value *P = Alloc;
  Type *PTy = Fsincos.getFunctionType()->getParamType(1);
  // The alloca instruction allocates memory in the private address space,
  // which needs to be address-space-cast to match the sincos pointer
  // parameter. In OpenCL 2.0 that parameter is generic, while in 1.2 it is
  // private.
  if (PTy->getPointerAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
    P = B.CreateAddrSpaceCast(Alloc, PTy);
  CallInst *Call = CreateCallEx2(B, Fsincos, UI->getArgOperand(0), P);

  LLVM_DEBUG(errs() << "AMDIC: fold_sincos (" << *CI << ", " << *UI << ") with "
                    << *Call << "\n");
  if (!isSin) { // CI->cos, UI->sin
    B.SetInsertPoint(&*ItOld);
    UI->replaceAllUsesWith(&*Call);
    Instruction *Reload = B.CreateLoad(Alloc->getAllocatedType(), Alloc);
    CI->replaceAllUsesWith(Reload);
    UI->eraseFromParent();
    CI->eraseFromParent();
  } else { // CI->sin, UI->cos
    Instruction *Reload = B.CreateLoad(Alloc->getAllocatedType(), Alloc);
    UI->replaceAllUsesWith(Reload);
    CI->replaceAllUsesWith(Call);
    UI->eraseFromParent();
    CI->eraseFromParent();
  }

  return true;
}
bool AMDGPULibCalls::fold_wavefrontsize(CallInst *CI, IRBuilder<> &B) {
  if (!TM)
    return false;

  StringRef CPU = TM->getTargetCPU();
  StringRef Features = TM->getTargetFeatureString();
  if ((CPU.empty() || CPU.equals_lower("generic")) &&
      (Features.empty() ||
       Features.find_lower("wavefrontsize") == StringRef::npos))
    return false;
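  // Past this point the subtarget pins down the wavefront size, so the
  // intrinsic can be folded to a compile-time constant.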
  Function *F = CI->getParent()->getParent();
  const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(*F);
  unsigned N = ST.getWavefrontSize();

  LLVM_DEBUG(errs() << "AMDIC: fold_wavefrontsize (" << *CI << ") with "
                    << N << "\n");

  CI->replaceAllUsesWith(ConstantInt::get(B.getInt32Ty(), N));
  CI->eraseFromParent();
  return true;
}
// Get insertion point at entry.
BasicBlock::iterator AMDGPULibCalls::getEntryIns(CallInst *UI) {
  Function *Func = UI->getParent()->getParent();
  BasicBlock *BB = &Func->getEntryBlock();
  assert(BB && "Entry block not found!");
  BasicBlock::iterator ItNew = BB->begin();
  return ItNew;
}
// Insert an AllocaInst at the beginning of the function entry block.
AllocaInst *AMDGPULibCalls::insertAlloca(CallInst *UI, IRBuilder<> &B,
                                         const char *prefix) {
  BasicBlock::iterator ItNew = getEntryIns(UI);
  Function *UCallee = UI->getCalledFunction();
  Type *RetType = UCallee->getReturnType();
  B.SetInsertPoint(&*ItNew);
  AllocaInst *Alloc =
      B.CreateAlloca(RetType, 0, std::string(prefix) + UI->getName());
  Alloc->setAlignment(
      Align(UCallee->getParent()->getDataLayout().getTypeAllocSize(RetType)));
  return Alloc;
}
bool AMDGPULibCalls::evaluateScalarMathFunc(FuncInfo &FInfo,
                                            double &Res0, double &Res1,
                                            Constant *copr0, Constant *copr1,
                                            Constant *copr2) {
  // By default, opr0/opr1/opr2 hold values of float/double type.
  // If they are not float/double, each function has to convert its
  // operands separately.
  double opr0 = 0.0, opr1 = 0.0, opr2 = 0.0;
  ConstantFP *fpopr0 = dyn_cast_or_null<ConstantFP>(copr0);
  ConstantFP *fpopr1 = dyn_cast_or_null<ConstantFP>(copr1);
  ConstantFP *fpopr2 = dyn_cast_or_null<ConstantFP>(copr2);
  if (fpopr0) {
    opr0 = (getArgType(FInfo) == AMDGPULibFunc::F64)
               ? fpopr0->getValueAPF().convertToDouble()
               : (double)fpopr0->getValueAPF().convertToFloat();
  }
  if (fpopr1) {
    opr1 = (getArgType(FInfo) == AMDGPULibFunc::F64)
               ? fpopr1->getValueAPF().convertToDouble()
               : (double)fpopr1->getValueAPF().convertToFloat();
  }
  if (fpopr2) {
    opr2 = (getArgType(FInfo) == AMDGPULibFunc::F64)
               ? fpopr2->getValueAPF().convertToDouble()
               : (double)fpopr2->getValueAPF().convertToFloat();
  }
  switch (FInfo.getId()) {
  default:
    return false;
  case AMDGPULibFunc::EI_ACOS:
    Res0 = acos(opr0);
    return true;
  case AMDGPULibFunc::EI_ACOSH:
    // acosh(x) == log(x + sqrt(x*x - 1))
    Res0 = log(opr0 + sqrt(opr0 * opr0 - 1.0));
    return true;
  case AMDGPULibFunc::EI_ACOSPI:
    Res0 = acos(opr0) / MATH_PI;
    return true;
  case AMDGPULibFunc::EI_ASIN:
    Res0 = asin(opr0);
    return true;
  case AMDGPULibFunc::EI_ASINH:
    // asinh(x) == log(x + sqrt(x*x + 1))
    Res0 = log(opr0 + sqrt(opr0 * opr0 + 1.0));
    return true;
  case AMDGPULibFunc::EI_ASINPI:
    Res0 = asin(opr0) / MATH_PI;
    return true;
  case AMDGPULibFunc::EI_ATAN:
    Res0 = atan(opr0);
    return true;
  case AMDGPULibFunc::EI_ATANH:
    // atanh(x) == (log(x+1) - log(x-1))/2
    Res0 = (log(opr0 + 1.0) - log(opr0 - 1.0)) / 2.0;
    return true;
  case AMDGPULibFunc::EI_ATANPI:
    Res0 = atan(opr0) / MATH_PI;
    return true;
  case AMDGPULibFunc::EI_CBRT:
    Res0 = (opr0 < 0.0) ? -pow(-opr0, 1.0 / 3.0) : pow(opr0, 1.0 / 3.0);
    return true;
  case AMDGPULibFunc::EI_COS:
    Res0 = cos(opr0);
    return true;
  case AMDGPULibFunc::EI_COSH:
    Res0 = cosh(opr0);
    return true;
  case AMDGPULibFunc::EI_COSPI:
    Res0 = cos(MATH_PI * opr0);
    return true;
  case AMDGPULibFunc::EI_EXP:
    Res0 = exp(opr0);
    return true;
  case AMDGPULibFunc::EI_EXP2:
    Res0 = pow(2.0, opr0);
    return true;
  case AMDGPULibFunc::EI_EXP10:
    Res0 = pow(10.0, opr0);
    return true;
  case AMDGPULibFunc::EI_EXPM1:
    Res0 = exp(opr0) - 1.0;
    return true;
  case AMDGPULibFunc::EI_LOG:
    Res0 = log(opr0);
    return true;
  case AMDGPULibFunc::EI_LOG2:
    Res0 = log(opr0) / log(2.0);
    return true;
  case AMDGPULibFunc::EI_LOG10:
    Res0 = log(opr0) / log(10.0);
    return true;
  case AMDGPULibFunc::EI_RSQRT:
    Res0 = 1.0 / sqrt(opr0);
    return true;
  case AMDGPULibFunc::EI_SIN:
    Res0 = sin(opr0);
    return true;
  case AMDGPULibFunc::EI_SINH:
    Res0 = sinh(opr0);
    return true;
  case AMDGPULibFunc::EI_SINPI:
    Res0 = sin(MATH_PI * opr0);
    return true;
  case AMDGPULibFunc::EI_SQRT:
    Res0 = sqrt(opr0);
    return true;
  case AMDGPULibFunc::EI_TAN:
    Res0 = tan(opr0);
    return true;
  case AMDGPULibFunc::EI_TANH:
    Res0 = tanh(opr0);
    return true;
  case AMDGPULibFunc::EI_TANPI:
    Res0 = tan(MATH_PI * opr0);
    return true;
  case AMDGPULibFunc::EI_RECIP:
    Res0 = 1.0 / opr0;
    return true;

  // two-arg functions
  case AMDGPULibFunc::EI_DIVIDE:
    Res0 = opr0 / opr1;
    return true;
  case AMDGPULibFunc::EI_POW:
  case AMDGPULibFunc::EI_POWR:
    Res0 = pow(opr0, opr1);
    return true;
  case AMDGPULibFunc::EI_POWN: {
    if (ConstantInt *iopr1 = dyn_cast_or_null<ConstantInt>(copr1)) {
      double val = (double)iopr1->getSExtValue();
      Res0 = pow(opr0, val);
      return true;
    }
    return false;
  }
  case AMDGPULibFunc::EI_ROOTN: {
    if (ConstantInt *iopr1 = dyn_cast_or_null<ConstantInt>(copr1)) {
      double val = (double)iopr1->getSExtValue();
      Res0 = pow(opr0, 1.0 / val);
      return true;
    }
    return false;
  }
  case AMDGPULibFunc::EI_SINCOS:
    Res0 = sin(opr0);
    Res1 = cos(opr0);
    return true;

  // three-arg functions
  case AMDGPULibFunc::EI_FMA:
  case AMDGPULibFunc::EI_MAD:
    Res0 = opr0 * opr1 + opr2;
    return true;
  }

  return false;
}
bool AMDGPULibCalls::evaluateCall(CallInst *aCI, FuncInfo &FInfo) {
  int numArgs = (int)aCI->getNumArgOperands();
  if (numArgs > 3)
    return false;

  Constant *copr0 = nullptr;
  Constant *copr1 = nullptr;
  Constant *copr2 = nullptr;
  if (numArgs > 0 &&
      (copr0 = dyn_cast<Constant>(aCI->getArgOperand(0))) == nullptr)
    return false;
  if (numArgs > 1 &&
      (copr1 = dyn_cast<Constant>(aCI->getArgOperand(1))) == nullptr) {
    if (FInfo.getId() != AMDGPULibFunc::EI_SINCOS)
      return false;
  }
  if (numArgs > 2 &&
      (copr2 = dyn_cast<Constant>(aCI->getArgOperand(2))) == nullptr)
    return false;
  // At this point, all arguments to aCI are constants.

  // The max vector size is 16, and sincos generates two results.
  double DVal0[16], DVal1[16];
  bool hasTwoResults = (FInfo.getId() == AMDGPULibFunc::EI_SINCOS);
  if (getVecSize(FInfo) == 1) {
    if (!evaluateScalarMathFunc(FInfo, DVal0[0], DVal1[0], copr0, copr1,
                                copr2)) {
      return false;
    }
  } else {
    ConstantDataVector *CDV0 = dyn_cast_or_null<ConstantDataVector>(copr0);
    ConstantDataVector *CDV1 = dyn_cast_or_null<ConstantDataVector>(copr1);
    ConstantDataVector *CDV2 = dyn_cast_or_null<ConstantDataVector>(copr2);
    for (int i = 0; i < getVecSize(FInfo); ++i) {
      Constant *celt0 = CDV0 ? CDV0->getElementAsConstant(i) : nullptr;
      Constant *celt1 = CDV1 ? CDV1->getElementAsConstant(i) : nullptr;
      Constant *celt2 = CDV2 ? CDV2->getElementAsConstant(i) : nullptr;
      if (!evaluateScalarMathFunc(FInfo, DVal0[i], DVal1[i], celt0, celt1,
                                  celt2)) {
        return false;
      }
    }
  }
  LLVMContext &context = CI->getParent()->getParent()->getContext();
  Constant *nval0, *nval1;
  if (getVecSize(FInfo) == 1) {
    nval0 = ConstantFP::get(CI->getType(), DVal0[0]);
    if (hasTwoResults)
      nval1 = ConstantFP::get(CI->getType(), DVal1[0]);
  } else {
    if (getArgType(FInfo) == AMDGPULibFunc::F32) {
      SmallVector<float, 0> FVal0, FVal1;
      for (int i = 0; i < getVecSize(FInfo); ++i)
        FVal0.push_back((float)DVal0[i]);
      ArrayRef<float> tmp0(FVal0);
      nval0 = ConstantDataVector::get(context, tmp0);
      if (hasTwoResults) {
        for (int i = 0; i < getVecSize(FInfo); ++i)
          FVal1.push_back((float)DVal1[i]);
        ArrayRef<float> tmp1(FVal1);
        nval1 = ConstantDataVector::get(context, tmp1);
      }
    } else {
      ArrayRef<double> tmp0(DVal0);
      nval0 = ConstantDataVector::get(context, tmp0);
      if (hasTwoResults) {
        ArrayRef<double> tmp1(DVal1);
        nval1 = ConstantDataVector::get(context, tmp1);
      }
    }
  }
  if (hasTwoResults) {
    // sincos
    assert(FInfo.getId() == AMDGPULibFunc::EI_SINCOS &&
           "math function with ptr arg not supported yet");
    new StoreInst(nval1, aCI->getArgOperand(1), aCI);
  }

  replaceCall(nval0);
  return true;
}
// Public interface to the Simplify LibCalls pass.
FunctionPass *llvm::createAMDGPUSimplifyLibCallsPass(const TargetMachine *TM) {
  return new AMDGPUSimplifyLibCalls(TM);
}

FunctionPass *llvm::createAMDGPUUseNativeCallsPass() {
  return new AMDGPUUseNativeCalls();
}
bool AMDGPUSimplifyLibCalls::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  bool Changed = false;
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LLVM_DEBUG(dbgs() << "AMDIC: process function ";
             F.printAsOperand(dbgs(), false, F.getParent()); dbgs() << '\n';);
  for (auto &BB : F) {
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) {
      // Ignore non-calls.
      CallInst *CI = dyn_cast<CallInst>(I);
      ++I;
      // Ignore intrinsics that do not become real instructions.
      if (!CI || isa<DbgInfoIntrinsic>(CI) || CI->isLifetimeStartOrEnd())
        continue;

      // Ignore indirect calls.
      Function *Callee = CI->getCalledFunction();
      if (Callee == nullptr)
        continue;

      LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << "\n";
                 dbgs().flush());
      if (Simplifier.fold(CI, AA))
        Changed = true;
    }
  }
  return Changed;
}
bool AMDGPUUseNativeCalls::runOnFunction(Function &F) {
  if (skipFunction(F) || UseNative.empty())
    return false;

  bool Changed = false;
  for (auto &BB : F) {
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) {
      // Ignore non-calls.
      CallInst *CI = dyn_cast<CallInst>(I);
      ++I;
      if (!CI)
        continue;

      // Ignore indirect calls.
      Function *Callee = CI->getCalledFunction();
      if (Callee == nullptr)
        continue;

      if (Simplifier.useNative(CI))
        Changed = true;