1 //===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
/// This file defines a TargetTransformInfo::Concept conforming object specific to the
11 /// NVPTX target machine. It uses the target's detailed information to
12 /// provide more precise answers to certain TTI queries, while letting the
13 /// target independent and default TTI implementations handle the rest.
15 //===----------------------------------------------------------------------===//
17 #ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
18 #define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
21 #include "NVPTXTargetMachine.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/CodeGen/BasicTTIImpl.h"
24 #include "llvm/CodeGen/TargetLowering.h"
/// TargetTransformInfo implementation for the NVPTX target, layered on the
/// generic cost model in BasicTTIImplBase via CRTP.
class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
  typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  // Subtarget and lowering info cached at construction time.
  const NVPTXSubtarget *ST;
  const NVPTXTargetLowering *TLI;
  // Accessors BasicTTIImplBase expects from the derived (CRTP) class.
  const NVPTXSubtarget *getST() const { return ST; };
  const NVPTXTargetLowering *getTLI() const { return TLI; };
40 explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
41 : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl()),
42 TLI(ST->getTargetLowering()) {}
44 bool hasBranchDivergence() { return true; }
46 bool isSourceOfDivergence(const Value *V);
48 unsigned getFlatAddressSpace() const {
49 return AddressSpace::ADDRESS_SPACE_GENERIC;
52 // NVPTX has infinite registers of all kinds, but the actual machine doesn't.
53 // We conservatively return 1 here which is just enough to enable the
54 // vectorizers but disables heuristics based on the number of registers.
55 // FIXME: Return a more reasonable number, while keeping an eye on
56 // LoopVectorizer's unrolling heuristics.
57 unsigned getNumberOfRegisters(bool Vector) const { return 1; }
  // Only <2 x half> should be vectorized, so always return 32 for the vector
  // register size (the bit width of <2 x half>); the minimum width matches.
  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
  unsigned getMinVectorRegisterBitWidth() const { return 32; }
64 // We don't want to prevent inlining because of target-cpu and -features
65 // attributes that were added to newer versions of LLVM/Clang: There are
66 // no incompatible functions in PTX, ptxas will throw errors in such cases.
67 bool areInlineCompatible(const Function *Caller,
68 const Function *Callee) const {
72 // Increase the inlining cost threshold by a factor of 5, reflecting that
73 // calls are particularly expensive in NVPTX.
74 unsigned getInliningThresholdMultiplier() { return 5; }
  // Cost-model hook for arithmetic instructions; implemented out of line.
  // The operand kind/property hints let the implementation price operations
  // with constant or uniform operands differently.
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());
  // Loop-unrolling tuning hook; implemented out of line. Fills \p UP with
  // NVPTX-specific unrolling preferences for loop \p L.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);
86 bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
87 // Volatile loads/stores are only supported for shared and global address
88 // spaces, or for generic AS that maps to them.
89 if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||
90 AddrSpace == llvm::ADDRESS_SPACE_GLOBAL ||
91 AddrSpace == llvm::ADDRESS_SPACE_SHARED))
94 switch(I->getOpcode()){
97 case Instruction::Load:
98 case Instruction::Store:
104 } // end namespace llvm