//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//
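
// Tell the optimizer whether this subtarget has a hardware population-count
// instruction for integers of the given width, and whether it is fast or slow.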
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
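
// Estimate the cost of materializing the integer constant Imm of type Ty in a
// register, in units of TTI::TCC_Basic.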
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
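
// Estimate the cost of Imm when it appears as operand Idx of the given
// intrinsic call; stackmap/patchpoint operands and small overflow-intrinsic
// immediates are free.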
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
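
// Estimate the cost of Imm when it appears as operand Idx of an instruction
// with the given IR opcode; many PPC instructions can fold small immediates
// directly.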
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}
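
// Allow the memcmp expansion pass to lower memcmp calls inline, using loads of
// up to 8 bytes at a time.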
bool PPCTTIImpl::expandMemCmp(Instruction *I, unsigned &MaxLoadSize) {
  MaxLoadSize = 8;
  return true;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}
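
// Number of registers available to the vectorizer: none for vector types
// without Altivec/QPX, and 64 rather than 32 when VSX is available.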
unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  return ST->isPPC64() ? 64 : 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}
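
// Pick an interleave (unrolling) factor based on the pipeline depth and the
// number of floating-point execution units of the target CPU.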
unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: Treat P9 like the previous generation until POWER9 scheduling
  // is ready.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return LT.first;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}
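
// Cost of inserting or extracting a single vector element. Without VSX,
// Altivec element accesses go through memory and pay a load-hit-store penalty.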
int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}